/*
 * fs/logfs/dev_mtd.c - Device access methods for MTD
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

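/*
 * Thin wrapper around mtd->read().  The BUG_ON catches -EINVAL, which
 * would mean logfs passed a malformed request; a short read without an
 * error code is turned into -EIO.
 */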
static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
{
        struct mtd_info *mtd = logfs_super(sb)->s_mtd;
        size_t retlen;
        int ret;

        ret = mtd->read(mtd, ofs, len, &retlen, buf);
        BUG_ON(ret == -EINVAL);
        if (ret)
                return ret;

        /* Not sure if we should loop instead. */
        if (retlen != len)
                return -EIO;

        return 0;
}

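/*
 * Write len bytes at ofs.  The BUG_ONs enforce logfs invariants: the
 * write must lie inside the device, start on a writesize boundary and
 * not exceed a single page.
 */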
static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
{
        struct logfs_super *super = logfs_super(sb);
        struct mtd_info *mtd = super->s_mtd;
        size_t retlen;
        loff_t page_start, page_end;
        int ret;

        if (super->s_flags & LOGFS_SB_FLAG_RO)
                return -EROFS;

        BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
        BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
        BUG_ON(len > PAGE_CACHE_SIZE);
        page_start = ofs & PAGE_CACHE_MASK;
        page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
        ret = mtd->write(mtd, ofs, len, &retlen, buf);
        if (ret || (retlen != len))
                return -EIO;

        return 0;
}

/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface lacking the first driver to actually use the
 * asynchronous properties.  So just to prevent the first implementor of such
 * a thing from breaking logfs in 2350, we do the usual pointless dance to
 * declare a completion variable and wait for completion before returning
 * from mtd_erase().  What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
        complete((struct completion *)ei->priv);
}

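/*
 * Keep the page cache coherent with the flash: after an erase, any
 * cached pages covering the erased range are reset to 0xFF.
 */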
static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct page *page;
        pgoff_t index;

        for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
                page = find_get_page(mapping, index);
                if (!page)
                        continue;
                memset(page_address(page), 0xFF, PAGE_SIZE);
                page_cache_release(page);
        }
        return 0;
}

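/*
 * Erase a whole number of erase blocks, then bring the page cache back
 * in sync.  See the comment above logfs_erase_callback() for why we
 * block on a completion here.
 */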
static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
                int ensure_write)
{
        struct mtd_info *mtd = logfs_super(sb)->s_mtd;
        struct erase_info ei;
        DECLARE_COMPLETION_ONSTACK(complete);
        int ret;

        BUG_ON(len % mtd->erasesize);
        if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
                return -EROFS;

        memset(&ei, 0, sizeof(ei));
        ei.mtd = mtd;
        ei.addr = ofs;
        ei.len = len;
        ei.callback = logfs_erase_callback;
        ei.priv = (long)&complete;
        ret = mtd->erase(mtd, &ei);
        if (ret)
                return -EIO;

        wait_for_completion(&complete);
        if (ei.state != MTD_ERASE_DONE)
                return -EIO;
        return mtd_erase_mapping(sb, ofs, len);
}

static void mtd_sync(struct super_block *sb)
{
        struct mtd_info *mtd = logfs_super(sb)->s_mtd;

        if (mtd->sync)
                mtd->sync(mtd);
}

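/*
 * filler_t callback for read_cache_page().  ECC notifications (-EUCLEAN,
 * -EBADMSG) are deliberately not treated as hard errors; any other error
 * marks the page PageError.
 */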
static int mtd_readpage(void *_sb, struct page *page)
{
        struct super_block *sb = _sb;
        int err;

        err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
                        page_address(page));
        if (err == -EUCLEAN || err == -EBADMSG) {
                /* -EBADMSG happens regularly on power failures */
                err = 0;
                /* FIXME: force GC this segment */
        }
        if (err) {
                ClearPageUptodate(page);
                SetPageError(page);
        } else {
                SetPageUptodate(page);
                ClearPageError(page);
        }
        unlock_page(page);
        return err;
}

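/*
 * Skip bad blocks from the start of the device and read the first
 * superblock page from the first good erase block.
 */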
static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = mtd_readpage;
        struct mtd_info *mtd = super->s_mtd;

        if (!mtd->block_isbad)
                return NULL;

        *ofs = 0;
        while (mtd->block_isbad(mtd, *ofs)) {
                *ofs += mtd->erasesize;
                if (*ofs >= mtd->size)
                        return NULL;
        }
        BUG_ON(*ofs & ~PAGE_MASK);
        return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}

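/*
 * Same as mtd_find_first_sb(), but scanning backwards from the end of
 * the device.  The superblock sits in the last 0x1000 bytes of the block.
 */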
static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = mtd_readpage;
        struct mtd_info *mtd = super->s_mtd;

        if (!mtd->block_isbad)
                return NULL;

        *ofs = mtd->size - mtd->erasesize;
        while (mtd->block_isbad(mtd, *ofs)) {
                *ofs -= mtd->erasesize;
                if (*ofs <= 0)
                        return NULL;
        }
        *ofs = *ofs + mtd->erasesize - 0x1000;
        BUG_ON(*ofs & ~PAGE_MASK);
        return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}

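/*
 * Write nr_pages pages from the mapping inode's page cache to the
 * device, one page at a time.  The pages are expected to be present;
 * a missing page is a bug.
 */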
static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct page *page;
        int i, err;

        for (i = 0; i < nr_pages; i++) {
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);

                err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
                                page_address(page));
                unlock_page(page);
                page_cache_release(page);
                if (err)
                        return err;
        }
        return 0;
}

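/*
 * Write out a segment.  The range is rounded out to page boundaries
 * before being handed to __mtd_writeseg().
 */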
static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
        struct logfs_super *super = logfs_super(sb);
        int head;

        if (super->s_flags & LOGFS_SB_FLAG_RO)
                return;

        if (len == 0) {
                /* This can happen when the object fit perfectly into a
                 * segment, the segment gets written per sync and subsequently
                 * closed.
                 */
                return;
        }
        head = ofs & (PAGE_SIZE - 1);
        if (head) {
                ofs -= head;
                len += head;
        }
        len = PAGE_ALIGN(len);
        __mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void mtd_put_device(struct logfs_super *s)
{
        put_mtd_device(s->s_mtd);
}

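/*
 * Check whether one write buffer's worth of flash at ofs is still erased
 * (all 0xff), i.e. whether it can be written without an intervening erase.
 */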
static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
        struct logfs_super *super = logfs_super(sb);
        void *buf;
        int err;

        buf = kmalloc(super->s_writesize, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        err = mtd_read(sb, ofs, super->s_writesize, buf);
        if (err)
                goto out;
        if (memchr_inv(buf, 0xff, super->s_writesize))
                err = -EIO;
out:
        kfree(buf);
        return err;
}

static const struct logfs_device_ops mtd_devops = {
        .find_first_sb  = mtd_find_first_sb,
        .find_last_sb   = mtd_find_last_sb,
        .readpage       = mtd_readpage,
        .writeseg       = mtd_writeseg,
        .erase          = mtd_erase,
        .can_write_buf  = mtd_can_write_buf,
        .sync           = mtd_sync,
        .put_device     = mtd_put_device,
};

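/*
 * Grab a reference to MTD device mtdnr and install the MTD device
 * operations in the superblock.
 */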
int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
{
        struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
        if (IS_ERR(mtd))
                return PTR_ERR(mtd);

        s->s_mtd = mtd;
        s->s_devops = &mtd_devops;
        return 0;
}