/*
 * fs/logfs/dev_mtd.c	- Device access methods for MTD
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
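
/*
 * Read len bytes at offset ofs from the flash device into buf.
 * Returns 0 on success or a negative errno; a short read is treated
 * as -EIO rather than retried.
 */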
static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
			void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	size_t retlen;
	int ret;

	ret = mtd_read(mtd, ofs, len, &retlen, buf);
	BUG_ON(ret == -EINVAL);
	if (ret)
		return ret;

	/* Not sure if we should loop instead. */
	if (retlen != len)
		return -EIO;

	return 0;
}
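
/*
 * Write len bytes from buf to the flash device at offset ofs.  The offset
 * must be aligned to the device write size and the length must not exceed
 * one page.  Fails with -EROFS on read-only mounts, -EIO on short writes.
 */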
static int logfs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
			void *buf)
{
	struct logfs_super *super = logfs_super(sb);
	struct mtd_info *mtd = super->s_mtd;
	size_t retlen;
	loff_t page_start, page_end;
	int ret;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
	BUG_ON(len > PAGE_SIZE);
	page_start = ofs & PAGE_MASK;
	page_end = PAGE_ALIGN(ofs + len) - 1;
	ret = mtd_write(mtd, ofs, len, &retlen, buf);
	if (ret || (retlen != len))
		return -EIO;

	return 0;
}

/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface, yet no driver has ever actually made use of its
 * asynchronous properties.  So just to prevent the first implementor of such
 * a thing from breaking logfs in 2350, we do the usual pointless dance to
 * declare a completion variable and wait for completion before returning
 * from logfs_mtd_erase().  What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
	complete((struct completion *)ei->priv);
}
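
/*
 * After an erase, invalidate any cached pages covering the erased range by
 * filling them with 0xFF, the erased state of flash.
 */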
static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
				size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	pgoff_t index = ofs >> PAGE_SHIFT;

	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
		page = find_get_page(mapping, index);
		if (!page)
			continue;
		memset(page_address(page), 0xFF, PAGE_SIZE);
		put_page(page);
	}
	return 0;
}
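
/*
 * Erase a range of the device synchronously and then invalidate the page
 * cache over that range.  The length must be a whole number of erase blocks.
 */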
static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
		int ensure_write)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;

	BUG_ON(len % mtd->erasesize);
	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = len;
	ei.callback = logfs_erase_callback;
	ei.priv = (long)&complete;
	ret = mtd_erase(mtd, &ei);
	if (ret)
		return -EIO;

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;
	return logfs_mtd_erase_mapping(sb, ofs, len);
}
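
/* Flush any data the MTD driver may still have buffered. */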
static void logfs_mtd_sync(struct super_block *sb)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;

	mtd_sync(mtd);
}
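
/*
 * Page cache filler: read one page from the device.  Correctable (-EUCLEAN)
 * and uncorrectable (-EBADMSG) ECC errors are not treated as fatal here;
 * the data read is handed back and the page is marked up to date.
 */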
static int logfs_mtd_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	int err;

	err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
			page_address(page));
	if (err == -EUCLEAN || err == -EBADMSG) {
		/* -EBADMSG happens regularly on power failures */
		err = 0;
		/* FIXME: force GC this segment */
	}
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
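
/*
 * Find the first erase block that is not marked bad and read the superblock
 * page stored at its start.  Returns NULL if every block up to the end of
 * the device is bad.
 */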
static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	*ofs = 0;
	while (mtd_block_isbad(mtd, *ofs)) {
		*ofs += mtd->erasesize;
		if (*ofs >= mtd->size)
			return NULL;
	}
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
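
/*
 * Find the last good erase block and read the superblock copy stored near
 * its end (0x1000 bytes before the end of the block).
 */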
static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = logfs_mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	*ofs = mtd->size - mtd->erasesize;
	while (mtd_block_isbad(mtd, *ofs)) {
		if (*ofs == 0)
			return NULL;
		*ofs -= mtd->erasesize;
	}
	*ofs = *ofs + mtd->erasesize - 0x1000;
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
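
/*
 * Write nr_pages pages from the mapping inode's page cache to the device,
 * one page at a time, starting at the given page index.
 */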
static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	int i, err;

	for (i = 0; i < nr_pages; i++) {
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);

		err = logfs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
					page_address(page));
		unlock_page(page);
		put_page(page);
		if (err)
			return err;
	}
	return 0;
}
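
/*
 * Write out a segment: round the byte range out to whole pages and push the
 * cached pages covering it to the device.  A no-op on read-only mounts and
 * for zero-length ranges.
 */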
static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return;

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed. */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void logfs_mtd_put_device(struct logfs_super *s)
{
	put_mtd_device(s->s_mtd);
}
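
/*
 * Check whether a write-buffer-sized region is still in the erased (all
 * 0xFF) state and can therefore still be written.  Returns 0 if so, -EIO
 * if not, or another negative errno on failure.
 */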
static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
{
	struct logfs_super *super = logfs_super(sb);
	void *buf;
	int err;

	buf = kmalloc(super->s_writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
	if (err)
		goto out;
	if (memchr_inv(buf, 0xff, super->s_writesize))
		err = -EIO;
out:
	kfree(buf);
	return err;
}
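
/* Device operations used by the generic logfs code when running on MTD. */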
static const struct logfs_device_ops mtd_devops = {
	.find_first_sb	= logfs_mtd_find_first_sb,
	.find_last_sb	= logfs_mtd_find_last_sb,
	.readpage	= logfs_mtd_readpage,
	.writeseg	= logfs_mtd_writeseg,
	.erase		= logfs_mtd_erase,
	.can_write_buf	= logfs_mtd_can_write_buf,
	.sync		= logfs_mtd_sync,
	.put_device	= logfs_mtd_put_device,
};
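
/*
 * Look up MTD device mtdnr and attach it to the logfs superblock, wiring up
 * the MTD device operations.
 */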
int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
{
	struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	s->s_mtd = mtd;
	s->s_devops = &mtd_devops;
	return 0;
}