/*	$NetBSD: chfs_wbuf.c,v 1.2 2011/11/24 20:50:33 agc Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/flash/flash.h>
#include <sys/uio.h>
#include "chfs.h"
//#include </root/xipffs/netbsd.chfs/chfs.h>

#define DBG_WBUF 1
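
/*
 * PAD() rounds a length up to the next multiple of four (node alignment).
 * EB_ADDRESS() is the start address of the erase block containing x.
 * PAGE_DIV()/PAGE_MOD() split an offset into its page-aligned part and the
 * remainder within that page, in units of the wbuf page size.
 */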
#define PAD(x) (((x)+3)&~3)

#define EB_ADDRESS(x) ( ((unsigned long)(x) / chmp->chm_ebh->eb_size) * chmp->chm_ebh->eb_size )

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(chmp->chm_wbuf_pagesize)) * (unsigned long)(chmp->chm_wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(chmp->chm_wbuf_pagesize) )

// test functions
int wbuf_test(void);
void wbuf_test_erase_flash(struct chfs_mount*);
void wbuf_test_callback(struct erase_instruction*);
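
/* Padding modes for chfs_flush_wbuf(). */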
#define NOPAD	0
#define SETPAD	1

/**
 * chfs_flush_wbuf - write wbuf to the flash
 * @chmp: super block info
 * @pad: padding (NOPAD / SETPAD)
 * Returns zero in case of success.
 */
static int
chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
{
	int ret = 0;
	size_t retlen = 0;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));
	KASSERT(rw_write_held(&chmp->chm_lock_wbuf));

	if (pad) {
		chmp->chm_wbuf_len = PAD(chmp->chm_wbuf_len);
		memset(chmp->chm_wbuf + chmp->chm_wbuf_len, 0, chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);

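		/*
		 * Fill the rest of the page with a padding node so that the
		 * scan code can recognize and skip the unused area.
		 */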
		struct chfs_flash_padding_node* padnode = (void*)(chmp->chm_wbuf + chmp->chm_wbuf_len);
		padnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
		padnode->type = htole16(CHFS_NODETYPE_PADDING);
		padnode->length = htole32(chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);
		padnode->hdr_crc = htole32(crc32(0, (uint8_t *)padnode, sizeof(*padnode)-4));

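		/*
		 * Track the padded area with an obsolete node reference so
		 * the space is accounted for but never read back as data.
		 */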
		struct chfs_node_ref *nref;
		nref = chfs_alloc_node_ref(chmp->chm_nextblock);
		nref->nref_offset = chmp->chm_wbuf_ofs + chmp->chm_wbuf_len;
		nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
		    CHFS_OBSOLETE_NODE_MASK;
		chmp->chm_wbuf_len = chmp->chm_wbuf_pagesize;

		chfs_change_size_free(chmp, chmp->chm_nextblock, -padnode->length);
		chfs_change_size_wasted(chmp, chmp->chm_nextblock, padnode->length);
	}

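	/* Write the buffer out to the current logical erase block. */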
	ret = chfs_write_leb(chmp, chmp->chm_nextblock->lnr, chmp->chm_wbuf, chmp->chm_wbuf_ofs, chmp->chm_wbuf_len, &retlen);
	if (ret) {
		return ret;
	}

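	/* Reset the wbuf to the erased-flash pattern and advance to the next page. */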
	memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
	chmp->chm_wbuf_ofs += chmp->chm_wbuf_pagesize;
	chmp->chm_wbuf_len = 0;
	return 0;
}

/**
 * chfs_fill_wbuf - write to wbuf
 * @chmp: super block info
 * @buf: buffer
 * @len: buffer length
 * Returns the number of bytes of @buf copied into the wbuf.
 */
static size_t
chfs_fill_wbuf(struct chfs_mount *chmp, const u_char *buf, size_t len)
{
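	/*
	 * A page-sized (or larger) write into an empty wbuf is left for the
	 * caller to write directly to flash; nothing is buffered here.
	 */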
	if (len && !chmp->chm_wbuf_len && (len >= chmp->chm_wbuf_pagesize)) {
		return 0;
	}

	if (len > (chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len)) {
		len = chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len;
	}

	memcpy(chmp->chm_wbuf + chmp->chm_wbuf_len, buf, len);

	chmp->chm_wbuf_len += (int) len;
	return len;
}

/**
 * chfs_write_wbuf - write to wbuf and then to the flash
 * @chmp: super block info
 * @invecs: io vectors
 * @count: num of vectors
 * @to: offset of target
 * @retlen: number of bytes written
 * Returns zero in case of success.
 */
int
chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
    off_t to, size_t *retlen)
{
	int invec, ret = 0;
	size_t wbuf_retlen, donelen = 0;
	int outvec_to = to;

	int lnr = chmp->chm_nextblock->lnr;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));
	KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));

	rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);

	//dbg("1. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);

	if (chmp->chm_wbuf_ofs == 0xffffffff) {
		chmp->chm_wbuf_ofs = PAGE_DIV(to);
		chmp->chm_wbuf_len = PAGE_MOD(to);
		memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
	}

	//dbg("2. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);

	if (EB_ADDRESS(to) != EB_ADDRESS(chmp->chm_wbuf_ofs)) {
		if (chmp->chm_wbuf_len) {
			ret = chfs_flush_wbuf(chmp, SETPAD);
			if (ret)
				goto outerr;
		}
		chmp->chm_wbuf_ofs = PAGE_DIV(to);
		chmp->chm_wbuf_len = PAGE_MOD(to);
	}

	//dbg("3. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);

	if (to != PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len)) {
		dbg("to: %llu != %zu\n", (unsigned long long)to,
		    PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len));
		dbg("Non-contiguous write\n");
		panic("BUG\n");
	}

	/* adjust alignment offset */
	if (chmp->chm_wbuf_len != PAGE_MOD(to)) {
		chmp->chm_wbuf_len = PAGE_MOD(to);
		/* take care of alignment to the next page */
		if (!chmp->chm_wbuf_len) {
			chmp->chm_wbuf_len += chmp->chm_wbuf_pagesize;
			ret = chfs_flush_wbuf(chmp, NOPAD);
			if (ret)
				goto outerr;
		}
	}

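	/*
	 * Copy each io vector through the wbuf: flush whenever the wbuf
	 * fills a page, and write any page-aligned middle part of a vector
	 * directly to the erase block, bypassing the wbuf.
	 */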
	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		u_char* v = invecs[invec].iov_base;

		//dbg("invec:%d len:%d\n", invec, vlen);

		wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
		if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
			ret = chfs_flush_wbuf(chmp, NOPAD);
			if (ret) {
				goto outerr;
			}
		}

		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		v += wbuf_retlen;
		donelen += wbuf_retlen;

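		/* The page-aligned bulk of the vector goes straight to the erase block. */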
		if (vlen >= chmp->chm_wbuf_pagesize) {
			ret = chfs_write_leb(chmp, lnr, v, outvec_to, PAGE_DIV(vlen), &wbuf_retlen);
			//dbg("fd->write: %zu\n", wbuf_retlen);
			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			chmp->chm_wbuf_ofs = outvec_to;
			v += wbuf_retlen;
			donelen += wbuf_retlen;
		}

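		/* Buffer whatever remains of the vector in the wbuf. */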
		wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
		if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
			ret = chfs_flush_wbuf(chmp, NOPAD);
			if (ret)
				goto outerr;
		}

		// if we write the last vector, we flush with padding
		/*if (invec == count-1) {
			ret = chfs_flush_wbuf(chmp, SETPAD);
			if (ret)
				goto outerr;
		}*/

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	*retlen = donelen;
	rw_exit(&chmp->chm_lock_wbuf);
	return ret;

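/* XXX: the error path below returns with chm_lock_wbuf still held. */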
outerr:
	*retlen = 0;
	return ret;
}

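/**
 * chfs_flush_pending_wbuf - flush the pending wbuf to flash, with padding
 * @chmp: super block info
 * Returns zero in case of success.
 */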
int chfs_flush_pending_wbuf(struct chfs_mount *chmp)
{
	//dbg("flush pending wbuf\n");
	int err;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	mutex_enter(&chmp->chm_lock_sizes);
	rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
	err = chfs_flush_wbuf(chmp, SETPAD);
	rw_exit(&chmp->chm_lock_wbuf);
	mutex_exit(&chmp->chm_lock_sizes);
	return err;
}