/*	$NetBSD: chfs_wbuf.c,v 1.7 2014/10/18 08:33:29 snj Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *                    University of Szeged, Hungary
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <dev/flash/flash.h>
#include <sys/uio.h>
#include "chfs.h"
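
/*
 * The macros below are small helpers for managing the write buffer:
 * PAD() rounds a length up to a 4-byte boundary, EB_ADDRESS() returns the
 * start of the erase block that contains an offset, and PAGE_DIV()/
 * PAGE_MOD() split an offset into its write-page-aligned part and the
 * remainder within the page.
 */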
#define DBG_WBUF 1 /* XXX unused, but should be */

#define PAD(x) (((x)+3)&~3)

#define EB_ADDRESS(x) ( rounddown((x), chmp->chm_ebh->eb_size) )

#define PAGE_DIV(x) ( rounddown((x), chmp->chm_wbuf_pagesize) )
#define PAGE_MOD(x) ( (x) % (chmp->chm_wbuf_pagesize) )

/* writebuffer options */
enum {
        WBUF_NOPAD,
        WBUF_SETPAD
};
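
/*
 * WBUF_NOPAD flushes the buffer content as it is; WBUF_SETPAD first fills
 * the unused remainder of the write page with a padding node (see
 * chfs_flush_wbuf() below).
 */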

/*
 * chfs_flush_wbuf - write wbuf to the flash
 * Returns zero in case of success.
 */
static int
chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
{
        int ret;
        size_t retlen;
        struct chfs_node_ref *nref;
        struct chfs_flash_padding_node* padnode;

        KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
        KASSERT(mutex_owned(&chmp->chm_lock_sizes));
        KASSERT(rw_write_held(&chmp->chm_lock_wbuf));
        KASSERT(pad == WBUF_SETPAD || pad == WBUF_NOPAD);

        /* check padding option */
        if (pad == WBUF_SETPAD) {
                chmp->chm_wbuf_len = PAD(chmp->chm_wbuf_len);
                memset(chmp->chm_wbuf + chmp->chm_wbuf_len, 0,
                    chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);

                /* add a padding node */
                padnode = (void *)(chmp->chm_wbuf + chmp->chm_wbuf_len);
                padnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
                padnode->type = htole16(CHFS_NODETYPE_PADDING);
                padnode->length = htole32(chmp->chm_wbuf_pagesize
                    - chmp->chm_wbuf_len);
                padnode->hdr_crc = htole32(crc32(0, (uint8_t *)padnode,
                    sizeof(*padnode)-4));
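
                /*
                 * Track the padding node with a node ref on the current
                 * erase block, but mark it obsolete right away: it only
                 * exists to fill the page and never has to be read back.
                 */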
                nref = chfs_alloc_node_ref(chmp->chm_nextblock);
                nref->nref_offset = chmp->chm_wbuf_ofs + chmp->chm_wbuf_len;
                nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
                    CHFS_OBSOLETE_NODE_MASK;
                chmp->chm_wbuf_len = chmp->chm_wbuf_pagesize;

                /* change sizes after padding node */
                chfs_change_size_free(chmp, chmp->chm_nextblock,
                    -padnode->length);
                chfs_change_size_wasted(chmp, chmp->chm_nextblock,
                    padnode->length);
        }

        /* write out the buffer */
        ret = chfs_write_leb(chmp, chmp->chm_nextblock->lnr, chmp->chm_wbuf,
            chmp->chm_wbuf_ofs, chmp->chm_wbuf_len, &retlen);
        if (ret) {
                return ret;
        }
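
        /*
         * 0xff is the erased state of the flash, so a freshly reset buffer
         * looks like empty, unwritten space.
         */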
        /* reset the buffer */
        memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
        chmp->chm_wbuf_ofs += chmp->chm_wbuf_pagesize;
        chmp->chm_wbuf_len = 0;

        return 0;
}

/*
 * chfs_fill_wbuf - write data to wbuf
 * Returns the number of bytes actually copied into the wbuf.
 */
static size_t
chfs_fill_wbuf(struct chfs_mount *chmp, const u_char *buf, size_t len)
{
        /* an empty wbuf and at least a full page of data: leave it to the
         * caller to write the data out directly */
        if (len && !chmp->chm_wbuf_len && (len >= chmp->chm_wbuf_pagesize)) {
                return 0;
        }

        /* clamp to the space left in the wbuf */
        if (len > (chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len)) {
                len = chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len;
        }

        /* write into the wbuf */
        memcpy(chmp->chm_wbuf + chmp->chm_wbuf_len, buf, len);

        /* update the actual length of writebuffer */
        chmp->chm_wbuf_len += (int) len;
        return len;
}

/*
 * chfs_write_wbuf - write to wbuf and then the flash
 * Returns zero in case of success.
 */
int
chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
    off_t to, size_t *retlen)
{
        int invec, ret = 0;
        size_t wbuf_retlen, donelen = 0;
        int outvec_to = to;

        int lnr = chmp->chm_nextblock->lnr;

        KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
        KASSERT(mutex_owned(&chmp->chm_lock_sizes));
        KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));

        rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
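
        /*
         * A buffer offset of 0xffffffff means the write buffer has not been
         * positioned yet; line it up with the page that contains the target
         * offset.
         */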
        if (chmp->chm_wbuf_ofs == 0xffffffff) {
                chmp->chm_wbuf_ofs = PAGE_DIV(to);
                chmp->chm_wbuf_len = PAGE_MOD(to);
                memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
        }
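
        /*
         * The target offset is outside the erase block the buffer currently
         * points at: flush what we have (padded to a full page) and
         * reposition the buffer.
         */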
        if (EB_ADDRESS(to) != EB_ADDRESS(chmp->chm_wbuf_ofs)) {
                if (chmp->chm_wbuf_len) {
                        ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
                        if (ret)
                                goto outerr;
                }
                chmp->chm_wbuf_ofs = PAGE_DIV(to);
                chmp->chm_wbuf_len = PAGE_MOD(to);
        }

        if (to != PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len)) {
                dbg("to: %llu != %zu\n", (unsigned long long)to,
                    PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len));
                dbg("Non-contiguous write\n");
                panic("BUG\n");
        }

        /* adjust alignment offset */
        if (chmp->chm_wbuf_len != PAGE_MOD(to)) {
                chmp->chm_wbuf_len = PAGE_MOD(to);
                /* take care of alignment to next page */
                if (!chmp->chm_wbuf_len) {
                        chmp->chm_wbuf_len += chmp->chm_wbuf_pagesize;
                        ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
                        if (ret)
                                goto outerr;
                }
        }
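
        /*
         * Per iovec: top up the write buffer first, write any remaining
         * page-sized chunks straight to the flash, then buffer the tail.
         */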
        for (invec = 0; invec < count; invec++) {
                int vlen = invecs[invec].iov_len;
                u_char* v = invecs[invec].iov_base;

                /* fill the whole wbuf */
                wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
                if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
                        ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
                        if (ret) {
                                goto outerr;
                        }
                }

                vlen -= wbuf_retlen;
                outvec_to += wbuf_retlen;
                v += wbuf_retlen;
                donelen += wbuf_retlen;

                /* if there is more residual data than the length of the wbuf,
                 * write it out directly until the rest fits in the wbuf */
                if (vlen >= chmp->chm_wbuf_pagesize) {
                        ret = chfs_write_leb(chmp, lnr, v, outvec_to,
                            PAGE_DIV(vlen), &wbuf_retlen);
                        vlen -= wbuf_retlen;
                        outvec_to += wbuf_retlen;
                        chmp->chm_wbuf_ofs = outvec_to;
                        v += wbuf_retlen;
                        donelen += wbuf_retlen;
                }

                /* write the residual data to the wbuf */
                wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
                if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
                        ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
                        if (ret)
                                goto outerr;
                }

                outvec_to += wbuf_retlen;
                donelen += wbuf_retlen;
        }

        *retlen = donelen;
        rw_exit(&chmp->chm_lock_wbuf);
        return ret;

outerr:
        *retlen = 0;
        /* release the wbuf lock on the error path as well */
        rw_exit(&chmp->chm_lock_wbuf);
        return ret;
}

/*
 * chfs_flush_pending_wbuf - write wbuf to the flash
 * Used when we must flush wbuf right now.
 * If wbuf has free space, pad it to the size of wbuf and write it out.
 */
int chfs_flush_pending_wbuf(struct chfs_mount *chmp)
{
        int err;

        KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

        mutex_enter(&chmp->chm_lock_sizes);
        rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
        err = chfs_flush_wbuf(chmp, WBUF_SETPAD);
        rw_exit(&chmp->chm_lock_wbuf);
        mutex_exit(&chmp->chm_lock_sizes);
        return err;
}