hvf.git: cp/fs/spool.c
/*
 * (C) Copyright 2007-2011 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */

#include <spool.h>
#include <buddy.h>
#include <slab.h>

static LOCK_CLASS(spool_file_lc);

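/*
 * Allocate and initialize an empty spool file.  Returns the new file, or
 * ERR_PTR(-ENOMEM) if the allocation fails.
 */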
struct spool_file *alloc_spool()
{
        struct spool_file *f;

        f = malloc(sizeof(struct spool_file), ZONE_NORMAL);
        if (!f)
                return ERR_PTR(-ENOMEM);

        mutex_init(&f->lock, &spool_file_lc);
        INIT_LIST_HEAD(&f->list);
        f->recs = 0;
        f->pages = 0;
        f->frecoff = 0;
        f->lrecoff = 0;

        return f;
}

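/*
 * Free the spool file structure itself; any data pages still queued on
 * the file are not released here.
 */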
void free_spool(struct spool_file *f)
{
        free(f);
}

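/* return the number of records currently in the file */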
u64 spool_nrecs(struct spool_file *f)
{
        u64 recs;

        mutex_lock(&f->lock);
        recs = f->recs;
        mutex_unlock(&f->lock);

        return recs;
}

/*
 * Grab (remove) the first record from the spool file and copy it into buf.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or -ENOENT if the file
 * has no records.
 *
 * Note: the output may get truncated if the record is longer than *len;
 * on return, *len holds the number of bytes actually copied.
 */
int spool_grab_rec(struct spool_file *f, u8 *buf, u16 *len)
{
        struct spool_page *spage;
        struct spool_rec *rec;
        int ret = -ENOENT;
        u16 reclen; /* length of record */
        u32 copied, processed, rlen, offset, left;

        if (!f || !buf || !len || !*len)
                return -EINVAL;

        BUG_ON(sizeof(struct spool_rec) != 2);
        BUG_ON(SPOOL_DATA_SIZE % 2);

        mutex_lock(&f->lock);

        if (!f->recs)
                goto out;

        BUG_ON(f->frecoff % 2);

        copied = 0;
        processed = 0;

        rec = NULL;

        /* figure out the record length */
        spage = list_first_entry(&f->list, struct spool_page, list);
        rec = (struct spool_rec*) (spage->data + f->frecoff);
        reclen = rec->len;

        rlen = min(*len, reclen) + sizeof(struct spool_rec);
        copied = 2;
        processed = 2;
        offset = f->frecoff + 2;
        left = SPOOL_DATA_SIZE - f->frecoff - 2;

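        /*
         * Copy the record out.  A record may cross page boundaries; any
         * page that has been completely consumed is unlinked from the
         * file and freed as we go.
         */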
        while (reclen + 2 != processed) {
                if (!left) {
                        /* free page */
                        list_del(&spage->list);
                        free_pages(spage, 0);
                        f->pages--;

                        /* grab the next page */
                        spage = list_first_entry(&f->list, struct spool_page, list);
                        offset = 0;
                        left = SPOOL_DATA_SIZE;
                }

                if (rlen != copied) {
                        /* memcpy into the user buffer */
                        u32 tmp;

                        tmp = min(left, rlen - copied);

                        memcpy(buf + copied - 2, &spage->data[offset], tmp);
                        processed += tmp;
                        copied += tmp;
                        left -= tmp;
                        offset += tmp;
                } else {
                        /* we already filled the user buffer, but the record
                         * is longer, so we need to take it all out
                         */
                        u32 tmp;

                        tmp = min(left, reclen - processed + 2);

                        processed += tmp;
                        left -= tmp;
                        offset += tmp;
                }
        }

        /* align to a multiple of 2 bytes */
        if (offset % 2)
                offset++;

        if (offset == SPOOL_DATA_SIZE) {
                /* free page */
                spage = list_first_entry(&f->list, struct spool_page, list);

                list_del(&spage->list);
                free_pages(spage, 0);
                f->pages--;

                offset = 0;
        }

        f->recs--;
        f->frecoff = offset;

        *len = copied - 2;
        ret = 0;

out:
        mutex_unlock(&f->lock);

        return ret;
}

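/*
 * Append a record of 'len' bytes to the end of the spool file.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or -ENOMEM if a new page
 * could not be allocated (in which case the file is left logically
 * unchanged).
 */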
int spool_append_rec(struct spool_file *f, u8 *buf, u16 len)
{
        struct list_head new_pages;
        struct spool_page *spage, *tmp;
        struct spool_rec *rec;
        struct page *page;
        int npages;
        int loff;
        int copied;
        u32 left;
        u32 rlen;
        int ret;

        if (!f || !buf || !len)
                return -EINVAL;

        INIT_LIST_HEAD(&new_pages);
        npages = 0;

        BUG_ON(sizeof(struct spool_rec) != 2);
        BUG_ON(SPOOL_DATA_SIZE % 2);

        mutex_lock(&f->lock);

        BUG_ON(f->lrecoff % 2);

        left = SPOOL_DATA_SIZE - f->lrecoff;
        rlen = len + sizeof(struct spool_rec);
        copied = 0;
        loff = 0;

        /* try to fill up the last page */
        if (f->pages && (left >= 2)) {
                spage = list_last_entry(&f->list, struct spool_page, list);
                rec = (struct spool_rec*) (spage->data + f->lrecoff);

                rec->len = len;
                copied = 2;

                if (left - 2) {
                        memcpy(rec->data, buf, min_t(u32, len, left - 2));
                        copied += min_t(u32, len, left - 2);
                }

                loff = f->lrecoff + copied;
        }

        BUG_ON(rlen < copied);

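        /*
         * Pages allocated below are chained on the local new_pages list and
         * only spliced into the file once everything has been copied, so a
         * failed allocation can bail out without modifying the file.
         */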
        /* we need to allocate space */
        while (rlen != copied) {
                page = alloc_pages(0, ZONE_NORMAL);
                if (!page) {
                        ret = -ENOMEM;
                        goto err;
                }

                spage = page_to_addr(page);

                INIT_LIST_HEAD(&spage->list);
                list_add_tail(&spage->list, &new_pages);
                npages++;

                rec = (struct spool_rec*) spage->data;
                left = SPOOL_DATA_SIZE;

                if (!copied) {
                        /* nothing was copied */
                        rec->len = len;
                        memcpy(rec->data, buf, min_t(u32, len, left - 2));
                        loff = 2 + min_t(u32, len, left - 2);
                } else {
                        /* the length and maybe some data were copied */
                        memcpy(spage->data, buf - copied + 2, min(rlen - copied, left));
                        loff = min(rlen - copied, left);
                }

                copied += loff;
        }

        list_splice_tail(&new_pages, &f->list);

        /* 2-byte align the lrecoff */
        if (loff % 2)
                loff++;

        f->pages += npages;
        f->recs++;
        f->lrecoff = loff;

        mutex_unlock(&f->lock);
        return 0;

err:
        mutex_unlock(&f->lock);

        list_for_each_entry_safe(spage, tmp, &new_pages, list)
                free_pages(spage, 0);

        return ret;
}