/*
 * (C) Copyright 2007-2011 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */
/* Lock class shared by every spool_file's mutex (passed to mutex_init()
 * below).  NOTE(review): the stray leading numbers on code lines in this
 * file appear to be original line numbers fused in by a bad extraction. */
12 static LOCK_CLASS(spool_file_lc
);
/*
 * alloc_spool - allocate and initialize a new struct spool_file
 *
 * Allocates the structure from ZONE_NORMAL and initializes the file's
 * mutex (with the spool_file_lc lock class) and its list head.
 *
 * NOTE(review): this extraction is incomplete -- the braces, the
 * allocation-failure test guarding the ERR_PTR return, any remaining
 * field initialization, and the final return are missing.  Recover the
 * full source before editing.
 */
14 struct spool_file
*alloc_spool()
18 f
= malloc(sizeof(struct spool_file
), ZONE_NORMAL
);
/* presumably returned when the malloc above fails -- the condition is
 * missing from this extraction; TODO confirm */
20 return ERR_PTR(-ENOMEM
);
/* set up the file's lock and (empty) page list */
22 mutex_init(&f
->lock
, &spool_file_lc
);
23 INIT_LIST_HEAD(&f
->list
);
/*
 * free_spool - release a spool file
 *
 * NOTE(review): only the signature survives in this extraction; the body
 * (presumably releasing the pages on f->list and f itself -- TODO confirm)
 * is missing.  Recover the full source before editing.
 */
32 void free_spool(struct spool_file
*f
)
/*
 * spool_nrecs - report a count for a spool file (record count, per the name)
 *
 * NOTE(review): the body is mostly missing from this extraction; only the
 * signature and the trailing mutex_unlock() of f->lock are visible, so the
 * value is evidently read under the file's lock.  That it returns the
 * number of records is inferred from the name only -- TODO confirm against
 * the full source.
 */
37 u64
spool_nrecs(struct spool_file
*f
)
45 mutex_unlock(&f
->lock
);
/*
 * spool_grab_rec - copy the first record out of a spool file, consuming it
 *
 * Copies up to *len bytes of the first record on f->list into buf.
 * Returns 0 on success.  Note: the output may get truncated (when the
 * caller's buffer is smaller than the record).  Records are stored as a
 * 2-byte struct spool_rec header followed by data, kept 2-byte aligned
 * within SPOOL_DATA_SIZE-byte pages (see the BUG_ONs below).
 *
 * NOTE(review): this extraction is missing many statements -- the early
 * error returns, lock acquisition, reclen/copied/processed initialization,
 * offset/left updates inside the loop, *len write-back, and the success
 * return.  Comments below describe only what the visible fragments
 * establish; recover the full source before editing.
 */
55 int spool_grab_rec(struct spool_file
*f
, u8
*buf
, u16
*len
)
57 struct spool_page
*spage
;
58 struct spool_rec
*rec
;
60 u16 reclen
; /* length of record */
61 u32 copied
, processed
, rlen
, offset
, left
;
/* reject a zero-length buffer, a NULL buffer, or a NULL file */
63 if (!*len
|| !buf
|| !f
)
/* sanity: the on-page record header is exactly 2 bytes, and a page holds
 * a whole number of 2-byte units */
66 BUG_ON(sizeof(struct spool_rec
) != 2);
67 BUG_ON(SPOOL_DATA_SIZE
% 2);
/* the last-record offset bookkeeping must stay 2-byte aligned */
74 BUG_ON(f
->lrecoff
% 2);
81 /* figure out the record length */
82 spage
= list_first_entry(&f
->list
, struct spool_page
, list
);
/* the first record starts at f->frecoff within the first page's data */
83 rec
= (struct spool_rec
*) (spage
->data
+ f
->frecoff
);
/* total bytes for the caller: header plus the smaller of record length
 * and caller buffer (this is where truncation happens) */
86 rlen
= min(*len
, reclen
) + sizeof(struct spool_rec
);
/* start reading just past the 2-byte record header */
89 offset
= f
->frecoff
+2;
90 left
= SPOOL_DATA_SIZE
- f
->frecoff
- 2;
/* consume header + data, walking across as many pages as the record
 * spans; exhausted pages are unlinked (and freed -- see below) */
92 while(reclen
+2 != processed
) {
95 list_del(&spage
->list
);
99 /* grab the next page */
100 spage
= list_first_entry(&f
->list
, struct spool_page
, list
);
/* a fresh page has its full data area available */
102 left
= SPOOL_DATA_SIZE
;
105 if (rlen
!= copied
) {
106 /* memcpy into the user buffer */
109 tmp
= min(left
, rlen
-copied
);
111 memcpy(buf
+copied
-2, &spage
->data
[offset
], tmp
);
117 /* we already filled the user buffer, but the record
118 * is longer, so we need to take it all out */
122 tmp
= min(left
, reclen
-processed
+2);
130 /* align to a multiple of 2 bytes */
/* record ended exactly at a page boundary: that page is spent, so
 * unlink and free it */
134 if (offset
== SPOOL_DATA_SIZE
) {
136 spage
= list_first_entry(&f
->list
, struct spool_page
, list
);
138 list_del(&spage
->list
);
139 free_pages(spage
, 0);
152 mutex_unlock(&f
->lock
);
/*
 * spool_append_rec - append one record of len bytes from buf to a spool file
 *
 * Stores a 2-byte struct spool_rec header followed by the data.  It first
 * tops off the existing last page (when there is room for at least the
 * header), then allocates fresh pages from ZONE_NORMAL; new pages are
 * collected on a private local list and spliced onto f->list only once
 * the whole record is in place, and are freed again on the error path.
 *
 * NOTE(review): this extraction is missing many statements -- the
 * declarations of left/rlen/copied/loff/page, the early error returns,
 * the record-header store, the allocation-failure branch, counter/offset
 * write-backs, and the success return.  Comments below describe only what
 * the visible fragments establish; recover the full source before editing.
 */
157 int spool_append_rec(struct spool_file
*f
, u8
*buf
, u16 len
)
159 struct list_head new_pages
;
160 struct spool_page
*spage
, *tmp
;
161 struct spool_rec
*rec
;
/* reject a NULL file, a NULL buffer, or an empty record */
170 if (!f
|| !buf
|| !len
)
/* pages are staged here until the append is known to succeed */
173 INIT_LIST_HEAD(&new_pages
);
/* sanity: 2-byte record header, page data is a whole number of 2-byte
 * units */
176 BUG_ON(sizeof(struct spool_rec
) != 2);
177 BUG_ON(SPOOL_DATA_SIZE
% 2);
179 mutex_lock(&f
->lock
);
/* the last-record offset must be 2-byte aligned */
181 BUG_ON(f
->lrecoff
% 2);
/* room remaining on the current last page, and total bytes to store
 * (data plus header) */
183 left
= SPOOL_DATA_SIZE
- f
->lrecoff
;
184 rlen
= len
+ sizeof(struct spool_rec
);
188 /* try to fill up the last page */
189 if (f
->pages
&& (left
>= 2)) {
190 spage
= list_last_entry(&f
->list
, struct spool_page
, list
);
/* the new record starts at f->lrecoff within the last page's data */
191 rec
= (struct spool_rec
*) (spage
->data
+ f
->lrecoff
);
/* copy as much data as fits after the 2-byte header */
197 memcpy(rec
->data
, buf
, min_t(u32
, len
, left
-2));
198 copied
+= min_t(u32
, len
, left
-2);
201 loff
= f
->lrecoff
+ copied
;
204 BUG_ON(rlen
< copied
);
206 /* we need to allocate space */
207 while (rlen
!= copied
) {
208 page
= alloc_pages(0, ZONE_NORMAL
);
214 spage
= page_to_addr(page
);
216 INIT_LIST_HEAD(&spage
->list
);
/* stage the page on the private list; published only on success */
217 list_add_tail(&spage
->list
, &new_pages
);
220 rec
= (struct spool_rec
*) spage
->data
;
/* a fresh page has its full data area available */
221 left
= SPOOL_DATA_SIZE
;
224 /* nothing was copied */
226 memcpy(rec
->data
, buf
, min_t(u32
, len
, left
-2));
227 loff
= 2 + min_t(u32
, len
, left
-2);
229 /* the length and maybe some data were copied */
230 memcpy(spage
->data
, buf
-copied
+2, min(rlen
-copied
, left
));
231 loff
= min(rlen
-copied
, left
);
/* success: publish the staged pages onto the file's list */
237 list_splice_tail(&new_pages
, &f
->list
);
239 /* 2-byte align the lrecoff */
247 mutex_unlock(&f
->lock
);
/* error path: drop the lock, then free every page we allocated above */
251 mutex_unlock(&f
->lock
);
253 list_for_each_entry_safe(spage
, tmp
, &new_pages
, list
)
254 free_pages(spage
, 0);