nmdb: Add LevelDB support
[nmdb.git] / nmdb / cache.c
/* Generic cache layer.
 * It's a hash table with cache-style properties, keeping a (non-precise) size
 * and using a natural, per-chain LRU to do cleanups.
 * Cleanups are performed in place, when cache_set() gets called.
 */
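
/* A rough sketch of the data structures assumed here. The real definitions
 * live in cache.h; this is only reconstructed from the fields used below
 * (ksize/vsize are set to -1 to mark unused slots, so a signed size type is
 * assumed):
 *
 *	struct cache_entry {
 *		unsigned char *key, *val;
 *		ssize_t ksize, vsize;
 *		struct cache_entry *prev, *next;
 *	};
 *
 *	struct cache_chain {
 *		size_t len;
 *		struct cache_entry *first, *last;
 *		struct cache_entry entries[CHAINLEN];
 *	};
 *
 *	struct cache {
 *		unsigned int flags;
 *		size_t numobjs, hashlen;
 *		struct cache_chain *table;
 *	};
 */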
#include <sys/types.h>		/* for size_t */
#include <stdint.h>		/* for [u]int*_t */
#include <stdlib.h>		/* for malloc() and strtoll() */
#include <string.h>		/* for memcpy()/memcmp() */
#include <stdio.h>		/* for snprintf() */
#include "hash.h"		/* hash() */
#include "cache.h"
struct cache *cache_create(size_t numobjs, unsigned int flags)
{
	size_t i, j;
	struct cache *cd;
	struct cache_chain *c;

	cd = (struct cache *) malloc(sizeof(struct cache));
	if (cd == NULL)
		return NULL;

	cd->flags = flags;

	/* We calculate the hash size so we have 4 objects per bucket; 4 being
	 * an arbitrary number. It's long enough to make LRU useful, and small
	 * enough to make lookups fast. */
	cd->numobjs = numobjs;
	cd->hashlen = numobjs / CHAINLEN;

	cd->table = (struct cache_chain *)
		malloc(sizeof(struct cache_chain) * cd->hashlen);
	if (cd->table == NULL) {
		free(cd);
		return NULL;
	}

	for (i = 0; i < cd->hashlen; i++) {
		c = cd->table + i;
		c->len = 0;
		c->first = NULL;
		c->last = NULL;

		for (j = 0; j < CHAINLEN; j++) {
			memset(&(c->entries[j]), 0,
					sizeof(struct cache_entry));

			/* mark them as unused */
			c->entries[j].ksize = -1;
			c->entries[j].vsize = -1;
		}
	}

	return cd;
}
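
/* Hypothetical usage sketch (not part of the original code): the flags value
 * 0 and the object count are arbitrary; a cache sized for 4096 objects gets
 * 4096 / CHAINLEN hash buckets, each holding up to CHAINLEN entries.
 *
 *	struct cache *cd = cache_create(4096, 0);
 *	if (cd == NULL)
 *		return -1;
 *	// ... cache_set() / cache_get() / cache_del() ...
 *	cache_free(cd);
 */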
/* Frees the cache and all the memory held by its entries. */
int cache_free(struct cache *cd)
{
	size_t i;
	struct cache_chain *c;
	struct cache_entry *e, *n;

	for (i = 0; i < cd->hashlen; i++) {
		c = cd->table + i;
		if (c->first == NULL)
			continue;

		e = c->first;
		while (e != NULL) {
			n = e->next;
			free(e->key);
			free(e->val);
			e = n;
		}
	}

	free(cd->table);
	free(cd);
	return 1;
}
static struct cache_entry *alloc_entry(struct cache_chain *c)
{
	int i;

	for (i = 0; i < CHAINLEN; i++) {
		if (c->entries[i].ksize == -1)
			return &(c->entries[i]);
	}

	return NULL;
}
static void free_entry(struct cache_entry *e)
{
	e->key = NULL;
	e->ksize = -1;
	e->val = NULL;
	e->vsize = -1;
}
/* Looks up the given key in the chain. Returns NULL if not found, or a
 * pointer to the cache entry if it is. The chain can be empty. */
static struct cache_entry *find_in_chain(struct cache_chain *c,
		const unsigned char *key, size_t ksize)
{
	struct cache_entry *e;

	for (e = c->first; e != NULL; e = e->next) {
		if (ksize != e->ksize) {
			continue;
		}

		if (memcmp(key, e->key, ksize) == 0) {
			break;
		}
	}

	/* e will be either the matching entry or NULL */
	return e;
}
/* Looks up the given key in the cache. Returns NULL if not found, or a
 * pointer to the cache entry if it is. Useful when the caller does not need
 * the cache chain itself, so the hash calculation is not done in the open. */
static struct cache_entry *find_in_cache(struct cache *cd,
		const unsigned char *key, size_t ksize)
{
	uint32_t h;
	struct cache_chain *c;

	h = hash(key, ksize) % cd->hashlen;
	c = cd->table + h;

	return find_in_chain(c, key, ksize);
}
/* Gets the matching value for the given key. Returns 0 if no match was
 * found, or 1 otherwise. */
int cache_get(struct cache *cd, const unsigned char *key, size_t ksize,
		unsigned char **val, size_t *vsize)
{
	struct cache_entry *e;

	e = find_in_cache(cd, key, ksize);

	if (e == NULL) {
		*val = NULL;
		*vsize = 0;
		return 0;
	}

	*val = e->val;
	*vsize = e->vsize;

	return 1;
}
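
/* Note for callers (an observation from the code above, not original
 * documentation): *val is set to the entry's internal buffer, not to a copy,
 * so it stays valid only until the entry is replaced, evicted or deleted.
 * A hypothetical caller:
 *
 *	unsigned char *v;
 *	size_t vs;
 *
 *	if (cache_get(cd, key, ksize, &v, &vs)) {
 *		// use v/vs right away, or memcpy() them out if the data
 *		// must survive later cache operations
 *	}
 */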
/* Creates a new cache entry, with the given key and value */
static struct cache_entry *new_entry(struct cache_chain *c,
		const unsigned char *key, size_t ksize,
		const unsigned char *val, size_t vsize)
{
	struct cache_entry *new;

	new = alloc_entry(c);
	if (new == NULL) {
		return NULL;
	}

	new->ksize = ksize;
	new->vsize = vsize;

	new->key = malloc(ksize);
	if (new->key == NULL) {
		/* the entry comes from the chain's embedded array, so it
		 * must be released with free_entry(), not free() */
		free_entry(new);
		return NULL;
	}
	memcpy(new->key, key, ksize);

	new->val = malloc(vsize);
	if (new->val == NULL) {
		free(new->key);
		free_entry(new);
		return NULL;
	}
	memcpy(new->val, val, vsize);
	new->prev = NULL;
	new->next = NULL;

	return new;
}
static int insert_in_full_chain(struct cache_chain *c,
		const unsigned char *key, size_t ksize,
		const unsigned char *val, size_t vsize)
{
	/* To insert in a full chain, we evict the last entry and place the
	 * new one at the beginning.
	 *
	 * As an optimization, we avoid re-allocating the entry by reusing the
	 * last one, and when possible we reuse its key and value as well. */
	struct cache_entry *e = c->last;

	if (ksize == e->ksize) {
		memcpy(e->key, key, ksize);
	} else {
		free(e->key);
		e->key = malloc(ksize);
		if (e->key == NULL) {
			goto error;
		}
		e->ksize = ksize;
		memcpy(e->key, key, ksize);
	}

	if (vsize == e->vsize) {
		memcpy(e->val, val, vsize);
	} else {
		free(e->val);
		e->val = malloc(vsize);
		if (e->val == NULL) {
			goto error;
		}
		e->vsize = vsize;
		memcpy(e->val, val, vsize);
	}

	/* move the entry from the last to the first position */
	c->last = e->prev;
	c->last->next = NULL;

	e->prev = NULL;
	e->next = c->first;

	c->first->prev = e;
	c->first = e;

	return 0;

error:
	/* on errors, remove the entry just in case */
	c->last = e->prev;
	c->last->next = NULL;
	free(e->key);
	free(e->val);
	free_entry(e);

	return -1;
}
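
/* Worked example of the eviction above, assuming CHAINLEN is 4 and that keys
 * k1..k5 all hash into the same bucket (both assumptions are for illustration
 * only): after setting k1, k2, k3 and k4 the chain is full, with k4 first and
 * k1 last; setting k5 then reuses k1's entry, so a later cache_get() for k1
 * returns 0 while k2..k5 are still present. */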
/* Sets the given key to the given value, either creating a new entry or
 * replacing the existing one. Returns 0 on success and -1 on error. */
int cache_set(struct cache *cd, const unsigned char *key, size_t ksize,
		const unsigned char *val, size_t vsize)
{
	uint32_t h = 0;
	struct cache_chain *c;
	struct cache_entry *e, *new;
	unsigned char *v;

	h = hash(key, ksize) % cd->hashlen;
	c = cd->table + h;

	e = find_in_chain(c, key, ksize);

	if (e == NULL) {
		if (c->len == CHAINLEN) {
			/* chain is full */
			if (insert_in_full_chain(c, key, ksize,
					val, vsize) != 0)
				return -1;
		} else {
			new = new_entry(c, key, ksize, val, vsize);
			if (new == NULL)
				return -1;

			if (c->len == 0) {
				/* chain is empty, just put it there */
				c->first = new;
				c->last = new;
				c->len = 1;
			} else if (c->len < CHAINLEN) {
				/* slots are still available, put the entry
				 * first */
				new->next = c->first;
				c->first->prev = new;
				c->first = new;
				c->len += 1;
			}
		}
	} else {
		/* we've got a match, just replace the value in place */
		if (vsize == e->vsize) {
			memcpy(e->val, val, vsize);
		} else {
			v = malloc(vsize);
			if (v == NULL)
				return -1;

			free(e->val);
			e->val = v;
			e->vsize = vsize;
			memcpy(e->val, val, vsize);
		}

		/* promote the entry to the top of the list if necessary */
		if (c->first != e) {
			if (c->last == e)
				c->last = e->prev;

			e->prev->next = e->next;
			if (e->next != NULL)
				e->next->prev = e->prev;
			e->prev = NULL;
			e->next = c->first;
			c->first->prev = e;
			c->first = e;
		}
	}

	return 0;
}
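
/* Hypothetical usage sketch for cache_set() and cache_get(); the key and
 * value are arbitrary, and sizeof() includes the trailing '\0' so the stored
 * value stays printable:
 *
 *	const unsigned char k[] = "counter";
 *	const unsigned char v[] = "10";
 *	unsigned char *out;
 *	size_t outsize;
 *
 *	if (cache_set(cd, k, sizeof(k), v, sizeof(v)) != 0)
 *		return -1;
 *	if (cache_get(cd, k, sizeof(k), &out, &outsize))
 *		printf("%s\n", out);
 */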
/* Deletes the entry matching the given key. Returns 1 if the entry was found
 * and removed, and 0 if it was not in the cache. */
int cache_del(struct cache *cd, const unsigned char *key, size_t ksize)
{
	int rv = 1;
	uint32_t h = 0;
	struct cache_chain *c;
	struct cache_entry *e;

	h = hash(key, ksize) % cd->hashlen;
	c = cd->table + h;

	e = find_in_chain(c, key, ksize);

	if (e == NULL) {
		rv = 0;
		goto exit;
	}

	if (c->first == e) {
		c->first = e->next;
		if (e->next != NULL)
			e->next->prev = NULL;
	} else {
		e->prev->next = e->next;
		if (e->next != NULL)
			e->next->prev = e->prev;
	}

	if (c->last == e) {
		c->last = e->prev;
	}

	free(e->key);
	free(e->val);
	free_entry(e);

	c->len -= 1;

exit:
	return rv;
}
/* Performs a cache compare-and-swap.
 * Returns -3 if there was an error, -2 if the key is not in the cache, -1 if
 * the old value does not match, and 0 if the CAS was successful. */
int cache_cas(struct cache *cd, const unsigned char *key, size_t ksize,
		const unsigned char *oldval, size_t ovsize,
		const unsigned char *newval, size_t nvsize)
{
	struct cache_entry *e;
	unsigned char *buf;

	e = find_in_cache(cd, key, ksize);

	if (e == NULL)
		return -2;

	if (e->vsize != ovsize)
		return -1;

	if (memcmp(e->val, oldval, ovsize) != 0)
		return -1;

	if (ovsize == nvsize) {
		/* since they have the same size, avoid the malloc() and just
		 * copy the new value */
		memcpy(e->val, newval, nvsize);
	} else {
		buf = malloc(nvsize);
		if (buf == NULL)
			return -3;

		memcpy(buf, newval, nvsize);
		free(e->val);
		e->val = buf;
		e->vsize = nvsize;
	}

	return 0;
}
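
/* Hypothetical usage sketch for cache_cas(), mapping each documented return
 * code; key/oldval/newval and their sizes stand for the caller's own data:
 *
 *	switch (cache_cas(cd, key, ksize, oldval, ovsize, newval, nvsize)) {
 *	case 0:		// swapped
 *		break;
 *	case -1:	// current value does not match oldval
 *		break;
 *	case -2:	// key not present in the cache
 *		break;
 *	case -3:	// malloc() failed
 *		break;
 *	}
 */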
/* Increments the value associated with the given key by the given increment.
 * The increment is a signed 64 bit value, and the value size must be >= 8
 * bytes.
 * Returns:
 *   0 if the increment succeeded.
 *  -1 if the key was not in the cache.
 *  -2 if the value was not null terminated.
 *  -3 if there was a memory error.
 *
 * The new value will be stored in the newval parameter if the increment was
 * successful.
 */
int cache_incr(struct cache *cd, const unsigned char *key, size_t ksize,
		int64_t increment, int64_t *newval)
{
	unsigned char *val;
	int64_t intval;
	size_t vsize;
	struct cache_entry *e;

	e = find_in_cache(cd, key, ksize);

	if (e == NULL)
		return -1;

	val = e->val;
	vsize = e->vsize;

	/* The value must be a 0-terminated string, otherwise strtoll() might
	 * cause a segmentation fault. Note that val should never be NULL, but
	 * it doesn't hurt to check just in case. */
	if (val == NULL || val[vsize - 1] != '\0')
		return -2;

	intval = strtoll((char *) val, NULL, 10);
	intval = intval + increment;

	/* The maximum value for an unsigned long long is 18446744073709551615,
	 * and strlen("18446744073709551615") == 20, so if the value buffer is
	 * smaller than 24 bytes (leaving some slack, just in case) we allocate
	 * a new one. */
	if (vsize < 24) {
		unsigned char *nv = malloc(24);
		if (nv == NULL)
			return -3;
		free(val);
		e->val = val = nv;
		e->vsize = vsize = 24;
	}

	snprintf((char *) val, vsize, "%23lld", (long long int) intval);
	*newval = intval;

	return 0;
}
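
/* Hypothetical usage sketch for cache_incr(); it assumes the key already
 * holds a 0-terminated decimal string previously stored with cache_set():
 *
 *	int64_t nv;
 *
 *	switch (cache_incr(cd, key, ksize, 5, &nv)) {
 *	case 0:
 *		printf("new value: %lld\n", (long long) nv);
 *		break;
 *	case -1:	// key not in the cache
 *	case -2:	// stored value was not 0-terminated
 *	case -3:	// malloc() failed
 *		break;
 *	}
 */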