/* Generic cache layer.
 * It's a hash table with cache-style properties, keeping a (non-precise) size
 * and using a natural, per-chain LRU to do cleanups.
 * Cleanups are performed in place, when cache_set() gets called.
 */
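/* Rough usage sketch (illustrative only; it relies on the functions defined
 * below, error checks are omitted, and the flags value passed to
 * cache_create() is just a placeholder):
 *
 *	struct cache *cd = cache_create(4096, 0);
 *
 *	cache_set(cd, (unsigned char *) "k", 2, (unsigned char *) "v", 2);
 *
 *	unsigned char *val;
 *	size_t vsize;
 *	if (cache_get(cd, (unsigned char *) "k", 2, &val, &vsize))
 *		... use val[0 .. vsize) ...
 *
 *	cache_del(cd, (unsigned char *) "k", 2);
 *	cache_free(cd);
 */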
#include <sys/types.h>	/* for size_t */
#include <stdint.h>	/* for [u]int*_t */
#include <stdlib.h>	/* for malloc() */
#include <string.h>	/* for memcpy()/memcmp() */
#include <stdio.h>	/* snprintf() */
#include "hash.h"	/* hash() */
struct cache *cache_create(size_t numobjs, unsigned int flags)
{
	struct cache *cd;
	struct cache_chain *c;
	size_t i, j;

	cd = (struct cache *) malloc(sizeof(struct cache));
	if (cd == NULL)
		return NULL;
	/* We calculate the hash size so we have 4 objects per bucket; 4 being
	 * an arbitrary number. It's long enough to make LRU useful, and small
	 * enough to make lookups fast. */
	cd->numobjs = numobjs;
	cd->hashlen = numobjs / CHAINLEN;

	cd->table = (struct cache_chain *)
		malloc(sizeof(struct cache_chain) * cd->hashlen);
	if (cd->table == NULL) {
		free(cd);
		return NULL;
	}
	for (i = 0; i < cd->hashlen; i++) {
		c = &(cd->table[i]);
		c->len = 0;
		c->first = NULL;
		c->last = NULL;

		for (j = 0; j < CHAINLEN; j++) {
			memset(&(c->entries[j]), 0,
					sizeof(struct cache_entry));

			/* mark them as unused */
			c->entries[j].ksize = -1;
			c->entries[j].vsize = -1;
		}
	}

	return cd;
}
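/* For example (numbers are illustrative only): cache_create(4096, 0) gives
 * hashlen = 4096 / CHAINLEN = 1024 chains of at most 4 entries each, which is
 * also why the advertised size is only approximate. */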
int cache_free(struct cache *cd)
{
	size_t i;
	struct cache_chain *c;
	struct cache_entry *e, *n;

	/* free the keys and values of every used entry; the entries
	 * themselves live inside the table */
	for (i = 0; i < cd->hashlen; i++) {
		c = &(cd->table[i]);
		for (e = c->first; e != NULL; e = n) {
			n = e->next;
			free(e->key);
			free(e->val);
		}
	}

	free(cd->table);
	free(cd);

	return 0;
}
/* Returns a free entry from the given chain, or NULL if there is none. */
static struct cache_entry *alloc_entry(struct cache_chain *c)
{
	size_t i;

	for (i = 0; i < CHAINLEN; i++) {
		if (c->entries[i].ksize == -1)
			return &(c->entries[i]);
	}

	return NULL;
}
/* Frees an entry's key and value and marks it as unused again. */
static void free_entry(struct cache_entry *e)
{
	free(e->key);
	free(e->val);

	e->key = NULL;
	e->val = NULL;
	e->ksize = -1;
	e->vsize = -1;
}
/* Looks up the given key in the chain. Returns NULL if not found, or a
 * pointer to the cache entry if it is. The chain can be empty. */
static struct cache_entry *find_in_chain(struct cache_chain *c,
		const unsigned char *key, size_t ksize)
{
	struct cache_entry *e;

	for (e = c->first; e != NULL; e = e->next) {
		if (ksize != e->ksize) {
			continue;
		}

		if (memcmp(key, e->key, ksize) == 0) {
			break;
		}
	}

	/* e will be either the found entry or NULL */
	return e;
}
/* Looks up the given key in the cache. Returns NULL if not found, or a
 * pointer to the cache entry if it is. Useful to avoid doing the hash
 * calculation in the caller when the cache chain itself will not be
 * needed. */
static struct cache_entry *find_in_cache(struct cache *cd,
		const unsigned char *key, size_t ksize)
{
	uint32_t h;
	struct cache_chain *c;

	h = hash(key, ksize) % cd->hashlen;
	c = &(cd->table[h]);

	return find_in_chain(c, key, ksize);
}
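/* Illustrative lookup walk-through (the hash value is made up): with
 * hashlen = 1024 and hash(key, ksize) = 5000, the key lives in chain
 * 5000 % 1024 = 904, and find_in_chain() then compares it against at most
 * CHAINLEN entries, checking sizes before contents. */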
/* Gets the matching value for the given key. Returns 0 if no match was
 * found, or 1 otherwise. */
int cache_get(struct cache *cd, const unsigned char *key, size_t ksize,
		unsigned char **val, size_t *vsize)
{
	struct cache_entry *e;

	e = find_in_cache(cd, key, ksize);
	if (e == NULL)
		return 0;

	*val = e->val;
	*vsize = e->vsize;

	return 1;
}
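/* Example (illustrative): on a hit, *val points into the cache entry itself
 * rather than a copy, so it can be invalidated by later cache_set() or
 * cache_del() calls on the same key:
 *
 *	unsigned char *v;
 *	size_t vs;
 *	if (cache_get(cd, (unsigned char *) "k", 2, &v, &vs))
 *		printf("%.*s\n", (int) vs, v);
 */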
/* Creates a new cache entry, with the given key and value */
static struct cache_entry *new_entry(struct cache_chain *c,
		const unsigned char *key, size_t ksize,
		const unsigned char *val, size_t vsize)
{
	struct cache_entry *new;

	new = alloc_entry(c);
	if (new == NULL)
		return NULL;

	new->key = malloc(ksize);
	if (new->key == NULL) {
		return NULL;
	}
	memcpy(new->key, key, ksize);
	new->ksize = ksize;

	new->val = malloc(vsize);
	if (new->val == NULL) {
		free(new->key);
		new->key = NULL;
		return NULL;
	}
	memcpy(new->val, val, vsize);
	new->vsize = vsize;

	new->prev = NULL;
	new->next = NULL;

	return new;
}
/* Inserts the given key/value in a full chain, evicting and reusing its last
 * (LRU) entry. Returns 0 on success, -1 on error. */
static int insert_in_full_chain(struct cache_chain *c,
		const unsigned char *key, size_t ksize,
		const unsigned char *val, size_t vsize)
{
	/* To insert in a full chain, we evict the last entry and place the
	 * new one at the beginning.
	 *
	 * As an optimization, we avoid re-allocating the entry by reusing the
	 * last one, and when possible we reuse its key and value as well. */
	struct cache_entry *e = c->last;

	if (ksize == e->ksize) {
		memcpy(e->key, key, ksize);
	} else {
		free(e->key);
		e->key = malloc(ksize);
		if (e->key == NULL) {
			goto error;
		}
		memcpy(e->key, key, ksize);
		e->ksize = ksize;
	}

	if (vsize == e->vsize) {
		memcpy(e->val, val, vsize);
	} else {
		free(e->val);
		e->val = malloc(vsize);
		if (e->val == NULL) {
			goto error;
		}
		memcpy(e->val, val, vsize);
		e->vsize = vsize;
	}

	/* move the entry from the last to the first position */
	c->last = e->prev;
	c->last->next = NULL;

	e->prev = NULL;
	e->next = c->first;
	c->first->prev = e;
	c->first = e;

	return 0;

error:
	/* on errors, remove the entry just in case */
	free(e->key);
	free(e->val);
	e->key = NULL;
	e->val = NULL;
	e->ksize = -1;
	e->vsize = -1;

	c->last = e->prev;
	c->last->next = NULL;
	c->len--;

	return -1;
}
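/* Illustrative chain state (keys only), assuming CHAINLEN = 4: inserting "E"
 * into a full chain [A, B, C, D] reuses D's slot for the new entry and moves
 * it to the front, leaving [E, A, B, C]; the surviving entries keep their
 * relative LRU order. */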
int cache_set(struct cache *cd, const unsigned char *key, size_t ksize,
		const unsigned char *val, size_t vsize)
{
	uint32_t h;
	struct cache_chain *c;
	struct cache_entry *e, *new;

	h = hash(key, ksize) % cd->hashlen;
	c = &(cd->table[h]);

	e = find_in_chain(c, key, ksize);

	if (e == NULL) {
		/* no match, insert a new entry */
		if (c->len == CHAINLEN) {
			/* the chain is full, evict and reuse the last entry */
			if (insert_in_full_chain(c, key, ksize,
					val, vsize) != 0)
				return -1;
		} else {
			new = new_entry(c, key, ksize, val, vsize);
			if (new == NULL)
				return -1;

			if (c->len == 0) {
				/* line is empty, just put it there */
				c->first = new;
				c->last = new;
			} else if (c->len < CHAINLEN) {
				/* slots are still available, put the entry
				 * at the beginning of the chain */
				new->next = c->first;
				c->first->prev = new;
				c->first = new;
			}

			c->len++;
		}
	} else {
		/* we've got a match, just replace the value in place */
		if (vsize == e->vsize) {
			memcpy(e->val, val, vsize);
		} else {
			unsigned char *buf = malloc(vsize);
			if (buf == NULL)
				return -1;

			free(e->val);
			e->val = buf;
			e->vsize = vsize;
			memcpy(e->val, val, vsize);
		}

		/* promote the entry to the top of the list if necessary */
		if (e != c->first) {
			if (e == c->last)
				c->last = e->prev;

			e->prev->next = e->next;
			if (e->next != NULL)
				e->next->prev = e->prev;

			e->prev = NULL;
			e->next = c->first;
			c->first->prev = e;
			c->first = e;
		}
	}

	return 0;
}
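/* Note (inferred from the code above): replacing a value with one of a
 * different size swaps in a freshly allocated buffer, so a pointer obtained
 * earlier via cache_get() must not be used after a later cache_set() on the
 * same key (k, ks, newv and newvs are placeholders):
 *
 *	cache_get(cd, k, ks, &v, &vs);
 *	cache_set(cd, k, ks, newv, newvs);
 *	... v may now dangle ...
 */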
int cache_del(struct cache *cd, const unsigned char *key, size_t ksize)
{
	uint32_t h;
	struct cache_chain *c;
	struct cache_entry *e;

	h = hash(key, ksize) % cd->hashlen;
	c = &(cd->table[h]);

	e = find_in_chain(c, key, ksize);
	if (e == NULL)
		return 0;

	/* unlink the entry from the chain, fixing up first/last */
	if (e == c->first) {
		c->first = e->next;
		if (e->next != NULL)
			e->next->prev = NULL;
	} else {
		e->prev->next = e->next;
		if (e->next != NULL)
			e->next->prev = e->prev;
	}

	if (e == c->last)
		c->last = e->prev;

	free_entry(e);
	c->len--;

	return 1;
}
/* Performs a cache compare-and-swap.
 * Returns -3 if there was an error, -2 if the key is not in the cache, -1 if
 * the old value does not match, and 0 if the CAS was successful. */
int cache_cas(struct cache *cd, const unsigned char *key, size_t ksize,
		const unsigned char *oldval, size_t ovsize,
		const unsigned char *newval, size_t nvsize)
{
	struct cache_entry *e;
	unsigned char *buf;

	e = find_in_cache(cd, key, ksize);
	if (e == NULL)
		return -2;

	if (e->vsize != ovsize)
		return -1;

	if (memcmp(e->val, oldval, ovsize) != 0)
		return -1;

	if (ovsize == nvsize) {
		/* since they have the same size, avoid the malloc() and just
		 * copy the new value */
		memcpy(e->val, newval, nvsize);
	} else {
		buf = malloc(nvsize);
		if (buf == NULL)
			return -3;

		memcpy(buf, newval, nvsize);

		free(e->val);
		e->val = buf;
		e->vsize = nvsize;
	}

	return 0;
}
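/* Example (illustrative) of driving the documented return codes, swapping
 * "old" for "new" under key "k":
 *
 *	switch (cache_cas(cd, (unsigned char *) "k", 2,
 *			(unsigned char *) "old", 4,
 *			(unsigned char *) "new", 4)) {
 *	case 0:		... swapped ...			break;
 *	case -1:	... value changed under us ...	break;
 *	case -2:	... key not in the cache ...	break;
 *	case -3:	... memory error ...		break;
 *	}
 */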
/* Increment the value associated with the given key by the given increment.
 * The increment is a signed 64 bit value, and the value size must be >= 8
 * bytes.
 *
 * Returns:
 *   0 if the increment succeeded.
 *  -1 if the value was not in the cache.
 *  -2 if the value was not null terminated.
 *  -3 if there was a memory error.
 *
 * The new value will be set in the newval parameter if the increment was
 * successful. */
int cache_incr(struct cache *cd, const unsigned char *key, size_t ksize,
		int64_t increment, int64_t *newval)
{
	unsigned char *val;
	size_t vsize;
	int64_t intval;
	struct cache_entry *e;

	e = find_in_cache(cd, key, ksize);
	if (e == NULL)
		return -1;

	val = e->val;
	vsize = e->vsize;

	/* The value must be a 0-terminated string, otherwise strtoll might
	 * cause a segmentation fault. Note that val should never be NULL, but
	 * it doesn't hurt to check just in case */
	if (val == NULL || val[vsize - 1] != '\0')
		return -2;

	intval = strtoll((char *) val, NULL, 10);
	intval = intval + increment;

	/* The max value for an unsigned long long is 18446744073709551615,
	 * and strlen('18446744073709551615') = 20, so if the value is smaller
	 * than 24 (just in case) we create a new buffer. */
	if (vsize < 24) {
		unsigned char *nv = malloc(24);
		if (nv == NULL)
			return -3;

		free(e->val);
		e->val = val = nv;
		e->vsize = vsize = 24;
	}

	snprintf((char *) val, vsize, "%23lld", (long long int) intval);

	*newval = intval;

	return 0;
}
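/* Example (illustrative): values to be incremented are stored as
 * nul-terminated decimal strings, so after something like
 * cache_set(cd, k, ks, (unsigned char *) "41", 3) a caller can do:
 *
 *	int64_t nv;
 *	if (cache_incr(cd, k, ks, 1, &nv) == 0)
 *		... nv is now 42 and the cached string was rewritten ...
 */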