/*
   lru_cache.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h> /* for memset */
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/lru_cache.h>

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
MODULE_LICENSE("GPL");
/* This is a developer's aid only.
 * It catches concurrent access (lack of locking on the user's part). */
#define PARANOIA_ENTRY() do {		\
	BUG_ON(!lc);			\
	BUG_ON(!lc->nr_elements);	\
	BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
} while (0)

#define RETURN(x...)     do { \
	clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
	return x ; } while (0)

/* BUG() if e is not one of the elements tracked by lc */
#define PARANOIA_LC_ELEMENT(lc, e) do {	\
	struct lru_cache *lc_ = (lc);	\
	struct lc_element *e_ = (e);	\
	unsigned i = e_->lc_index;	\
	BUG_ON(i >= lc_->nr_elements);	\
	BUG_ON(lc_->lc_element[i] != e_); } while (0)
/* We need to atomically
 *  - try to grab the lock (set LC_LOCKED),
 *  - but only if there is no pending transaction
 *    (neither LC_DIRTY nor LC_STARVING is set).
 * Because PARANOIA_ENTRY() above abuses lc->flags as well,
 * it is not sufficient to just say
 *	return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED);
 */
int lc_try_lock(struct lru_cache *lc)
{
	unsigned long val;

	do {
		val = cmpxchg(&lc->flags, 0, LC_LOCKED);
	} while (unlikely(val == LC_PARANOIA));
	/* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
	return 0 == val;
#if 0
	/* Alternative approach, spin in case someone enters or leaves a
	 * PARANOIA_ENTRY()/RETURN() section. */
	unsigned long old, new, val;

	do {
		old = lc->flags & LC_PARANOIA;
		new = old | LC_LOCKED;
		val = cmpxchg(&lc->flags, old, new);
	} while (unlikely(val == (old ^ LC_PARANOIA)));
	return old == val;
#endif
}
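
/* Example: a minimal lc_try_lock()/lc_unlock() usage sketch.  The helper
 * and its spinlock are hypothetical, not part of this API; serializing
 * against concurrent lc_get()/lc_put() callers is the user's job.
 */
#if 0
static void example_freeze(struct lru_cache *lc, spinlock_t *users_lock)
{
	int frozen;

	spin_lock(users_lock);
	frozen = lc_try_lock(lc);	/* fails while a transaction is pending */
	spin_unlock(users_lock);

	if (frozen) {
		/* ... the active set cannot change behind our back now ... */
		lc_unlock(lc);		/* clears LC_DIRTY, releases LC_LOCKED */
	}
}
#endif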
/**
 * lc_create - prepares to track objects in an active set
 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
 * @cache: kmem_cache the tracked objects are allocated from
 * @max_pending_changes: maximum changes to accumulate until a transaction is required
 * @e_count: number of elements allowed to be active simultaneously
 * @e_size: size of the tracked objects
 * @e_off: offset to the &struct lc_element member in a tracked object
 *
 * Returns a pointer to a newly initialized struct lru_cache on success,
 * or NULL on (allocation) failure.
 */
struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
		unsigned max_pending_changes,
		unsigned e_count, size_t e_size, size_t e_off)
{
	struct hlist_head *slot = NULL;
	struct lc_element **element = NULL;
	struct lru_cache *lc;
	struct lc_element *e;
	unsigned cache_obj_size = kmem_cache_size(cache);
	unsigned i;

	WARN_ON(cache_obj_size < e_size);
	if (cache_obj_size < e_size)
		return NULL;

	/* e_count too big; would probably fail the allocation below anyway.
	 * For typical use cases, e_count should be a few thousand at most. */
	if (e_count > LC_MAX_ACTIVE)
		return NULL;

	slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
	if (!slot)
		goto out_fail;
	element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
	if (!element)
		goto out_fail;

	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc)
		goto out_fail;

	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);
	INIT_LIST_HEAD(&lc->to_be_changed);

	lc->name = name;
	lc->element_size = e_size;
	lc->element_off = e_off;
	lc->nr_elements = e_count;
	lc->max_pending_changes = max_pending_changes;
	lc->lc_cache = cache;
	lc->lc_element = element;
	lc->lc_slot = slot;

	/* preallocate all objects */
	for (i = 0; i < e_count; i++) {
		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			break;
		memset(p, 0, lc->element_size);
		e = p + e_off;
		e->lc_index = i;
		e->lc_number = LC_FREE;
		e->lc_new_number = LC_FREE;
		list_add(&e->list, &lc->free);
		element[i] = e;
	}
	if (i == e_count)
		return lc;

	/* else: could not allocate all elements, give up.
	 * "while (i--)" also frees element 0, and is safe if the very
	 * first allocation failed (i == 0). */
	while (i--) {
		void *p = element[i];

		kmem_cache_free(cache, p - e_off);
	}
	kfree(lc);
out_fail:
	kfree(element);
	kfree(slot);
	return NULL;
}
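
/* Example: a minimal lc_create()/lc_destroy() setup sketch.  The tracked
 * object embeds a struct lc_element, and @e_off tells the cache where to
 * find it.  "struct example_extent" and all other names here are
 * hypothetical illustrations, not part of this API.
 */
#if 0
struct example_extent {
	struct lc_element lce;		/* embedded tracking element */
	unsigned long user_state;	/* whatever the user tracks per label */
};

static struct lru_cache *example_setup(struct kmem_cache **slab_out)
{
	struct kmem_cache *slab = kmem_cache_create("example_extents",
			sizeof(struct example_extent), 0, 0, NULL);
	struct lru_cache *lc;

	if (!slab)
		return NULL;
	/* at most 64 uncommitted changes, 61 simultaneously active labels */
	lc = lc_create("example", slab, 64, 61,
		       sizeof(struct example_extent),
		       offsetof(struct example_extent, lce));
	if (!lc) {
		kmem_cache_destroy(slab);
		return NULL;
	}
	*slab_out = slab;	/* caller destroys it after lc_destroy() */
	return lc;
}
#endif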
static void lc_free_by_index(struct lru_cache *lc, unsigned i)
{
	void *p = lc->lc_element[i];

	WARN_ON(!p);
	if (p) {
		p -= lc->element_off;
		kmem_cache_free(lc->lc_cache, p);
	}
}

/**
 * lc_destroy - frees memory allocated by lc_create()
 * @lc: the lru cache to destroy
 */
void lc_destroy(struct lru_cache *lc)
{
	unsigned i;

	if (!lc)
		return;
	for (i = 0; i < lc->nr_elements; i++)
		lc_free_by_index(lc, i);
	kfree(lc->lc_element);
	kfree(lc->lc_slot);
	kfree(lc);
}
/**
 * lc_reset - does a full reset for @lc and the hash table slots.
 * @lc: the lru cache to operate on
 *
 * It is roughly the equivalent of re-allocating a fresh lru_cache object,
 * basically a short cut to lc_destroy(lc); lc = lc_create(...);
 */
void lc_reset(struct lru_cache *lc)
{
	unsigned i;

	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);
	INIT_LIST_HEAD(&lc->to_be_changed);
	lc->used = 0;
	lc->hits = 0;
	lc->misses = 0;
	lc->starving = 0;
	lc->locked = 0;
	lc->changed = 0;
	lc->pending_changes = 0;
	lc->flags = 0;
	memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);

	for (i = 0; i < lc->nr_elements; i++) {
		struct lc_element *e = lc->lc_element[i];
		void *p = e;

		p -= lc->element_off;
		memset(p, 0, lc->element_size);
		/* re-init it */
		e->lc_index = i;
		e->lc_number = LC_FREE;
		e->lc_new_number = LC_FREE;
		list_add(&e->list, &lc->free);
	}
}
/**
 * lc_seq_printf_stats - print stats about @lc into @seq
 * @seq: the seq_file to print into
 * @lc: the lru cache to print statistics of
 */
size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
{
	/* NOTE:
	 * total calls to lc_get are
	 * (starving + hits + misses)
	 * misses include the "locked" count (an update from another thread
	 * was in progress) and "changed", when the miss in fact led to a
	 * successful update of the cache.
	 */
	seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
		   lc->name, lc->used, lc->nr_elements,
		   lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);

	return 0;
}
static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
{
	return lc->lc_slot + (enr % lc->nr_elements);
}


static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
		bool include_changing)
{
	struct lc_element *e;

	BUG_ON(!lc);
	BUG_ON(!lc->nr_elements);
	hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
		/* "about to be changed" elements, pending transaction commit,
		 * are hashed by their "new number". "Normal" elements have
		 * lc_number == lc_new_number. */
		if (e->lc_new_number != enr)
			continue;
		if (e->lc_new_number == e->lc_number || include_changing)
			return e;
		break;
	}
	return NULL;
}
/**
 * lc_find - find element by label, if present in the hash table
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns the pointer to an element, if the element with the requested
 * "label" or element number is present in the hash table,
 * or NULL if not found.  Does not change the refcnt.
 * Ignores elements that are "about to be used", i.e. not yet in the active
 * set, but still pending transaction commit.
 */
struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
{
	return __lc_find(lc, enr, false);
}
/**
 * lc_is_used - find element by label
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns true, if the element with the requested "label" or element number is
 * present in the hash table, and is used (refcnt > 0).
 * Also finds elements that are not _currently_ used but only "about to be
 * used", i.e. on the "to_be_changed" list, pending transaction commit.
 */
bool lc_is_used(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e = __lc_find(lc, enr, true);

	return e && e->refcnt;
}
/**
 * lc_del - removes an element from the cache
 * @lc: The lru_cache object
 * @e: The element to remove
 *
 * @e must be unused (refcnt == 0).  Moves @e from "lru" to "free" list,
 * sets @e->lc_number to %LC_FREE.
 */
void lc_del(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt);

	e->lc_number = e->lc_new_number = LC_FREE;
	hlist_del_init(&e->colision);
	list_move(&e->list, &lc->free);
	RETURN();
}
static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number)
{
	struct list_head *n;
	struct lc_element *e;

	if (!list_empty(&lc->free))
		n = lc->free.next;
	else if (!list_empty(&lc->lru))
		n = lc->lru.prev;
	else
		return NULL;

	e = list_entry(n, struct lc_element, list);
	PARANOIA_LC_ELEMENT(lc, e);

	e->lc_new_number = new_number;
	if (!hlist_unhashed(&e->colision))
		__hlist_del(&e->colision);
	hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
	list_move(&e->list, &lc->to_be_changed);

	return e;
}

static int lc_unused_element_available(struct lru_cache *lc)
{
	if (!list_empty(&lc->free))
		return 1; /* something on the free list */
	if (!list_empty(&lc->lru))
		return 1; /* something to evict */

	return 0;
}
/* used as internal flags to __lc_get */
enum {
	LC_GET_MAY_CHANGE = 1,
	LC_GET_MAY_USE_UNCOMMITTED = 2,
};

static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
{
	struct lc_element *e;

	PARANOIA_ENTRY();
	if (lc->flags & LC_STARVING) {
		++lc->starving;
		RETURN(NULL);
	}

	e = __lc_find(lc, enr, true);
	/* if lc_new_number != lc_number,
	 * this enr is currently being pulled in already,
	 * and will be available once the pending transaction
	 * has been committed. */
	if (e) {
		if (e->lc_new_number != e->lc_number) {
			/* It has been found above, but on the "to_be_changed"
			 * list, not yet committed.  Don't pull it in twice,
			 * wait for the transaction, then try again...
			 */
			if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
				RETURN(NULL);
			/* ... unless the caller is aware of the implications,
			 * probably preparing a cumulative transaction. */
			++e->refcnt;
			++lc->hits;
			RETURN(e);
		}
		/* else: lc_new_number == lc_number; a real hit. */
		++lc->hits;
		if (e->refcnt++ == 0)
			lc->used++;
		list_move(&e->list, &lc->in_use); /* Not evictable... */
		RETURN(e);
	}
	/* e == NULL */

	++lc->misses;
	if (!(flags & LC_GET_MAY_CHANGE))
		RETURN(NULL);

	/* To avoid races with lc_try_lock(), first, mark us dirty
	 * (using test_and_set_bit, as it implies memory barriers), ... */
	test_and_set_bit(__LC_DIRTY, &lc->flags);

	/* ... only then check if it is locked anyway.  If lc_unlock clears
	 * the dirty bit again, that's not a problem: we will come here again.
	 */
	if (test_bit(__LC_LOCKED, &lc->flags)) {
		++lc->locked;
		RETURN(NULL);
	}

	/* In case there is nothing available and we cannot kick out
	 * the LRU element, we have to wait ...
	 */
	if (!lc_unused_element_available(lc)) {
		__set_bit(__LC_STARVING, &lc->flags);
		RETURN(NULL);
	}

	/* It was not present in the active set.  We are going to recycle an
	 * unused (or even "free") element, but we won't accumulate more than
	 * max_pending_changes changes. */
	if (lc->pending_changes >= lc->max_pending_changes)
		RETURN(NULL);

	e = lc_prepare_for_change(lc, enr);
	BUG_ON(!e);

	clear_bit(__LC_STARVING, &lc->flags);
	BUG_ON(++e->refcnt != 1);
	lc->used++;
	lc->pending_changes++;

	RETURN(e);
}
/**
 * lc_get - get element by label, maybe change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * In case the requested number is not present, it needs to be added to the
 * cache.  Therefore it is possible that another element is evicted from
 * the cache.  In either case, the user is notified, so he is able to e.g.
 * keep a persistent log of the cache changes, and therefore of the objects
 * in use.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *     and a changing transaction is still pending (@lc was marked %LC_DIRTY).
 *     Or no unused or free element could be recycled (@lc will be marked as
 *     %LC_STARVING, blocking further lc_get() operations).
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away.
 *
 *  pointer to an UNUSED element with some different element number,
 *          where that different number may also be %LC_FREE.
 *
 *     In this case, the cache is marked %LC_DIRTY,
 *     so lc_try_lock() will no longer succeed.
 *     The returned element pointer is moved to the "to_be_changed" list,
 *     and registered with the new element number on the hash collision chains,
 *     so it is possible to pick it up from lc_is_used().
 *     Up to "max_pending_changes" (see lc_create()) can be accumulated.
 *     The user now should do whatever housekeeping is necessary,
 *     typically serialize on lc_try_lock_for_transaction(), then call
 *     lc_committed(lc) and lc_unlock(), to finish the change.
 *
 * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
 *       any cache set change.
 */
struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
}
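
/* Example: the typical lc_get() miss/transaction cycle described in the
 * kernel-doc above.  The spinlock, the busy-wait, and the "write the
 * transaction" step are hypothetical stand-ins for the user's own
 * housekeeping (in DRBD, an on-disk activity log transaction).
 */
#if 0
static struct lc_element *example_get(struct lru_cache *lc, spinlock_t *lock,
				      unsigned int enr)
{
	struct lc_element *e;

	spin_lock(lock);
	e = lc_get(lc, enr);
	spin_unlock(lock);
	if (!e)
		return NULL;	/* starving, locked, or too many pending changes */

	if (e->lc_number != enr) {
		/* miss: @e is on "to_be_changed", relabeled to @enr.
		 * Record the change persistently, then commit it. */
		while (!lc_try_lock_for_transaction(lc))
			cpu_relax();
		/* ... write out lc->to_be_changed here ... */
		spin_lock(lock);
		lc_committed(lc);
		spin_unlock(lock);
		lc_unlock(lc);
	}
	return e;	/* pinned; release with lc_put() when done */
}
#endif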
/**
 * lc_get_cumulative - like lc_get; also finds to-be-changed elements
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Unlike lc_get this also returns the element for @enr, if it belongs to
 * a pending transaction, so the return values are like for lc_get(),
 * plus:
 *
 * pointer to an element already on the "to_be_changed" list.
 *	In this case, the cache was already marked %LC_DIRTY.
 *
 * Caller needs to make sure that the pending transaction is completed,
 * before proceeding to actually use this element.
 */
struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
}
/**
 * lc_try_get - get element by label, if present; do not change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away.
 */
struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, 0);
}
/**
 * lc_committed - tell @lc that pending changes have been recorded
 * @lc: the lru cache to operate on
 *
 * User is expected to serialize on explicit lc_try_lock_for_transaction()
 * before the transaction is started, and later needs to lc_unlock() explicitly
 * as well.
 */
void lc_committed(struct lru_cache *lc)
{
	struct lc_element *e, *tmp;

	PARANOIA_ENTRY();
	list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
		/* count number of changes, not number of transactions */
		++lc->changed;
		e->lc_number = e->lc_new_number;
		list_move(&e->list, &lc->in_use);
	}
	lc->pending_changes = 0;
	RETURN();
}
/**
 * lc_put - give up refcnt of @e
 * @lc: the lru cache to operate on
 * @e: the element to put
 *
 * If refcnt reaches zero, the element is moved to the lru list,
 * and a %LC_STARVING (if set) is cleared.
 * Returns the new (post-decrement) refcnt.
 */
unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt == 0);
	BUG_ON(e->lc_number != e->lc_new_number);
	if (--e->refcnt == 0) {
		/* move it to the front of LRU. */
		list_move(&e->list, &lc->lru);
		lc->used--;
		clear_bit_unlock(__LC_STARVING, &lc->flags);
	}
	RETURN(e->refcnt);
}
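
/* Example: lc_get()/lc_try_get() and lc_put() pairing.  While the refcnt
 * is held, @e sits on the "in_use" list and cannot be evicted; the final
 * lc_put() makes it evictable again.  The spinlock is a hypothetical
 * caller-side serializer, as elsewhere in these sketches.
 */
#if 0
static void example_use(struct lru_cache *lc, spinlock_t *lock, unsigned int enr)
{
	struct lc_element *e;

	spin_lock(lock);
	e = lc_try_get(lc, enr);	/* hit-only: never changes the active set */
	spin_unlock(lock);
	if (!e)
		return;

	/* ... safely use the object embedding @e: it is pinned ... */

	spin_lock(lock);
	lc_put(lc, e);			/* refcnt 0 moves @e back to the lru list */
	spin_unlock(lock);
}
#endif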
/**
 * lc_element_by_index
 * @lc: the lru cache to operate on
 * @i: the index of the element to return
 */
struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
{
	BUG_ON(i >= lc->nr_elements);
	BUG_ON(lc->lc_element[i] == NULL);
	BUG_ON(lc->lc_element[i]->lc_index != i);
	return lc->lc_element[i];
}

/**
 * lc_index_of
 * @lc: the lru cache to operate on
 * @e: the element to query for its index position in lc->lc_element
 */
unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_LC_ELEMENT(lc, e);
	return e->lc_index;
}
/**
 * lc_set - associate index with label
 * @lc: the lru cache to operate on
 * @enr: the label to set
 * @index: the element index to associate label with.
 *
 * Used to initialize the active set to some previously recorded state.
 */
void lc_set(struct lru_cache *lc, unsigned int enr, int index)
{
	struct lc_element *e;
	struct list_head *lh;

	if (index < 0 || index >= lc->nr_elements)
		return;

	e = lc_element_by_index(lc, index);
	BUG_ON(e->lc_number != e->lc_new_number);
	BUG_ON(e->refcnt != 0);

	e->lc_number = e->lc_new_number = enr;
	hlist_del_init(&e->colision);
	if (enr == LC_FREE)
		lh = &lc->free;
	else {
		hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
		lh = &lc->lru;
	}
	list_move(&e->list, lh);
}
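
/* Example: restoring a previously recorded active set with lc_set(), e.g.
 * during crash recovery.  The "recorded[]" array (one label per element
 * slot, %LC_FREE for unused slots) is an assumption of this sketch; the
 * on-disk format is entirely up to the user.
 */
#if 0
static void example_restore(struct lru_cache *lc, const unsigned int *recorded)
{
	unsigned int i;

	for (i = 0; i < lc->nr_elements; i++)
		lc_set(lc, recorded[i], i);
}
#endif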
/**
 * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
 * @seq: the &struct seq_file pointer to seq_printf into
 * @lc: the lru cache to operate on
 * @utext: user supplied additional "heading" or other info
 * @detail: function pointer the user may provide to dump further details
 * of the object the lc_element is embedded in.  May be NULL.
 * Note: a leading space ' ' and trailing newline '\n' is implied.
 */
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
	     void (*detail) (struct seq_file *, struct lc_element *))
{
	unsigned int nr_elements = lc->nr_elements;
	struct lc_element *e;
	int i;

	seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
	for (i = 0; i < nr_elements; i++) {
		e = lc_element_by_index(lc, i);
		if (e->lc_number != e->lc_new_number)
			seq_printf(seq, "\t%5d: %6d %8d %6d ",
				i, e->lc_number, e->lc_new_number, e->refcnt);
		else
			seq_printf(seq, "\t%5d: %6d %-8s %6d ",
				i, e->lc_number, "-\"-", e->refcnt);
		if (detail)
			detail(seq, e);
		seq_putc(seq, '\n');
	}
}
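
/* Example: exposing the stats and dump through a seq_file show callback,
 * e.g. for a debugfs file created with single_open().  The function name
 * and the use of seq->private are illustrative assumptions.
 */
#if 0
static int example_lc_show(struct seq_file *seq, void *unused)
{
	struct lru_cache *lc = seq->private;

	lc_seq_printf_stats(seq, lc);
	lc_seq_dump_details(seq, lc, "user-data", NULL);
	return 0;
}
#endif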
EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_committed);
EXPORT_SYMBOL(lc_element_by_index);
EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
EXPORT_SYMBOL(lc_try_lock);
EXPORT_SYMBOL(lc_is_used);
EXPORT_SYMBOL(lc_get_cumulative);