/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 */
#include <linux/types.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"
#define	RPCDBG_FACILITY	RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time_t now = seconds_since_boot();

	INIT_HLIST_NODE(&h->cache_list);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
	struct hlist_head *head;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				/* This entry is expired, we will discard it. */
				break;
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose an entry if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				hlist_del_init(&tmp->cache_list);
				detail->entries--;
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	if (freeme)
		cache_put(freeme, detail);
	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail)
{
	time_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}
static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
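
/*
 * Usage sketch (hypothetical cache implementation, not part of this file):
 * a cache embeds a struct cache_head in its entry type and supplies
 * alloc/match/init/update methods in its cache_detail.  A caller then
 * typically pairs the two helpers above roughly as follows:
 *
 *	item = sunrpc_cache_lookup(cd, &key.h, hash);        find or create
 *	...
 *	item = sunrpc_cache_update(cd, &new.h, item, hash);  install new content
 *
 * Both calls return an entry with a reference held; the caller must
 * eventually drop it with cache_put().
 */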
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}
static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}
static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	write_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
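
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * server-side user looks an entry up, then validates it with cache_check(),
 * passing the request's deferral handle so a pending upcall can defer the
 * request:
 *
 *	switch (cache_check(cd, &item->h, &rqstp->rq_chandle)) {
 *	case 0:       break;              entry is valid and usable
 *	case -EAGAIN: return SVC_DROP;    upcall queued, request deferred
 *	case -ENOENT: return SVC_DENIED;  negative entry
 *	default:      return SVC_DENIED;  -ETIMEDOUT or similar
 *	}
 */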
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for the current cache.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */
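
/*
 * Worked example (illustrative): if a scan finds the earliest expiry in a
 * table is ten minutes away, nextcheck ends up roughly ten minutes in the
 * future and the cleaner skips that table until then, unless a flush moves
 * flush_time (and hence nextcheck) earlier.
 */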
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;
void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			hlist_del_init(&ch->cache_list);
			current_detail->entries--;
			rv = 1;
			break;
		}

		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			set_bit(CACHE_CLEANED, &ch->flags);
			cache_fresh_unlocked(ch, d);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned.
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	write_lock(&detail->hash_lock);
	if (!detail->entries) {
		write_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			hlist_del_init(&ch->cache_list);
			detail->entries--;

			set_bit(CACHE_CLEANED, &ch->flags);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(ch, detail);
			cache_put(ch, detail);
			write_lock(&detail->hash_lock);
		}
	}
	write_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */
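
/*
 * DFR_HASH hashes on the cache_head pointer itself: the low bits (largely
 * constant because of allocator alignment) are shifted away and mixed with
 * higher-order bits before reducing modulo DFR_HASHSIZE.  DFR_MAX bounds how
 * many deferred requests may be queued at once; see cache_limit_defers()
 * below.
 */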
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}
static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}
static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}
struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}
static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}
static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}
/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}
static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
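
/*
 * Illustrative (hypothetical) user-space consumer of a channel file; the
 * actual request/reply contents are defined by each cache's cache_request()
 * and cache_parse() methods, not by this code:
 *
 *	fd = open("/proc/net/rpc/<cachename>/channel", O_RDWR);
 *	for (;;) {
 *		n = read(fd, buf, sizeof(buf));    blocks until an upcall is queued
 *		... look up the answer for the request in buf ...
 *		write(fd, reply, replylen);        one complete reply per write
 *	}
 */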
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};
static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		err = 0;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}
static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}
static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}
static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}
static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 *
 */
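
/*
 * For example (illustrative only): qword_add() emits the string "a b" as
 * "a\040b " (the space is octal-escaped, then a field separator is
 * appended), and qword_addhex() emits the two bytes 0xde 0xad as "\xdead ".
 */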
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}
static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}
/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
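
/*
 * Purely illustrative: following the field layout named above, a reply line
 * for a hypothetical cache could look like
 *
 *	mycache 2000000000 somekey somevalue\n
 *
 * with each field quoted as described for the text-based upcall routines;
 * every real cache defines its own fields in its cache_parse() method.
 */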
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

void *cache_seq_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while(hash < cd->hash_size &&
		hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);
void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(ch->cache_list.next,
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(cd->hash_table[hash].first,
				struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);
);
1353 void cache_seq_stop(struct seq_file
*m
, void *p
)
1354 __releases(cd
->hash_lock
)
1356 struct cache_detail
*cd
= m
->private;
1357 read_unlock(&cd
->hash_lock
);
1359 EXPORT_SYMBOL_GPL(cache_seq_stop
);
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}
static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start,
	.next	= cache_seq_next,
	.stop	= cache_seq_stop,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}
static int content_release(struct inode *inode, struct file *file,
		struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}
static int open_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}
static int release_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
			convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	*ppos += count;
	return count;
}
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};
static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};
static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_operations_procfs, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_file_operations_procfs, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_file_operations_procfs, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif
void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}
int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);
void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);
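
/*
 * Typical (hypothetical) per-net registration sequence for a cache, as used
 * by callers of this API:
 *
 *	cd = cache_create_net(&my_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);      creates the /proc files
 *	...
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */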
void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};
static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};
static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	write_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		hlist_del_init(&h->cache_list);
		cd->entries--;
		write_unlock(&cd->hash_lock);
		cache_put(h, cd);
	} else
		write_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);