// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE
static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();

	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}
static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);
static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp))
			continue;
		tmp = cache_get_rcu(tmp);
		rcu_read_unlock();
		return tmp;
	}
	rcu_read_unlock();
	return NULL;
}
static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}
static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list,
				 lockdep_is_held(&detail->hash_lock)) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp)) {
			sunrpc_begin_cache_remove_entry(tmp, detail);
			trace_cache_entry_expired(detail, tmp);
			freeme = tmp;
			break;
		}
		cache_get(tmp);
		spin_unlock(&detail->hash_lock);
		cache_put(new, detail);
		return tmp;
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}
struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
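/*
 * Illustrative caller pattern (a sketch, not part of this file; the
 * cache type and key names are hypothetical):
 *
 *	struct cache_head *ch;
 *
 *	ch = sunrpc_cache_lookup_rcu(cd, &key->h, hash);
 *	if (!ch)
 *		return -ENOMEM;	// placeholder allocation failed
 *	// ch is either an existing entry or a fresh, not-yet-VALID
 *	// placeholder; cache_check() below decides whether an upcall
 *	// is needed before the data can be used.
 */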
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();

	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}
static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}
static void cache_make_negative(struct cache_detail *detail,
				struct cache_head *h)
{
	set_bit(CACHE_NEGATIVE, &h->flags);
	trace_cache_entry_make_negative(detail, h);
}
static void cache_entry_update(struct cache_detail *detail,
			       struct cache_head *h,
			       struct cache_head *new)
{
	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
		detail->update(h, new);
		trace_cache_entry_update(detail, h);
	} else {
		cache_make_negative(detail, h);
	}
}
struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new,
				       struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it.
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			cache_entry_update(detail, old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	cache_entry_update(detail, tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
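/*
 * Illustrative update flow (a sketch; 'new' and 'old' stand for
 * whatever entries a cache's downcall parser builds): after parsing a
 * reply into a temporary entry, a parser typically does
 *
 *	ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
 *
 * On return the reference on 'old' has been dropped and 'ch' (either
 * 'old' refreshed in place, or a replacement entry) carries the
 * caller's reference.
 */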
static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}
static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		cache_make_negative(detail, h);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 * -EAGAIN if upcall is pending and request has been queued
 * -ETIMEDOUT if upcall failed or request could not be queued or
 *           upcall completed but item is still invalid (implying that
 *           the cache item has been replaced with a newer one).
 * -ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC:       Want update, refage=%lld, age=%lld\n",
				refresh_age, age);
		switch (detail->cache_upcall(detail, h)) {
		case -EINVAL:
			rv = try_to_negate_entry(detail, h);
			break;
		case -EAGAIN:
			cache_fresh_unlocked(h, detail);
			break;
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
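/*
 * Illustrative caller (a sketch, assuming a server-side svc_rqst
 * 'rqstp' whose rq_chandle supplies the cache_req):
 *
 *	switch (cache_check(cd, &item->h, &rqstp->rq_chandle)) {
 *	case 0:			// valid; caller keeps its reference
 *		break;
 *	case -EAGAIN:		// upcall queued, request deferred
 *	case -ETIMEDOUT:	// upcall failed or entry replaced
 *	case -ENOENT:		// negative entry
 *	default:		// reference already dropped
 *		return error;
 *	}
 */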
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for the current cache.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */
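/*
 * Worked example (illustrative): if a full scan starts at time T and
 * finds entries expiring at T+5 and T+60, nextcheck drops from the
 * default of T+30*60 to T+6 (earliest expiry plus one second), so the
 * table is rescanned shortly after the first entry actually expires
 * rather than after the full 30 minutes.
 */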
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;
void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */
		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			trace_cache_entry_expired(current_detail, ch);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}
/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}
/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);
/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */
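/*
 * Illustrative note (not part of the original file): DFR_HASH just
 * mixes pointer bits - ((item>>4) ^ (item>>13)) % DFR_HASHSIZE - so
 * all deferred requests waiting on the same cache entry land in one
 * chain, while requests for distinct entries spread across the
 * DFR_HASHSIZE buckets.
 */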
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}
static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}
static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}
struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};
static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}
static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}
static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}
/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}
static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}
/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */
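/*
 * Example exchange on a channel file (illustrative; the exact line
 * format is defined by each cache's cache_request/cache_parse pair).
 * For the auth.unix.ip cache the kernel might queue the request line
 *
 *	nfsd 192.0.2.1
 *
 * which a userspace daemon read()s, resolves, and answers by
 * write()ing a reply such as
 *
 *	nfsd 192.0.2.1 2147483647 localhost
 *
 * i.e. class, address, expiry time and content, space-separated as
 * described for qword_add()/qword_get() below.
 */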
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};
static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -E2BIG;
	return PAGE_SIZE - len;
}
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;

		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		err = 0;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}
static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}
static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}
static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}
static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}
static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;

			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);
	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0)
		return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}
static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}
/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
		trace_cache_entry_upcall(detail, h);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	if (test_and_set_bit(CACHE_PENDING, &h->flags))
		return 0;
	return cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
				     struct cache_head *h)
{
	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		trace_cache_entry_no_listener(detail, h);
		return -EINVAL;
	}
	return sunrpc_cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */
static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}
static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}
void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}
static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};
static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}
static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);

	module_put(cd->owner);
	return ret;
}
static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}
static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time64_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	if (cd->flush)
		cd->flush();

	*ppos += count;
	return count;
}
static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}
static const struct proc_ops cache_channel_proc_ops = {
	.proc_lseek	= no_llseek,
	.proc_read	= cache_read_procfs,
	.proc_write	= cache_write_procfs,
	.proc_poll	= cache_poll_procfs,
	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.proc_open	= cache_open_procfs,
	.proc_release	= cache_release_procfs,
};
static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}
static const struct proc_ops content_proc_ops = {
	.proc_open	= content_open_procfs,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= content_release_procfs,
};
static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}
static const struct proc_ops cache_flush_proc_ops = {
	.proc_open	= open_flush_procfs,
	.proc_read	= read_flush_procfs,
	.proc_write	= write_flush_procfs,
	.proc_release	= release_flush_procfs,
	.proc_lseek	= no_llseek,
};
static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}
#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_proc_ops, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_channel_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif
void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}
int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);
void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);
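/*
 * Typical per-netns instantiation (illustrative sketch; the template
 * name "my_cache_template" is hypothetical):
 *
 *	struct cache_detail *cd;
 *	int err;
 *
 *	cd = cache_create_net(&my_cache_template, net);
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err)
 *		cache_destroy_net(cd, net);
 */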
void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}
const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};
static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}
const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};
static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}
const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};
int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);

	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		sunrpc_begin_cache_remove_entry(h, cd);
		spin_unlock(&cd->hash_lock);
		sunrpc_end_cache_remove_entry(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);