// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/dst_cache.c - dst entry cache
 *
 * Copyright (c) 2016 Paolo Abeni <pabeni@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <net/dst_cache.h>
#include <net/route.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#endif
#include <uapi/linux/in.h>

struct dst_cache_pcpu {
	unsigned long refresh_ts;
	struct dst_entry *dst;
	u32 cookie;
	union {
		struct in_addr in_saddr;
		struct in6_addr in6_saddr;
	};
};

static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
				      struct dst_entry *dst, u32 cookie)
{
	dst_release(dst_cache->dst);
	if (dst)
		dst_hold(dst);

	dst_cache->cookie = cookie;
	dst_cache->dst = dst;
}

static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
					       struct dst_cache_pcpu *idst)
{
	struct dst_entry *dst;

	dst = idst->dst;
	if (!dst)
		goto fail;

	/* the cache already holds a dst reference; it can't go away */
	dst_hold(dst);

	/* drop the cached entry if the cache has been reset since this CPU
	 * last refreshed it, or if the dst is obsolete and fails revalidation
	 */
	if (unlikely(!time_after(idst->refresh_ts, dst_cache->reset_ts) ||
		     (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) {
		dst_cache_per_cpu_dst_set(idst, NULL, 0);
		dst_release(dst);
		goto fail;
	}
	return dst;

fail:
	idst->refresh_ts = jiffies;
	return NULL;
}

struct dst_entry *dst_cache_get(struct dst_cache *dst_cache)
{
	if (!dst_cache->cache)
		return NULL;

	return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache));
}
EXPORT_SYMBOL_GPL(dst_cache_get);

struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
{
	struct dst_cache_pcpu *idst;
	struct dst_entry *dst;

	if (!dst_cache->cache)
		return NULL;

	idst = this_cpu_ptr(dst_cache->cache);
	dst = dst_cache_per_cpu_get(dst_cache, idst);
	if (!dst)
		return NULL;

	*saddr = idst->in_saddr.s_addr;
	return container_of(dst, struct rtable, dst);
}
EXPORT_SYMBOL_GPL(dst_cache_get_ip4);

void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst,
		       __be32 saddr)
{
	struct dst_cache_pcpu *idst;

	if (!dst_cache->cache)
		return;

	idst = this_cpu_ptr(dst_cache->cache);
	dst_cache_per_cpu_dst_set(idst, dst, 0);
	idst->in_saddr.s_addr = saddr;
}
EXPORT_SYMBOL_GPL(dst_cache_set_ip4);

#if IS_ENABLED(CONFIG_IPV6)
void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
		       const struct in6_addr *saddr)
{
	struct dst_cache_pcpu *idst;

	if (!dst_cache->cache)
		return;

	idst = this_cpu_ptr(dst_cache->cache);
	dst_cache_per_cpu_dst_set(idst, dst,
				  rt6_get_cookie((struct rt6_info *)dst));
	idst->in6_saddr = *saddr;
}
EXPORT_SYMBOL_GPL(dst_cache_set_ip6);

struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache,
				    struct in6_addr *saddr)
{
	struct dst_cache_pcpu *idst;
	struct dst_entry *dst;

	if (!dst_cache->cache)
		return NULL;

	idst = this_cpu_ptr(dst_cache->cache);
	dst = dst_cache_per_cpu_get(dst_cache, idst);
	if (!dst)
		return NULL;

	*saddr = idst->in6_saddr;
	return dst;
}
EXPORT_SYMBOL_GPL(dst_cache_get_ip6);
#endif

int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp)
{
	dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu,
					    gfp | __GFP_ZERO);
	if (!dst_cache->cache)
		return -ENOMEM;

	dst_cache_reset(dst_cache);
	return 0;
}
EXPORT_SYMBOL_GPL(dst_cache_init);

void dst_cache_destroy(struct dst_cache *dst_cache)
{
	int i;

	if (!dst_cache->cache)
		return;

	for_each_possible_cpu(i)
		dst_release(per_cpu_ptr(dst_cache->cache, i)->dst);

	free_percpu(dst_cache->cache);
}
EXPORT_SYMBOL_GPL(dst_cache_destroy);
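
/*
 * Illustrative usage sketch (editorial addition, not part of the file above):
 * how a hypothetical tunnel driver could use the dst_cache API on its IPv4
 * transmit path. "struct example_tunnel", its fields and the helper names are
 * assumptions made only for this example; the dst_cache_* calls are the ones
 * defined above.
 */
#include <linux/err.h>
#include <linux/string.h>
#include <net/dst_cache.h>
#include <net/route.h>

struct example_tunnel {
	struct dst_cache dst_cache;	/* one per-CPU route cache per tunnel */
	__be32 remote_ip;		/* fixed tunnel endpoint address */
};

static int example_tunnel_init(struct example_tunnel *t)
{
	/* allocate the per-CPU slots; pair with dst_cache_destroy() on teardown */
	return dst_cache_init(&t->dst_cache, GFP_KERNEL);
}

static struct rtable *example_tunnel_get_rt(struct example_tunnel *t,
					    struct net *net, __be32 *saddr)
{
	struct flowi4 fl4;
	struct rtable *rt;

	/* fast path: reuse the cached route while it is still valid; the
	 * returned route carries its own reference, so the caller must drop
	 * it (e.g. with ip_rt_put()) once the packet has been handed off
	 */
	rt = dst_cache_get_ip4(&t->dst_cache, saddr);
	if (rt)
		return rt;

	/* slow path: full route lookup, then refill the per-CPU cache */
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = t->remote_ip;
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	*saddr = fl4.saddr;
	dst_cache_set_ip4(&t->dst_cache, &rt->dst, fl4.saddr);
	return rt;
}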