/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

struct inetpeer_addr_base {
	union {
		__be32		a4;
		__be32		a6[4];
	};
};

struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;
};

struct inet_peer {
	/* group together avl_left,avl_right,v4daddr to speedup lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	union {
		struct list_head	gc_list;
		struct rcu_head		gc_rcu;
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), following field
	 * is not available: rid
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t	rid;	/* Frag reception counter */
		};
		struct rcu_head		rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* the time of last use of not referenced entries */
	atomic_t		refcnt;
};

struct inet_peer_base {
	struct inet_peer __rcu	*root;
	seqlock_t		lock;
	int			total;
};

#define INETPEER_BASE_BIT	0x1UL

static inline struct inet_peer *inetpeer_ptr(unsigned long val)
{
	BUG_ON(val & INETPEER_BASE_BIT);
	return (struct inet_peer *) val;
}

static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
{
	if (!(val & INETPEER_BASE_BIT))
		return NULL;
	val &= ~INETPEER_BASE_BIT;
	return (struct inet_peer_base *) val;
}

static inline bool inetpeer_ptr_is_peer(unsigned long val)
{
	return !(val & INETPEER_BASE_BIT);
}

static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
{
	/* This implicitly clears INETPEER_BASE_BIT */
	*val = (unsigned long) peer;
}

static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
{
	unsigned long val = (unsigned long) peer;
	unsigned long orig = *ptr;

	if (!(orig & INETPEER_BASE_BIT) ||
	    cmpxchg(ptr, orig, val) != orig)
		return false;
	return true;
}

static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
{
	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
}

static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
{
	unsigned long val = *from;

	*to = val;
	if (inetpeer_ptr_is_peer(val)) {
		struct inet_peer *peer = inetpeer_ptr(val);

		atomic_inc(&peer->refcnt);
	}
}

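/*
 * Illustrative sketch (not part of this header): the helpers above let a
 * caller embed a single unsigned long slot that holds either a tagged
 * pointer to an inet_peer_base (INETPEER_BASE_BIT set) or a plain pointer
 * to an attached inet_peer.  The pattern below shows how such a slot might
 * be initialised and lazily upgraded to a peer; struct example_entry,
 * example_init() and example_get_peer() are hypothetical names used only
 * for this sketch.
 *
 *	struct example_entry {
 *		unsigned long	peer_slot;
 *	};
 *
 *	static void example_init(struct example_entry *e,
 *				 struct inet_peer_base *base)
 *	{
 *		inetpeer_init_ptr(&e->peer_slot, base);
 *	}
 *
 *	static struct inet_peer *example_get_peer(struct example_entry *e,
 *						  const struct inetpeer_addr *daddr,
 *						  int create)
 *	{
 *		unsigned long val = e->peer_slot;
 *		struct inet_peer_base *base;
 *		struct inet_peer *peer;
 *
 *		if (inetpeer_ptr_is_peer(val))
 *			return inetpeer_ptr(val);
 *
 *		base = inetpeer_base_ptr(val);
 *		if (!base)
 *			return NULL;
 *
 *		peer = inet_getpeer(base, daddr, create);
 *		if (!peer)
 *			return NULL;
 *		if (!inetpeer_ptr_set_peer(&e->peer_slot, peer))
 *			inet_putpeer(peer);	/* lost the race; keep the winner's peer */
 *		return inetpeer_ptr(e->peer_slot);
 *	}
 *
 * Whoever owns the slot is expected to drop the attached reference with
 * inet_putpeer() when the containing object goes away.
 */
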
void inet_peer_base_init(struct inet_peer_base *);

void inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

static inline bool inet_metrics_new(const struct inet_peer *p)
{
	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
}

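/*
 * Illustrative sketch (not part of this header): a hypothetical caller
 * caching routing metrics in the peer could use inet_metrics_new() to
 * detect a peer whose metrics[] array still holds the INETPEER_METRICS_NEW
 * sentinel and seed it from a default table.  example_seed_metrics() and
 * its "defaults" argument are made-up names for this sketch.
 *
 *	static void example_seed_metrics(struct inet_peer *peer,
 *					 const u32 *defaults)
 *	{
 *		if (inet_metrics_new(peer))
 *			memcpy(peer->metrics, defaults,
 *			       sizeof(u32) * RTAX_MAX);
 *	}
 */
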
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);

static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
						__be32 v4daddr,
						int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
						const struct in6_addr *v6daddr,
						int create)
{
	struct inetpeer_addr daddr;

	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(base, &daddr, create);
}

/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

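/*
 * Illustrative sketch (not part of this header): an ICMP-style rate-limit
 * check typically looks up (or creates) the peer for the destination,
 * asks inet_peer_xrlim_allow() whether sending is allowed within the
 * given timeout, and drops the reference right away.
 * example_allow_send() and its parameters are made up for this sketch;
 * a failed lookup is simply treated as "allow" here.
 *
 *	static bool example_allow_send(struct inet_peer_base *base,
 *				       __be32 daddr, int timeout)
 *	{
 *		struct inet_peer *peer;
 *		bool allow = true;
 *
 *		peer = inet_getpeer_v4(base, daddr, 1);
 *		if (peer) {
 *			allow = inet_peer_xrlim_allow(peer, timeout);
 *			inet_putpeer(peer);
 *		}
 *		return allow;
 *	}
 */
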
void inetpeer_invalidate_tree(struct inet_peer_base *);

/*
 * temporary check to make sure we don't access rid, tcp_ts,
 * tcp_ts_stamp if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}
#endif /* _NET_INETPEER_H */