/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _PROTO_MEMORY_H
#define _PROTO_MEMORY_H

#include <net/hotdata.h>

/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
11 static inline bool sk_has_memory_pressure(const struct sock
*sk
)
13 return sk
->sk_prot
->memory_pressure
!= NULL
;
17 proto_memory_pressure(const struct proto
*prot
)
19 if (!prot
->memory_pressure
)
21 return !!READ_ONCE(*prot
->memory_pressure
);
24 static inline bool sk_under_global_memory_pressure(const struct sock
*sk
)
26 return proto_memory_pressure(sk
->sk_prot
);
29 static inline bool sk_under_memory_pressure(const struct sock
*sk
)
31 if (!sk
->sk_prot
->memory_pressure
)
34 if (mem_cgroup_sockets_enabled
&& sk
->sk_memcg
&&
35 mem_cgroup_under_socket_pressure(sk
->sk_memcg
))
38 return !!READ_ONCE(*sk
->sk_prot
->memory_pressure
);
42 proto_memory_allocated(const struct proto
*prot
)
44 return max(0L, atomic_long_read(prot
->memory_allocated
));
48 sk_memory_allocated(const struct sock
*sk
)
50 return proto_memory_allocated(sk
->sk_prot
);
53 static inline void proto_memory_pcpu_drain(struct proto
*proto
)
55 int val
= this_cpu_xchg(*proto
->per_cpu_fw_alloc
, 0);
58 atomic_long_add(val
, proto
->memory_allocated
);
62 sk_memory_allocated_add(const struct sock
*sk
, int val
)
64 struct proto
*proto
= sk
->sk_prot
;
66 val
= this_cpu_add_return(*proto
->per_cpu_fw_alloc
, val
);
68 if (unlikely(val
>= READ_ONCE(net_hotdata
.sysctl_mem_pcpu_rsv
)))
69 proto_memory_pcpu_drain(proto
);
73 sk_memory_allocated_sub(const struct sock
*sk
, int val
)
75 struct proto
*proto
= sk
->sk_prot
;
77 val
= this_cpu_sub_return(*proto
->per_cpu_fw_alloc
, val
);
79 if (unlikely(val
<= -READ_ONCE(net_hotdata
.sysctl_mem_pcpu_rsv
)))
80 proto_memory_pcpu_drain(proto
);
#endif /* _PROTO_MEMORY_H */