WIP FPC-III support
[linux/fpc-iii.git] / net / mptcp / pm.c
blobda2ed576f289915dd97d799f3b9ef43dfeeeb95d
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
/* path manager command handlers */
15 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
16 const struct mptcp_addr_info *addr,
17 bool echo, bool port)
19 u8 add_addr = READ_ONCE(msk->pm.addr_signal);
21 pr_debug("msk=%p, local_id=%d", msk, addr->id);
23 if (add_addr) {
24 pr_warn("addr_signal error, add_addr=%d", add_addr);
25 return -EINVAL;
28 msk->pm.local = *addr;
29 add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
30 if (echo)
31 add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
32 if (addr->family == AF_INET6)
33 add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
34 if (port)
35 add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
36 WRITE_ONCE(msk->pm.addr_signal, add_addr);
37 return 0;
40 int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
42 u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
44 pr_debug("msk=%p, local_id=%d", msk, local_id);
46 if (rm_addr) {
47 pr_warn("addr_signal error, rm_addr=%d", rm_addr);
48 return -EINVAL;
51 msk->pm.rm_id = local_id;
52 rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
53 WRITE_ONCE(msk->pm.addr_signal, rm_addr);
54 return 0;
57 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
59 pr_debug("msk=%p, local_id=%d", msk, local_id);
61 spin_lock_bh(&msk->pm.lock);
62 mptcp_pm_nl_rm_subflow_received(msk, local_id);
63 spin_unlock_bh(&msk->pm.lock);
64 return 0;
/* path manager event handlers */
69 void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
71 struct mptcp_pm_data *pm = &msk->pm;
73 pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
75 WRITE_ONCE(pm->server_side, server_side);
78 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
80 struct mptcp_pm_data *pm = &msk->pm;
81 int ret = 0;
83 pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
84 pm->subflows_max, READ_ONCE(pm->accept_subflow));
86 /* try to avoid acquiring the lock below */
87 if (!READ_ONCE(pm->accept_subflow))
88 return false;
90 spin_lock_bh(&pm->lock);
91 if (READ_ONCE(pm->accept_subflow)) {
92 ret = pm->subflows < pm->subflows_max;
93 if (ret && ++pm->subflows == pm->subflows_max)
94 WRITE_ONCE(pm->accept_subflow, false);
96 spin_unlock_bh(&pm->lock);
98 return ret;
101 /* return true if the new status bit is currently cleared, that is, this event
102 * can be server, eventually by an already scheduled work
104 static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
105 enum mptcp_pm_status new_status)
107 pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
108 BIT(new_status));
109 if (msk->pm.status & BIT(new_status))
110 return false;
112 msk->pm.status |= BIT(new_status);
113 mptcp_schedule_work((struct sock *)msk);
114 return true;
117 void mptcp_pm_fully_established(struct mptcp_sock *msk)
119 struct mptcp_pm_data *pm = &msk->pm;
121 pr_debug("msk=%p", msk);
123 /* try to avoid acquiring the lock below */
124 if (!READ_ONCE(pm->work_pending))
125 return;
127 spin_lock_bh(&pm->lock);
129 /* mptcp_pm_fully_established() can be invoked by multiple
130 * racing paths - accept() and check_fully_established()
131 * be sure to serve this event only once.
133 if (READ_ONCE(pm->work_pending) &&
134 !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
135 mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
136 msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
138 spin_unlock_bh(&pm->lock);
141 void mptcp_pm_connection_closed(struct mptcp_sock *msk)
143 pr_debug("msk=%p", msk);
146 void mptcp_pm_subflow_established(struct mptcp_sock *msk,
147 struct mptcp_subflow_context *subflow)
149 struct mptcp_pm_data *pm = &msk->pm;
151 pr_debug("msk=%p", msk);
153 if (!READ_ONCE(pm->work_pending))
154 return;
156 spin_lock_bh(&pm->lock);
158 if (READ_ONCE(pm->work_pending))
159 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
161 spin_unlock_bh(&pm->lock);
164 void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
166 pr_debug("msk=%p", msk);
169 void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
170 const struct mptcp_addr_info *addr)
172 struct mptcp_pm_data *pm = &msk->pm;
174 pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
175 READ_ONCE(pm->accept_addr));
177 spin_lock_bh(&pm->lock);
179 if (!READ_ONCE(pm->accept_addr)) {
180 mptcp_pm_announce_addr(msk, addr, true, addr->port);
181 mptcp_pm_add_addr_send_ack(msk);
182 } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
183 pm->remote = *addr;
186 spin_unlock_bh(&pm->lock);
189 void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
191 if (!mptcp_pm_should_add_signal_ipv6(msk) &&
192 !mptcp_pm_should_add_signal_port(msk))
193 return;
195 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
198 void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
200 struct mptcp_pm_data *pm = &msk->pm;
202 pr_debug("msk=%p remote_id=%d", msk, rm_id);
204 spin_lock_bh(&pm->lock);
205 mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
206 pm->rm_id = rm_id;
207 spin_unlock_bh(&pm->lock);
/* path manager helpers */
212 bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
213 struct mptcp_addr_info *saddr, bool *echo, bool *port)
215 int ret = false;
217 spin_lock_bh(&msk->pm.lock);
219 /* double check after the lock is acquired */
220 if (!mptcp_pm_should_add_signal(msk))
221 goto out_unlock;
223 *echo = mptcp_pm_should_add_signal_echo(msk);
224 *port = mptcp_pm_should_add_signal_port(msk);
226 if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
227 goto out_unlock;
229 *saddr = msk->pm.local;
230 WRITE_ONCE(msk->pm.addr_signal, 0);
231 ret = true;
233 out_unlock:
234 spin_unlock_bh(&msk->pm.lock);
235 return ret;
238 bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
239 u8 *rm_id)
241 int ret = false;
243 spin_lock_bh(&msk->pm.lock);
245 /* double check after the lock is acquired */
246 if (!mptcp_pm_should_rm_signal(msk))
247 goto out_unlock;
249 if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
250 goto out_unlock;
252 *rm_id = msk->pm.rm_id;
253 WRITE_ONCE(msk->pm.addr_signal, 0);
254 ret = true;
256 out_unlock:
257 spin_unlock_bh(&msk->pm.lock);
258 return ret;
261 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
263 return mptcp_pm_nl_get_local_id(msk, skc);
266 void mptcp_pm_data_init(struct mptcp_sock *msk)
268 msk->pm.add_addr_signaled = 0;
269 msk->pm.add_addr_accepted = 0;
270 msk->pm.local_addr_used = 0;
271 msk->pm.subflows = 0;
272 msk->pm.rm_id = 0;
273 WRITE_ONCE(msk->pm.work_pending, false);
274 WRITE_ONCE(msk->pm.addr_signal, 0);
275 WRITE_ONCE(msk->pm.accept_addr, false);
276 WRITE_ONCE(msk->pm.accept_subflow, false);
277 msk->pm.status = 0;
279 spin_lock_init(&msk->pm.lock);
280 INIT_LIST_HEAD(&msk->pm.anno_list);
282 mptcp_pm_nl_data_init(msk);
285 void __init mptcp_pm_init(void)
287 mptcp_pm_nl_init();