// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

static struct workqueue_struct *pm_wq;

/* path manager command handlers */

int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	WRITE_ONCE(msk->pm.addr_signal, true);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	return -ENOTSUPP;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
	return -ENOTSUPP;
}

/* path manager event handlers */

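/* record whether the local end acts as the server for this connection */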
void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

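/* check and account for an additional subflow: the counter is updated under
 * the PM lock and accept_subflow is cleared once the limit is reached, so
 * later callers can bail out without taking the lock
 */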
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	ret = pm->subflows < pm->subflows_max;
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	if (queue_work(pm_wq, &msk->pm.work))
		sock_hold((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

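/* the peer announced a new address: if we can still accept addresses,
 * record it and let the PM worker act on it
 */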
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* avoid acquiring the lock if there is no room for further addresses */
	if (!READ_ONCE(pm->accept_addr))
		return;

	spin_lock_bh(&pm->lock);

	/* be sure there is something to signal, re-checking under the PM lock */
	if (READ_ONCE(pm->accept_addr) &&
	    mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

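/* if an address is pending and fits in the @remaining option space, copy it
 * to @saddr and clear addr_signal so it is announced only once
 */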
bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			  struct mptcp_addr_info *saddr)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_signal(msk))
		goto out_unlock;

	if (remaining < mptcp_add_addr_len(msk->pm.local.family))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, false);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

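/* workqueue callback: handle every status bit set by mptcp_pm_schedule_work()
 * through the netlink PM, then drop the socket reference taken when the work
 * was queued
 */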
static void pm_worker(struct work_struct *work)
{
	struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
						work);
	struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
	release_sock(sk);
	sock_put(sk);
}

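/* initialise the per-connection PM state before the netlink PM applies its
 * own configuration
 */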
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_WORK(&msk->pm.work, pm_worker);

	mptcp_pm_nl_data_init(msk);
}

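/* cancel any pending PM work and drop the reference it holds on the socket */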
void mptcp_pm_close(struct mptcp_sock *msk)
{
	if (cancel_work_sync(&msk->pm.work))
		sock_put((struct sock *)msk);
}

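/* allocate the workqueue shared by all MPTCP path manager work items */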
void mptcp_pm_init(void)
{
	pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
	if (!pm_wq)
		panic("Failed to allocate workqueue");
}