Linux 4.8-rc8
[linux/fpc-iii.git] / net / kcm / kcmsock.c
blob 411693288648dc3eb27b0b2a0c542be92762ad8a
1 #include <linux/bpf.h>
2 #include <linux/errno.h>
3 #include <linux/errqueue.h>
4 #include <linux/file.h>
5 #include <linux/in.h>
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/net.h>
9 #include <linux/netdevice.h>
10 #include <linux/poll.h>
11 #include <linux/rculist.h>
12 #include <linux/skbuff.h>
13 #include <linux/socket.h>
14 #include <linux/uaccess.h>
15 #include <linux/workqueue.h>
16 #include <linux/syscalls.h>
17 #include <net/kcm.h>
18 #include <net/netns/generic.h>
19 #include <net/sock.h>
20 #include <net/tcp.h>
21 #include <uapi/linux/kcm.h>
23 unsigned int kcm_net_id;
25 static struct kmem_cache *kcm_psockp __read_mostly;
26 static struct kmem_cache *kcm_muxp __read_mostly;
27 static struct workqueue_struct *kcm_wq;
29 static inline struct kcm_sock *kcm_sk(const struct sock *sk)
31 return (struct kcm_sock *)sk;
34 static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
36 return (struct kcm_tx_msg *)skb->cb;
39 static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
41 return (struct kcm_rx_msg *)((void *)skb->cb +
42 offsetof(struct qdisc_skb_cb, data));
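/* Note: per-message receive state (struct kcm_rx_msg) lives in skb->cb,
 * offset by offsetof(struct qdisc_skb_cb, data) so it occupies the data
 * area of the qdisc control block, presumably to avoid clashing with any
 * cb usage by lower layers while the skb is in flight.
 */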
45 static void report_csk_error(struct sock *csk, int err)
47 csk->sk_err = EPIPE;
48 csk->sk_error_report(csk);
51 /* Callback lock held */
52 static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
53 struct sk_buff *skb)
55 struct sock *csk = psock->sk;
57 /* Unrecoverable error in receive */
59 del_timer(&psock->rx_msg_timer);
61 if (psock->rx_stopped)
62 return;
64 psock->rx_stopped = 1;
65 KCM_STATS_INCR(psock->stats.rx_aborts);
67 /* Report an error on the lower socket */
68 report_csk_error(csk, err);
71 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
72 bool wakeup_kcm)
74 struct sock *csk = psock->sk;
75 struct kcm_mux *mux = psock->mux;
77 /* Unrecoverable error in transmit */
79 spin_lock_bh(&mux->lock);
81 if (psock->tx_stopped) {
82 spin_unlock_bh(&mux->lock);
83 return;
86 psock->tx_stopped = 1;
87 KCM_STATS_INCR(psock->stats.tx_aborts);
89 if (!psock->tx_kcm) {
90 /* Take off psocks_avail list */
91 list_del(&psock->psock_avail_list);
92 } else if (wakeup_kcm) {
93 /* In this case psock is being aborted while outside of
94 * write_msgs and psock is reserved. Schedule tx_work
95 * to handle the failure there. Need to commit tx_stopped
96 * before queuing work. */
98 smp_mb();
100 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
103 spin_unlock_bh(&mux->lock);
105 /* Report error on lower socket */
106 report_csk_error(csk, err);
109 /* RX mux lock held. */
110 static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
111 struct kcm_psock *psock)
113 KCM_STATS_ADD(mux->stats.rx_bytes,
114 psock->stats.rx_bytes - psock->saved_rx_bytes);
115 mux->stats.rx_msgs +=
116 psock->stats.rx_msgs - psock->saved_rx_msgs;
117 psock->saved_rx_msgs = psock->stats.rx_msgs;
118 psock->saved_rx_bytes = psock->stats.rx_bytes;
121 static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
122 struct kcm_psock *psock)
124 KCM_STATS_ADD(mux->stats.tx_bytes,
125 psock->stats.tx_bytes - psock->saved_tx_bytes);
126 mux->stats.tx_msgs +=
127 psock->stats.tx_msgs - psock->saved_tx_msgs;
128 psock->saved_tx_msgs = psock->stats.tx_msgs;
129 psock->saved_tx_bytes = psock->stats.tx_bytes;
132 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
134 /* KCM is ready to receive messages on its queue-- either the KCM is new or
135 * has become unblocked after being blocked on full socket buffer. Queue any
136 * pending ready messages on a psock. RX mux lock held. */
138 static void kcm_rcv_ready(struct kcm_sock *kcm)
140 struct kcm_mux *mux = kcm->mux;
141 struct kcm_psock *psock;
142 struct sk_buff *skb;
144 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
145 return;
147 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
148 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
149 /* Assuming buffer limit has been reached */
150 skb_queue_head(&mux->rx_hold_queue, skb);
151 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
152 return;
156 while (!list_empty(&mux->psocks_ready)) {
157 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
158 psock_ready_list);
160 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
161 /* Assuming buffer limit has been reached */
162 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
163 return;
166 /* Consumed the ready message on the psock. Schedule rx_work to
167 * get more messages. */
169 list_del(&psock->psock_ready_list);
170 psock->ready_rx_msg = NULL;
172 /* Commit clearing of ready_rx_msg for queuing work */
173 smp_mb();
175 queue_work(kcm_wq, &psock->rx_work);
178 /* Buffer limit is okay now, add to ready list */
179 list_add_tail(&kcm->wait_rx_list,
180 &kcm->mux->kcm_rx_waiters);
181 kcm->rx_wait = true;
184 static void kcm_rfree(struct sk_buff *skb)
186 struct sock *sk = skb->sk;
187 struct kcm_sock *kcm = kcm_sk(sk);
188 struct kcm_mux *mux = kcm->mux;
189 unsigned int len = skb->truesize;
191 sk_mem_uncharge(sk, len);
192 atomic_sub(len, &sk->sk_rmem_alloc);
194 /* For reading rx_wait and rx_psock without holding lock */
195 smp_mb__after_atomic();
197 if (!kcm->rx_wait && !kcm->rx_psock &&
198 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
199 spin_lock_bh(&mux->rx_lock);
200 kcm_rcv_ready(kcm);
201 spin_unlock_bh(&mux->rx_lock);
205 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
207 struct sk_buff_head *list = &sk->sk_receive_queue;
209 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
210 return -ENOMEM;
212 if (!sk_rmem_schedule(sk, skb, skb->truesize))
213 return -ENOBUFS;
215 skb->dev = NULL;
217 skb_orphan(skb);
218 skb->sk = sk;
219 skb->destructor = kcm_rfree;
220 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
221 sk_mem_charge(sk, skb->truesize);
223 skb_queue_tail(list, skb);
225 if (!sock_flag(sk, SOCK_DEAD))
226 sk->sk_data_ready(sk);
228 return 0;
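/* Queued skbs are charged against the KCM socket's receive buffer with
 * kcm_rfree as their destructor, so consuming data in recvmsg drops
 * sk_rmem_alloc and, once it falls below sk_rcvlowat, re-arms the socket
 * for more messages through kcm_rcv_ready.
 */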
231 /* Requeue received messages for a kcm socket to other kcm sockets. This is
232 * called when a kcm socket is receive disabled.
233 * RX mux lock held. */
235 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
237 struct sk_buff *skb;
238 struct kcm_sock *kcm;
240 while ((skb = __skb_dequeue(head))) {
241 /* Reset destructor to avoid calling kcm_rcv_ready */
242 skb->destructor = sock_rfree;
243 skb_orphan(skb);
244 try_again:
245 if (list_empty(&mux->kcm_rx_waiters)) {
246 skb_queue_tail(&mux->rx_hold_queue, skb);
247 continue;
250 kcm = list_first_entry(&mux->kcm_rx_waiters,
251 struct kcm_sock, wait_rx_list);
253 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
254 /* Should mean socket buffer full */
255 list_del(&kcm->wait_rx_list);
256 kcm->rx_wait = false;
258 /* Commit rx_wait to read in kcm_free */
259 smp_wmb();
261 goto try_again;
266 /* Lower sock lock held */
267 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
268 struct sk_buff *head)
270 struct kcm_mux *mux = psock->mux;
271 struct kcm_sock *kcm;
273 WARN_ON(psock->ready_rx_msg);
275 if (psock->rx_kcm)
276 return psock->rx_kcm;
278 spin_lock_bh(&mux->rx_lock);
280 if (psock->rx_kcm) {
281 spin_unlock_bh(&mux->rx_lock);
282 return psock->rx_kcm;
285 kcm_update_rx_mux_stats(mux, psock);
287 if (list_empty(&mux->kcm_rx_waiters)) {
288 psock->ready_rx_msg = head;
289 list_add_tail(&psock->psock_ready_list,
290 &mux->psocks_ready);
291 spin_unlock_bh(&mux->rx_lock);
292 return NULL;
295 kcm = list_first_entry(&mux->kcm_rx_waiters,
296 struct kcm_sock, wait_rx_list);
297 list_del(&kcm->wait_rx_list);
298 kcm->rx_wait = false;
300 psock->rx_kcm = kcm;
301 kcm->rx_psock = psock;
303 spin_unlock_bh(&mux->rx_lock);
305 return kcm;
308 static void kcm_done(struct kcm_sock *kcm);
310 static void kcm_done_work(struct work_struct *w)
312 kcm_done(container_of(w, struct kcm_sock, done_work));
315 /* Lower sock held */
316 static void unreserve_rx_kcm(struct kcm_psock *psock,
317 bool rcv_ready)
319 struct kcm_sock *kcm = psock->rx_kcm;
320 struct kcm_mux *mux = psock->mux;
322 if (!kcm)
323 return;
325 spin_lock_bh(&mux->rx_lock);
327 psock->rx_kcm = NULL;
328 kcm->rx_psock = NULL;
330 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
331 * kcm_rfree */
333 smp_mb();
335 if (unlikely(kcm->done)) {
336 spin_unlock_bh(&mux->rx_lock);
338 /* Need to run kcm_done in a task since we need to acquire
339 * callback locks which may already be held here. */
341 INIT_WORK(&kcm->done_work, kcm_done_work);
342 schedule_work(&kcm->done_work);
343 return;
346 if (unlikely(kcm->rx_disabled)) {
347 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
348 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
349 /* Check for a degenerate race with rx_wait where all
350 * data was dequeued (accounted for in kcm_rfree). */
352 kcm_rcv_ready(kcm);
354 spin_unlock_bh(&mux->rx_lock);
357 static void kcm_start_rx_timer(struct kcm_psock *psock)
359 if (psock->sk->sk_rcvtimeo)
360 mod_timer(&psock->rx_msg_timer, psock->sk->sk_rcvtimeo);
363 /* Macro to invoke filter function. */
364 #define KCM_RUN_FILTER(prog, ctx) \
365 (*prog->bpf_func)(ctx, prog->insnsi)
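/* The BPF program attached via SIOCKCMATTACH is run over the head skb of a
 * message being assembled and should return the total message length:
 * 0 is treated as "need more header", a length larger than sk_rcvbuf aborts
 * the psock with EMSGSIZE, and a length that does not reach into the newest
 * skb aborts with EPROTO (see kcm_tcp_recv below).
 */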
367 /* Lower socket lock held */
368 static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
369 unsigned int orig_offset, size_t orig_len)
371 struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
372 struct kcm_rx_msg *rxm;
373 struct kcm_sock *kcm;
374 struct sk_buff *head, *skb;
375 size_t eaten = 0, cand_len;
376 ssize_t extra;
377 int err;
378 bool cloned_orig = false;
380 if (psock->ready_rx_msg)
381 return 0;
383 head = psock->rx_skb_head;
384 if (head) {
385 /* Message already in progress */
387 rxm = kcm_rx_msg(head);
388 if (unlikely(rxm->early_eaten)) {
389 /* Already some number of bytes on the receive sock
390 * data saved in rx_skb_head, just indicate they
391 * are consumed. */
393 eaten = orig_len <= rxm->early_eaten ?
394 orig_len : rxm->early_eaten;
395 rxm->early_eaten -= eaten;
397 return eaten;
400 if (unlikely(orig_offset)) {
401 /* Getting data with a non-zero offset when a message is
402 * in progress is not expected. If it does happen, we
403 * need to clone and pull since we can't deal with
404 * offsets in the skbs for a message except in the head. */
406 orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
407 if (!orig_skb) {
408 KCM_STATS_INCR(psock->stats.rx_mem_fail);
409 desc->error = -ENOMEM;
410 return 0;
412 if (!pskb_pull(orig_skb, orig_offset)) {
413 KCM_STATS_INCR(psock->stats.rx_mem_fail);
414 kfree_skb(orig_skb);
415 desc->error = -ENOMEM;
416 return 0;
418 cloned_orig = true;
419 orig_offset = 0;
422 if (!psock->rx_skb_nextp) {
423 /* We are going to append to the frags_list of head.
424 * Need to unshare the frag_list. */
426 err = skb_unclone(head, GFP_ATOMIC);
427 if (err) {
428 KCM_STATS_INCR(psock->stats.rx_mem_fail);
429 desc->error = err;
430 return 0;
433 if (unlikely(skb_shinfo(head)->frag_list)) {
434 /* We can't append to an sk_buff that already
435 * has a frag_list. We create a new head, point
436 * the frag_list of that to the old head, and
437 * then are able to use the old head->next for
438 * appending to the message. */
440 if (WARN_ON(head->next)) {
441 desc->error = -EINVAL;
442 return 0;
445 skb = alloc_skb(0, GFP_ATOMIC);
446 if (!skb) {
447 KCM_STATS_INCR(psock->stats.rx_mem_fail);
448 desc->error = -ENOMEM;
449 return 0;
451 skb->len = head->len;
452 skb->data_len = head->len;
453 skb->truesize = head->truesize;
454 *kcm_rx_msg(skb) = *kcm_rx_msg(head);
455 psock->rx_skb_nextp = &head->next;
456 skb_shinfo(skb)->frag_list = head;
457 psock->rx_skb_head = skb;
458 head = skb;
459 } else {
460 psock->rx_skb_nextp =
461 &skb_shinfo(head)->frag_list;
466 while (eaten < orig_len) {
467 /* Always clone since we will consume something */
468 skb = skb_clone(orig_skb, GFP_ATOMIC);
469 if (!skb) {
470 KCM_STATS_INCR(psock->stats.rx_mem_fail);
471 desc->error = -ENOMEM;
472 break;
475 cand_len = orig_len - eaten;
477 head = psock->rx_skb_head;
478 if (!head) {
479 head = skb;
480 psock->rx_skb_head = head;
481 /* Will set rx_skb_nextp on next packet if needed */
482 psock->rx_skb_nextp = NULL;
483 rxm = kcm_rx_msg(head);
484 memset(rxm, 0, sizeof(*rxm));
485 rxm->offset = orig_offset + eaten;
486 } else {
487 /* Unclone since we may be appending to an skb that we
488 * already share a frag_list with. */
490 err = skb_unclone(skb, GFP_ATOMIC);
491 if (err) {
492 KCM_STATS_INCR(psock->stats.rx_mem_fail);
493 desc->error = err;
494 break;
497 rxm = kcm_rx_msg(head);
498 *psock->rx_skb_nextp = skb;
499 psock->rx_skb_nextp = &skb->next;
500 head->data_len += skb->len;
501 head->len += skb->len;
502 head->truesize += skb->truesize;
505 if (!rxm->full_len) {
506 ssize_t len;
508 len = KCM_RUN_FILTER(psock->bpf_prog, head);
510 if (!len) {
511 /* Need more header to determine length */
512 if (!rxm->accum_len) {
513 /* Start RX timer for new message */
514 kcm_start_rx_timer(psock);
516 rxm->accum_len += cand_len;
517 eaten += cand_len;
518 KCM_STATS_INCR(psock->stats.rx_need_more_hdr);
519 WARN_ON(eaten != orig_len);
520 break;
521 } else if (len > psock->sk->sk_rcvbuf) {
522 /* Message length exceeds maximum allowed */
523 KCM_STATS_INCR(psock->stats.rx_msg_too_big);
524 desc->error = -EMSGSIZE;
525 psock->rx_skb_head = NULL;
526 kcm_abort_rx_psock(psock, EMSGSIZE, head);
527 break;
528 } else if (len <= (ssize_t)head->len -
529 skb->len - rxm->offset) {
530 /* Length must be into new skb (and also
531 * greater than zero) */
533 KCM_STATS_INCR(psock->stats.rx_bad_hdr_len);
534 desc->error = -EPROTO;
535 psock->rx_skb_head = NULL;
536 kcm_abort_rx_psock(psock, EPROTO, head);
537 break;
540 rxm->full_len = len;
543 extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
545 if (extra < 0) {
546 /* Message not complete yet. */
547 if (rxm->full_len - rxm->accum_len >
548 tcp_inq(psock->sk)) {
549 /* Don't have the whole message in the socket
550 * buffer. Set psock->rx_need_bytes to wait for
551 * the rest of the message. Also, set "early
552 * eaten" since we've already buffered the skb
553 * but don't consume yet per tcp_read_sock. */
556 if (!rxm->accum_len) {
557 /* Start RX timer for new message */
558 kcm_start_rx_timer(psock);
561 psock->rx_need_bytes = rxm->full_len -
562 rxm->accum_len;
563 rxm->accum_len += cand_len;
564 rxm->early_eaten = cand_len;
565 KCM_STATS_ADD(psock->stats.rx_bytes, cand_len);
566 desc->count = 0; /* Stop reading socket */
567 break;
569 rxm->accum_len += cand_len;
570 eaten += cand_len;
571 WARN_ON(eaten != orig_len);
572 break;
575 /* Positive extra indicates more bytes than needed for the
576 * message */
579 WARN_ON(extra > cand_len);
581 eaten += (cand_len - extra);
583 /* Hurray, we have a new message! */
584 del_timer(&psock->rx_msg_timer);
585 psock->rx_skb_head = NULL;
586 KCM_STATS_INCR(psock->stats.rx_msgs);
588 try_queue:
589 kcm = reserve_rx_kcm(psock, head);
590 if (!kcm) {
591 /* Unable to reserve a KCM, message is held in psock. */
592 break;
595 if (kcm_queue_rcv_skb(&kcm->sk, head)) {
596 /* Should mean socket buffer full */
597 unreserve_rx_kcm(psock, false);
598 goto try_queue;
602 if (cloned_orig)
603 kfree_skb(orig_skb);
605 KCM_STATS_ADD(psock->stats.rx_bytes, eaten);
607 return eaten;
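/* Receive path summary: tcp_read_sock() feeds TCP data to kcm_tcp_recv(),
 * which accumulates skbs on psock->rx_skb_head until the BPF filter reports
 * a complete message length. A finished message is handed to a waiting KCM
 * socket through reserve_rx_kcm()/kcm_queue_rcv_skb(), or parked as
 * psock->ready_rx_msg when no KCM socket can take it.
 */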
610 /* Called with lock held on lower socket */
611 static int psock_tcp_read_sock(struct kcm_psock *psock)
613 read_descriptor_t desc;
615 desc.arg.data = psock;
616 desc.error = 0;
617 desc.count = 1; /* give more than one skb per call */
619 /* sk should be locked here, so okay to do tcp_read_sock */
620 tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
622 unreserve_rx_kcm(psock, true);
624 return desc.error;
627 /* Lower sock lock held */
628 static void psock_tcp_data_ready(struct sock *sk)
630 struct kcm_psock *psock;
632 read_lock_bh(&sk->sk_callback_lock);
634 psock = (struct kcm_psock *)sk->sk_user_data;
635 if (unlikely(!psock || psock->rx_stopped))
636 goto out;
638 if (psock->ready_rx_msg)
639 goto out;
641 if (psock->rx_need_bytes) {
642 if (tcp_inq(sk) >= psock->rx_need_bytes)
643 psock->rx_need_bytes = 0;
644 else
645 goto out;
648 if (psock_tcp_read_sock(psock) == -ENOMEM)
649 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
651 out:
652 read_unlock_bh(&sk->sk_callback_lock);
655 static void do_psock_rx_work(struct kcm_psock *psock)
657 read_descriptor_t rd_desc;
658 struct sock *csk = psock->sk;
660 /* We need the read lock to synchronize with psock_tcp_data_ready. We
661 * need the socket lock for calling tcp_read_sock. */
663 lock_sock(csk);
664 read_lock_bh(&csk->sk_callback_lock);
666 if (unlikely(csk->sk_user_data != psock))
667 goto out;
669 if (unlikely(psock->rx_stopped))
670 goto out;
672 if (psock->ready_rx_msg)
673 goto out;
675 rd_desc.arg.data = psock;
677 if (psock_tcp_read_sock(psock) == -ENOMEM)
678 queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
680 out:
681 read_unlock_bh(&csk->sk_callback_lock);
682 release_sock(csk);
685 static void psock_rx_work(struct work_struct *w)
687 do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
690 static void psock_rx_delayed_work(struct work_struct *w)
692 do_psock_rx_work(container_of(w, struct kcm_psock,
693 rx_delayed_work.work));
696 static void psock_tcp_state_change(struct sock *sk)
698 /* TCP only does a POLLIN for a half close. Do a POLLHUP here
699 * since application will normally not poll with POLLIN
700 * on the TCP sockets. */
703 report_csk_error(sk, EPIPE);
706 static void psock_tcp_write_space(struct sock *sk)
708 struct kcm_psock *psock;
709 struct kcm_mux *mux;
710 struct kcm_sock *kcm;
712 read_lock_bh(&sk->sk_callback_lock);
714 psock = (struct kcm_psock *)sk->sk_user_data;
715 if (unlikely(!psock))
716 goto out;
718 mux = psock->mux;
720 spin_lock_bh(&mux->lock);
722 /* Check if the psock is reserved; if so a KCM socket is waiting to send on it. */
723 kcm = psock->tx_kcm;
724 if (kcm)
725 queue_work(kcm_wq, &kcm->tx_work);
727 spin_unlock_bh(&mux->lock);
728 out:
729 read_unlock_bh(&sk->sk_callback_lock);
732 static void unreserve_psock(struct kcm_sock *kcm);
734 /* kcm sock is locked. */
735 static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
737 struct kcm_mux *mux = kcm->mux;
738 struct kcm_psock *psock;
740 psock = kcm->tx_psock;
742 smp_rmb(); /* Must read tx_psock before tx_wait */
744 if (psock) {
745 WARN_ON(kcm->tx_wait);
746 if (unlikely(psock->tx_stopped))
747 unreserve_psock(kcm);
748 else
749 return kcm->tx_psock;
752 spin_lock_bh(&mux->lock);
754 /* Check again under lock to see if a psock was reserved for this
755 * kcm via psock_now_avail. */
757 psock = kcm->tx_psock;
758 if (unlikely(psock)) {
759 WARN_ON(kcm->tx_wait);
760 spin_unlock_bh(&mux->lock);
761 return kcm->tx_psock;
764 if (!list_empty(&mux->psocks_avail)) {
765 psock = list_first_entry(&mux->psocks_avail,
766 struct kcm_psock,
767 psock_avail_list);
768 list_del(&psock->psock_avail_list);
769 if (kcm->tx_wait) {
770 list_del(&kcm->wait_psock_list);
771 kcm->tx_wait = false;
773 kcm->tx_psock = psock;
774 psock->tx_kcm = kcm;
775 KCM_STATS_INCR(psock->stats.reserved);
776 } else if (!kcm->tx_wait) {
777 list_add_tail(&kcm->wait_psock_list,
778 &mux->kcm_tx_waiters);
779 kcm->tx_wait = true;
782 spin_unlock_bh(&mux->lock);
784 return psock;
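/* A psock is reserved exclusively by one KCM socket while a message is being
 * transmitted, so message boundaries on the underlying TCP stream are never
 * interleaved; KCM sockets that find no psock available park themselves on
 * kcm_tx_waiters until psock_now_avail() hands one over.
 */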
787 /* mux lock held */
788 static void psock_now_avail(struct kcm_psock *psock)
790 struct kcm_mux *mux = psock->mux;
791 struct kcm_sock *kcm;
793 if (list_empty(&mux->kcm_tx_waiters)) {
794 list_add_tail(&psock->psock_avail_list,
795 &mux->psocks_avail);
796 } else {
797 kcm = list_first_entry(&mux->kcm_tx_waiters,
798 struct kcm_sock,
799 wait_psock_list);
800 list_del(&kcm->wait_psock_list);
801 kcm->tx_wait = false;
802 psock->tx_kcm = kcm;
804 /* Commit before changing tx_psock since that is read in
805 * reserve_psock before queuing work. */
807 smp_mb();
809 kcm->tx_psock = psock;
810 KCM_STATS_INCR(psock->stats.reserved);
811 queue_work(kcm_wq, &kcm->tx_work);
815 /* kcm sock is locked. */
816 static void unreserve_psock(struct kcm_sock *kcm)
818 struct kcm_psock *psock;
819 struct kcm_mux *mux = kcm->mux;
821 spin_lock_bh(&mux->lock);
823 psock = kcm->tx_psock;
825 if (WARN_ON(!psock)) {
826 spin_unlock_bh(&mux->lock);
827 return;
830 smp_rmb(); /* Read tx_psock before tx_wait */
832 kcm_update_tx_mux_stats(mux, psock);
834 WARN_ON(kcm->tx_wait);
836 kcm->tx_psock = NULL;
837 psock->tx_kcm = NULL;
838 KCM_STATS_INCR(psock->stats.unreserved);
840 if (unlikely(psock->tx_stopped)) {
841 if (psock->done) {
842 /* Deferred free */
843 list_del(&psock->psock_list);
844 mux->psocks_cnt--;
845 sock_put(psock->sk);
846 fput(psock->sk->sk_socket->file);
847 kmem_cache_free(kcm_psockp, psock);
850 /* Don't put back on available list */
852 spin_unlock_bh(&mux->lock);
854 return;
857 psock_now_avail(psock);
859 spin_unlock_bh(&mux->lock);
862 static void kcm_report_tx_retry(struct kcm_sock *kcm)
864 struct kcm_mux *mux = kcm->mux;
866 spin_lock_bh(&mux->lock);
867 KCM_STATS_INCR(mux->stats.tx_retries);
868 spin_unlock_bh(&mux->lock);
871 /* Write any messages ready on the kcm socket. Called with kcm sock lock
872 * held. Return bytes actually sent or error. */
874 static int kcm_write_msgs(struct kcm_sock *kcm)
876 struct sock *sk = &kcm->sk;
877 struct kcm_psock *psock;
878 struct sk_buff *skb, *head;
879 struct kcm_tx_msg *txm;
880 unsigned short fragidx, frag_offset;
881 unsigned int sent, total_sent = 0;
882 int ret = 0;
884 kcm->tx_wait_more = false;
885 psock = kcm->tx_psock;
886 if (unlikely(psock && psock->tx_stopped)) {
887 /* A reserved psock was aborted asynchronously. Unreserve
888 * it and we'll retry the message. */
890 unreserve_psock(kcm);
891 kcm_report_tx_retry(kcm);
892 if (skb_queue_empty(&sk->sk_write_queue))
893 return 0;
895 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
897 } else if (skb_queue_empty(&sk->sk_write_queue)) {
898 return 0;
901 head = skb_peek(&sk->sk_write_queue);
902 txm = kcm_tx_msg(head);
904 if (txm->sent) {
905 /* Send of first skbuff in queue already in progress */
906 if (WARN_ON(!psock)) {
907 ret = -EINVAL;
908 goto out;
910 sent = txm->sent;
911 frag_offset = txm->frag_offset;
912 fragidx = txm->fragidx;
913 skb = txm->frag_skb;
915 goto do_frag;
918 try_again:
919 psock = reserve_psock(kcm);
920 if (!psock)
921 goto out;
923 do {
924 skb = head;
925 txm = kcm_tx_msg(head);
926 sent = 0;
928 do_frag_list:
929 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
930 ret = -EINVAL;
931 goto out;
934 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
935 fragidx++) {
936 skb_frag_t *frag;
938 frag_offset = 0;
939 do_frag:
940 frag = &skb_shinfo(skb)->frags[fragidx];
941 if (WARN_ON(!frag->size)) {
942 ret = -EINVAL;
943 goto out;
946 ret = kernel_sendpage(psock->sk->sk_socket,
947 frag->page.p,
948 frag->page_offset + frag_offset,
949 frag->size - frag_offset,
950 MSG_DONTWAIT);
951 if (ret <= 0) {
952 if (ret == -EAGAIN) {
953 /* Save state to try again when there's
954 * write space on the socket */
956 txm->sent = sent;
957 txm->frag_offset = frag_offset;
958 txm->fragidx = fragidx;
959 txm->frag_skb = skb;
961 ret = 0;
962 goto out;
965 /* Hard failure in sending message, abort this
966 * psock since it has lost framing
967 * synchronization and retry sending the
968 * message from the beginning. */
970 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
971 true);
972 unreserve_psock(kcm);
974 txm->sent = 0;
975 kcm_report_tx_retry(kcm);
976 ret = 0;
978 goto try_again;
981 sent += ret;
982 frag_offset += ret;
983 KCM_STATS_ADD(psock->stats.tx_bytes, ret);
984 if (frag_offset < frag->size) {
985 /* Not finished with this frag */
986 goto do_frag;
990 if (skb == head) {
991 if (skb_has_frag_list(skb)) {
992 skb = skb_shinfo(skb)->frag_list;
993 goto do_frag_list;
995 } else if (skb->next) {
996 skb = skb->next;
997 goto do_frag_list;
1000 /* Successfully sent the whole packet, account for it. */
1001 skb_dequeue(&sk->sk_write_queue);
1002 kfree_skb(head);
1003 sk->sk_wmem_queued -= sent;
1004 total_sent += sent;
1005 KCM_STATS_INCR(psock->stats.tx_msgs);
1006 } while ((head = skb_peek(&sk->sk_write_queue)));
1007 out:
1008 if (!head) {
1009 /* Done with all queued messages. */
1010 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
1011 unreserve_psock(kcm);
1014 /* Check if write space is available */
1015 sk->sk_write_space(sk);
1017 return total_sent ? : ret;
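/* Any send failure other than -EAGAIN aborts the psock, since the underlying
 * TCP stream may have lost framing synchronization, and the message is
 * retried from the start on another psock; -EAGAIN instead saves the frag
 * position in kcm_tx_msg so the send resumes from kcm_tx_work when write
 * space becomes available.
 */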
1020 static void kcm_tx_work(struct work_struct *w)
1022 struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
1023 struct sock *sk = &kcm->sk;
1024 int err;
1026 lock_sock(sk);
1028 /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
1029 * aborts */
1031 err = kcm_write_msgs(kcm);
1032 if (err < 0) {
1033 /* Hard failure in write, report error on KCM socket */
1034 pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
1035 report_csk_error(&kcm->sk, -err);
1036 goto out;
1039 /* Primarily for SOCK_SEQPACKET sockets */
1040 if (likely(sk->sk_socket) &&
1041 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1042 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1043 sk->sk_write_space(sk);
1046 out:
1047 release_sock(sk);
1050 static void kcm_push(struct kcm_sock *kcm)
1052 if (kcm->tx_wait_more)
1053 kcm_write_msgs(kcm);
1056 static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
1057 int offset, size_t size, int flags)
1060 struct sock *sk = sock->sk;
1061 struct kcm_sock *kcm = kcm_sk(sk);
1062 struct sk_buff *skb = NULL, *head = NULL;
1063 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1064 bool eor;
1065 int err = 0;
1066 int i;
1068 if (flags & MSG_SENDPAGE_NOTLAST)
1069 flags |= MSG_MORE;
1071 /* No MSG_EOR from splice, only look at MSG_MORE */
1072 eor = !(flags & MSG_MORE);
1074 lock_sock(sk);
1076 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1078 err = -EPIPE;
1079 if (sk->sk_err)
1080 goto out_error;
1082 if (kcm->seq_skb) {
1083 /* Previously opened message */
1084 head = kcm->seq_skb;
1085 skb = kcm_tx_msg(head)->last_skb;
1086 i = skb_shinfo(skb)->nr_frags;
1088 if (skb_can_coalesce(skb, i, page, offset)) {
1089 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
1090 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1091 goto coalesced;
1094 if (i >= MAX_SKB_FRAGS) {
1095 struct sk_buff *tskb;
1097 tskb = alloc_skb(0, sk->sk_allocation);
1098 while (!tskb) {
1099 kcm_push(kcm);
1100 err = sk_stream_wait_memory(sk, &timeo);
1101 if (err)
1102 goto out_error;
1105 if (head == skb)
1106 skb_shinfo(head)->frag_list = tskb;
1107 else
1108 skb->next = tskb;
1110 skb = tskb;
1111 skb->ip_summed = CHECKSUM_UNNECESSARY;
1112 i = 0;
1114 } else {
1115 /* Call the sk_stream functions to manage the sndbuf mem. */
1116 if (!sk_stream_memory_free(sk)) {
1117 kcm_push(kcm);
1118 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1119 err = sk_stream_wait_memory(sk, &timeo);
1120 if (err)
1121 goto out_error;
1124 head = alloc_skb(0, sk->sk_allocation);
1125 while (!head) {
1126 kcm_push(kcm);
1127 err = sk_stream_wait_memory(sk, &timeo);
1128 if (err)
1129 goto out_error;
1132 skb = head;
1133 i = 0;
1136 get_page(page);
1137 skb_fill_page_desc(skb, i, page, offset, size);
1138 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1140 coalesced:
1141 skb->len += size;
1142 skb->data_len += size;
1143 skb->truesize += size;
1144 sk->sk_wmem_queued += size;
1145 sk_mem_charge(sk, size);
1147 if (head != skb) {
1148 head->len += size;
1149 head->data_len += size;
1150 head->truesize += size;
1153 if (eor) {
1154 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1156 /* Message complete, queue it on send buffer */
1157 __skb_queue_tail(&sk->sk_write_queue, head);
1158 kcm->seq_skb = NULL;
1159 KCM_STATS_INCR(kcm->stats.tx_msgs);
1161 if (flags & MSG_BATCH) {
1162 kcm->tx_wait_more = true;
1163 } else if (kcm->tx_wait_more || not_busy) {
1164 err = kcm_write_msgs(kcm);
1165 if (err < 0) {
1166 /* We got a hard error in write_msgs but have
1167 * already queued this message. Report an error
1168 * in the socket, but don't affect return value
1169 * from sendmsg */
1171 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1172 report_csk_error(&kcm->sk, -err);
1175 } else {
1176 /* Message not complete, save state */
1177 kcm->seq_skb = head;
1178 kcm_tx_msg(head)->last_skb = skb;
1181 KCM_STATS_ADD(kcm->stats.tx_bytes, size);
1183 release_sock(sk);
1184 return size;
1186 out_error:
1187 kcm_push(kcm);
1189 err = sk_stream_error(sk, flags, err);
1191 /* make sure we wake any epoll edge trigger waiter */
1192 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1193 sk->sk_write_space(sk);
1195 release_sock(sk);
1196 return err;
1199 static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1201 struct sock *sk = sock->sk;
1202 struct kcm_sock *kcm = kcm_sk(sk);
1203 struct sk_buff *skb = NULL, *head = NULL;
1204 size_t copy, copied = 0;
1205 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1206 int eor = (sock->type == SOCK_DGRAM) ?
1207 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
1208 int err = -EPIPE;
1210 lock_sock(sk);
1212 /* Per tcp_sendmsg this should be in poll */
1213 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1215 if (sk->sk_err)
1216 goto out_error;
1218 if (kcm->seq_skb) {
1219 /* Previously opened message */
1220 head = kcm->seq_skb;
1221 skb = kcm_tx_msg(head)->last_skb;
1222 goto start;
1225 /* Call the sk_stream functions to manage the sndbuf mem. */
1226 if (!sk_stream_memory_free(sk)) {
1227 kcm_push(kcm);
1228 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1229 err = sk_stream_wait_memory(sk, &timeo);
1230 if (err)
1231 goto out_error;
1234 /* New message, alloc head skb */
1235 head = alloc_skb(0, sk->sk_allocation);
1236 while (!head) {
1237 kcm_push(kcm);
1238 err = sk_stream_wait_memory(sk, &timeo);
1239 if (err)
1240 goto out_error;
1242 head = alloc_skb(0, sk->sk_allocation);
1245 skb = head;
1247 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
1248 * csum_and_copy_from_iter from skb_do_copy_data_nocache. */
1250 skb->ip_summed = CHECKSUM_UNNECESSARY;
1252 start:
1253 while (msg_data_left(msg)) {
1254 bool merge = true;
1255 int i = skb_shinfo(skb)->nr_frags;
1256 struct page_frag *pfrag = sk_page_frag(sk);
1258 if (!sk_page_frag_refill(sk, pfrag))
1259 goto wait_for_memory;
1261 if (!skb_can_coalesce(skb, i, pfrag->page,
1262 pfrag->offset)) {
1263 if (i == MAX_SKB_FRAGS) {
1264 struct sk_buff *tskb;
1266 tskb = alloc_skb(0, sk->sk_allocation);
1267 if (!tskb)
1268 goto wait_for_memory;
1270 if (head == skb)
1271 skb_shinfo(head)->frag_list = tskb;
1272 else
1273 skb->next = tskb;
1275 skb = tskb;
1276 skb->ip_summed = CHECKSUM_UNNECESSARY;
1277 continue;
1279 merge = false;
1282 copy = min_t(int, msg_data_left(msg),
1283 pfrag->size - pfrag->offset);
1285 if (!sk_wmem_schedule(sk, copy))
1286 goto wait_for_memory;
1288 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1289 pfrag->page,
1290 pfrag->offset,
1291 copy);
1292 if (err)
1293 goto out_error;
1295 /* Update the skb. */
1296 if (merge) {
1297 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1298 } else {
1299 skb_fill_page_desc(skb, i, pfrag->page,
1300 pfrag->offset, copy);
1301 get_page(pfrag->page);
1304 pfrag->offset += copy;
1305 copied += copy;
1306 if (head != skb) {
1307 head->len += copy;
1308 head->data_len += copy;
1311 continue;
1313 wait_for_memory:
1314 kcm_push(kcm);
1315 err = sk_stream_wait_memory(sk, &timeo);
1316 if (err)
1317 goto out_error;
1320 if (eor) {
1321 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1323 /* Message complete, queue it on send buffer */
1324 __skb_queue_tail(&sk->sk_write_queue, head);
1325 kcm->seq_skb = NULL;
1326 KCM_STATS_INCR(kcm->stats.tx_msgs);
1328 if (msg->msg_flags & MSG_BATCH) {
1329 kcm->tx_wait_more = true;
1330 } else if (kcm->tx_wait_more || not_busy) {
1331 err = kcm_write_msgs(kcm);
1332 if (err < 0) {
1333 /* We got a hard error in write_msgs but have
1334 * already queued this message. Report an error
1335 * in the socket, but don't affect return value
1336 * from sendmsg */
1338 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1339 report_csk_error(&kcm->sk, -err);
1342 } else {
1343 /* Message not complete, save state */
1344 partial_message:
1345 kcm->seq_skb = head;
1346 kcm_tx_msg(head)->last_skb = skb;
1349 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1351 release_sock(sk);
1352 return copied;
1354 out_error:
1355 kcm_push(kcm);
1357 if (copied && sock->type == SOCK_SEQPACKET) {
1358 /* Wrote some bytes before encountering an
1359 * error, return partial success. */
1361 goto partial_message;
1364 if (head != kcm->seq_skb)
1365 kfree_skb(head);
1367 err = sk_stream_error(sk, msg->msg_flags, err);
1369 /* make sure we wake any epoll edge trigger waiter */
1370 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1371 sk->sk_write_space(sk);
1373 release_sock(sk);
1374 return err;
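/* Message boundaries on send: for SOCK_DGRAM every sendmsg without MSG_MORE
 * closes the message, while SOCK_SEQPACKET keeps it open across calls until
 * MSG_EOR is set (partial state lives in kcm->seq_skb). MSG_BATCH defers
 * kcm_write_msgs() so several messages can be flushed to the psock together.
 */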
1377 static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1378 long timeo, int *err)
1380 struct sk_buff *skb;
1382 while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1383 if (sk->sk_err) {
1384 *err = sock_error(sk);
1385 return NULL;
1388 if (sock_flag(sk, SOCK_DONE))
1389 return NULL;
1391 if ((flags & MSG_DONTWAIT) || !timeo) {
1392 *err = -EAGAIN;
1393 return NULL;
1396 sk_wait_data(sk, &timeo, NULL);
1398 /* Handle signals */
1399 if (signal_pending(current)) {
1400 *err = sock_intr_errno(timeo);
1401 return NULL;
1405 return skb;
1408 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1409 size_t len, int flags)
1411 struct sock *sk = sock->sk;
1412 struct kcm_sock *kcm = kcm_sk(sk);
1413 int err = 0;
1414 long timeo;
1415 struct kcm_rx_msg *rxm;
1416 int copied = 0;
1417 struct sk_buff *skb;
1419 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1421 lock_sock(sk);
1423 skb = kcm_wait_data(sk, flags, timeo, &err);
1424 if (!skb)
1425 goto out;
1427 /* Okay, have a message on the receive queue */
1429 rxm = kcm_rx_msg(skb);
1431 if (len > rxm->full_len)
1432 len = rxm->full_len;
1434 err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1435 if (err < 0)
1436 goto out;
1438 copied = len;
1439 if (likely(!(flags & MSG_PEEK))) {
1440 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1441 if (copied < rxm->full_len) {
1442 if (sock->type == SOCK_DGRAM) {
1443 /* Truncated message */
1444 msg->msg_flags |= MSG_TRUNC;
1445 goto msg_finished;
1447 rxm->offset += copied;
1448 rxm->full_len -= copied;
1449 } else {
1450 msg_finished:
1451 /* Finished with message */
1452 msg->msg_flags |= MSG_EOR;
1453 KCM_STATS_INCR(kcm->stats.rx_msgs);
1454 skb_unlink(skb, &sk->sk_receive_queue);
1455 kfree_skb(skb);
1459 out:
1460 release_sock(sk);
1462 return copied ? : err;
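/* recvmsg returns at most one parsed message per call. A short read on
 * SOCK_DGRAM sets MSG_TRUNC and drops the remainder, while SOCK_SEQPACKET
 * leaves the remainder queued; MSG_EOR is only set once a message has been
 * fully consumed.
 */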
1465 static ssize_t kcm_sock_splice(struct sock *sk,
1466 struct pipe_inode_info *pipe,
1467 struct splice_pipe_desc *spd)
1469 int ret;
1471 release_sock(sk);
1472 ret = splice_to_pipe(pipe, spd);
1473 lock_sock(sk);
1475 return ret;
1478 static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1479 struct pipe_inode_info *pipe, size_t len,
1480 unsigned int flags)
1482 struct sock *sk = sock->sk;
1483 struct kcm_sock *kcm = kcm_sk(sk);
1484 long timeo;
1485 struct kcm_rx_msg *rxm;
1486 int err = 0;
1487 ssize_t copied;
1488 struct sk_buff *skb;
1490 /* Only support splice for SOCK_SEQPACKET */
1492 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1494 lock_sock(sk);
1496 skb = kcm_wait_data(sk, flags, timeo, &err);
1497 if (!skb)
1498 goto err_out;
1500 /* Okay, have a message on the receive queue */
1502 rxm = kcm_rx_msg(skb);
1504 if (len > rxm->full_len)
1505 len = rxm->full_len;
1507 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags,
1508 kcm_sock_splice);
1509 if (copied < 0) {
1510 err = copied;
1511 goto err_out;
1514 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1516 rxm->offset += copied;
1517 rxm->full_len -= copied;
1519 /* We have no way to return MSG_EOR. If all the bytes have been
1520 * read we still leave the message in the receive socket buffer.
1521 * A subsequent recvmsg needs to be done to return MSG_EOR and
1522 * finish reading the message. */
1525 release_sock(sk);
1527 return copied;
1529 err_out:
1530 release_sock(sk);
1532 return err;
1535 /* kcm sock lock held */
1536 static void kcm_recv_disable(struct kcm_sock *kcm)
1538 struct kcm_mux *mux = kcm->mux;
1540 if (kcm->rx_disabled)
1541 return;
1543 spin_lock_bh(&mux->rx_lock);
1545 kcm->rx_disabled = 1;
1547 /* If a psock is reserved we'll do cleanup in unreserve */
1548 if (!kcm->rx_psock) {
1549 if (kcm->rx_wait) {
1550 list_del(&kcm->wait_rx_list);
1551 kcm->rx_wait = false;
1554 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1557 spin_unlock_bh(&mux->rx_lock);
1560 /* kcm sock lock held */
1561 static void kcm_recv_enable(struct kcm_sock *kcm)
1563 struct kcm_mux *mux = kcm->mux;
1565 if (!kcm->rx_disabled)
1566 return;
1568 spin_lock_bh(&mux->rx_lock);
1570 kcm->rx_disabled = 0;
1571 kcm_rcv_ready(kcm);
1573 spin_unlock_bh(&mux->rx_lock);
1576 static int kcm_setsockopt(struct socket *sock, int level, int optname,
1577 char __user *optval, unsigned int optlen)
1579 struct kcm_sock *kcm = kcm_sk(sock->sk);
1580 int val, valbool;
1581 int err = 0;
1583 if (level != SOL_KCM)
1584 return -ENOPROTOOPT;
1586 if (optlen < sizeof(int))
1587 return -EINVAL;
1589 if (get_user(val, (int __user *)optval))
1590 return -EINVAL;
1592 valbool = val ? 1 : 0;
1594 switch (optname) {
1595 case KCM_RECV_DISABLE:
1596 lock_sock(&kcm->sk);
1597 if (valbool)
1598 kcm_recv_disable(kcm);
1599 else
1600 kcm_recv_enable(kcm);
1601 release_sock(&kcm->sk);
1602 break;
1603 default:
1604 err = -ENOPROTOOPT;
1607 return err;
1610 static int kcm_getsockopt(struct socket *sock, int level, int optname,
1611 char __user *optval, int __user *optlen)
1613 struct kcm_sock *kcm = kcm_sk(sock->sk);
1614 int val, len;
1616 if (level != SOL_KCM)
1617 return -ENOPROTOOPT;
1619 if (get_user(len, optlen))
1620 return -EFAULT;
1622 len = min_t(unsigned int, len, sizeof(int));
1623 if (len < 0)
1624 return -EINVAL;
1626 switch (optname) {
1627 case KCM_RECV_DISABLE:
1628 val = kcm->rx_disabled;
1629 break;
1630 default:
1631 return -ENOPROTOOPT;
1634 if (put_user(len, optlen))
1635 return -EFAULT;
1636 if (copy_to_user(optval, &val, len))
1637 return -EFAULT;
1638 return 0;
1641 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1643 struct kcm_sock *tkcm;
1644 struct list_head *head;
1645 int index = 0;
1647 /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1648 * we set sk_state, otherwise epoll_wait always returns right away with
1649 * POLLHUP */
1651 kcm->sk.sk_state = TCP_ESTABLISHED;
1653 /* Add to mux's kcm sockets list */
1654 kcm->mux = mux;
1655 spin_lock_bh(&mux->lock);
1657 head = &mux->kcm_socks;
1658 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1659 if (tkcm->index != index)
1660 break;
1661 head = &tkcm->kcm_sock_list;
1662 index++;
1665 list_add(&kcm->kcm_sock_list, head);
1666 kcm->index = index;
1668 mux->kcm_socks_cnt++;
1669 spin_unlock_bh(&mux->lock);
1671 INIT_WORK(&kcm->tx_work, kcm_tx_work);
1673 spin_lock_bh(&mux->rx_lock);
1674 kcm_rcv_ready(kcm);
1675 spin_unlock_bh(&mux->rx_lock);
1678 static void kcm_rx_msg_timeout(unsigned long arg)
1680 struct kcm_psock *psock = (struct kcm_psock *)arg;
1682 /* Message assembly timed out */
1683 KCM_STATS_INCR(psock->stats.rx_msg_timeouts);
1684 kcm_abort_rx_psock(psock, ETIMEDOUT, NULL);
1687 static int kcm_attach(struct socket *sock, struct socket *csock,
1688 struct bpf_prog *prog)
1690 struct kcm_sock *kcm = kcm_sk(sock->sk);
1691 struct kcm_mux *mux = kcm->mux;
1692 struct sock *csk;
1693 struct kcm_psock *psock = NULL, *tpsock;
1694 struct list_head *head;
1695 int index = 0;
1697 if (csock->ops->family != PF_INET &&
1698 csock->ops->family != PF_INET6)
1699 return -EINVAL;
1701 csk = csock->sk;
1702 if (!csk)
1703 return -EINVAL;
1705 /* Only support TCP for now */
1706 if (csk->sk_protocol != IPPROTO_TCP)
1707 return -EINVAL;
1709 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1710 if (!psock)
1711 return -ENOMEM;
1713 psock->mux = mux;
1714 psock->sk = csk;
1715 psock->bpf_prog = prog;
1717 setup_timer(&psock->rx_msg_timer, kcm_rx_msg_timeout,
1718 (unsigned long)psock);
1720 INIT_WORK(&psock->rx_work, psock_rx_work);
1721 INIT_DELAYED_WORK(&psock->rx_delayed_work, psock_rx_delayed_work);
1723 sock_hold(csk);
1725 write_lock_bh(&csk->sk_callback_lock);
1726 psock->save_data_ready = csk->sk_data_ready;
1727 psock->save_write_space = csk->sk_write_space;
1728 psock->save_state_change = csk->sk_state_change;
1729 csk->sk_user_data = psock;
1730 csk->sk_data_ready = psock_tcp_data_ready;
1731 csk->sk_write_space = psock_tcp_write_space;
1732 csk->sk_state_change = psock_tcp_state_change;
1733 write_unlock_bh(&csk->sk_callback_lock);
1735 /* Finished initialization, now add the psock to the MUX. */
1736 spin_lock_bh(&mux->lock);
1737 head = &mux->psocks;
1738 list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1739 if (tpsock->index != index)
1740 break;
1741 head = &tpsock->psock_list;
1742 index++;
1745 list_add(&psock->psock_list, head);
1746 psock->index = index;
1748 KCM_STATS_INCR(mux->stats.psock_attach);
1749 mux->psocks_cnt++;
1750 psock_now_avail(psock);
1751 spin_unlock_bh(&mux->lock);
1753 /* Schedule RX work in case there are already bytes queued */
1754 queue_work(kcm_wq, &psock->rx_work);
1756 return 0;
1759 static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1761 struct socket *csock;
1762 struct bpf_prog *prog;
1763 int err;
1765 csock = sockfd_lookup(info->fd, &err);
1766 if (!csock)
1767 return -ENOENT;
1769 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1770 if (IS_ERR(prog)) {
1771 err = PTR_ERR(prog);
1772 goto out;
1775 err = kcm_attach(sock, csock, prog);
1776 if (err) {
1777 bpf_prog_put(prog);
1778 goto out;
1781 /* Keep reference on file also */
1783 return 0;
1784 out:
1785 fput(csock->file);
1786 return err;
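/* Rough sketch of the userspace attach flow this ioctl expects, assuming the
 * definitions from include/uapi/linux/kcm.h (a connected TCP socket tcp_fd
 * and a loaded BPF_PROG_TYPE_SOCKET_FILTER program prog_fd that returns the
 * message length):
 *
 *	int kcmfd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *	struct kcm_attach attach = {
 *		.fd = tcp_fd,
 *		.bpf_fd = prog_fd,
 *	};
 *	ioctl(kcmfd, SIOCKCMATTACH, &attach);
 */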
1789 static void kcm_unattach(struct kcm_psock *psock)
1791 struct sock *csk = psock->sk;
1792 struct kcm_mux *mux = psock->mux;
1794 /* Stop getting callbacks from TCP socket. After this there should
1795 * be no way to reserve a kcm for this psock. */
1797 write_lock_bh(&csk->sk_callback_lock);
1798 csk->sk_user_data = NULL;
1799 csk->sk_data_ready = psock->save_data_ready;
1800 csk->sk_write_space = psock->save_write_space;
1801 csk->sk_state_change = psock->save_state_change;
1802 psock->rx_stopped = 1;
1804 if (WARN_ON(psock->rx_kcm)) {
1805 write_unlock_bh(&csk->sk_callback_lock);
1806 return;
1809 spin_lock_bh(&mux->rx_lock);
1811 /* Stop receiver activities. After this point psock should not be
1812 * able to get onto ready list either through callbacks or work. */
1814 if (psock->ready_rx_msg) {
1815 list_del(&psock->psock_ready_list);
1816 kfree_skb(psock->ready_rx_msg);
1817 psock->ready_rx_msg = NULL;
1818 KCM_STATS_INCR(mux->stats.rx_ready_drops);
1821 spin_unlock_bh(&mux->rx_lock);
1823 write_unlock_bh(&csk->sk_callback_lock);
1825 del_timer_sync(&psock->rx_msg_timer);
1826 cancel_work_sync(&psock->rx_work);
1827 cancel_delayed_work_sync(&psock->rx_delayed_work);
1829 bpf_prog_put(psock->bpf_prog);
1831 kfree_skb(psock->rx_skb_head);
1832 psock->rx_skb_head = NULL;
1834 spin_lock_bh(&mux->lock);
1836 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1838 KCM_STATS_INCR(mux->stats.psock_unattach);
1840 if (psock->tx_kcm) {
1841 /* psock was reserved. Just mark it finished and we will clean
1842 * up in the kcm paths, we need kcm lock which can not be
1843 * acquired here. */
1845 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1846 spin_unlock_bh(&mux->lock);
1848 /* We are unattaching a socket that is reserved. Abort the
1849 * socket since we may be out of sync in sending on it. We need
1850 * to do this without the mux lock. */
1852 kcm_abort_tx_psock(psock, EPIPE, false);
1854 spin_lock_bh(&mux->lock);
1855 if (!psock->tx_kcm) {
1856 /* psock was unreserved in the window the mux was unlocked */
1857 goto no_reserved;
1859 psock->done = 1;
1861 /* Commit done before queuing work to process it */
1862 smp_mb();
1864 /* Queue tx work to make sure psock->done is handled */
1865 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1866 spin_unlock_bh(&mux->lock);
1867 } else {
1868 no_reserved:
1869 if (!psock->tx_stopped)
1870 list_del(&psock->psock_avail_list);
1871 list_del(&psock->psock_list);
1872 mux->psocks_cnt--;
1873 spin_unlock_bh(&mux->lock);
1875 sock_put(csk);
1876 fput(csk->sk_socket->file);
1877 kmem_cache_free(kcm_psockp, psock);
1881 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1883 struct kcm_sock *kcm = kcm_sk(sock->sk);
1884 struct kcm_mux *mux = kcm->mux;
1885 struct kcm_psock *psock;
1886 struct socket *csock;
1887 struct sock *csk;
1888 int err;
1890 csock = sockfd_lookup(info->fd, &err);
1891 if (!csock)
1892 return -ENOENT;
1894 csk = csock->sk;
1895 if (!csk) {
1896 err = -EINVAL;
1897 goto out;
1900 err = -ENOENT;
1902 spin_lock_bh(&mux->lock);
1904 list_for_each_entry(psock, &mux->psocks, psock_list) {
1905 if (psock->sk != csk)
1906 continue;
1908 /* Found the matching psock */
1910 if (psock->unattaching || WARN_ON(psock->done)) {
1911 err = -EALREADY;
1912 break;
1915 psock->unattaching = 1;
1917 spin_unlock_bh(&mux->lock);
1919 kcm_unattach(psock);
1921 err = 0;
1922 goto out;
1925 spin_unlock_bh(&mux->lock);
1927 out:
1928 fput(csock->file);
1929 return err;
1932 static struct proto kcm_proto = {
1933 .name = "KCM",
1934 .owner = THIS_MODULE,
1935 .obj_size = sizeof(struct kcm_sock),
1938 /* Clone a kcm socket. */
1939 static int kcm_clone(struct socket *osock, struct kcm_clone *info,
1940 struct socket **newsockp)
1942 struct socket *newsock;
1943 struct sock *newsk;
1944 struct file *newfile;
1945 int err, newfd;
1947 err = -ENFILE;
1948 newsock = sock_alloc();
1949 if (!newsock)
1950 goto out;
1952 newsock->type = osock->type;
1953 newsock->ops = osock->ops;
1955 __module_get(newsock->ops->owner);
1957 newfd = get_unused_fd_flags(0);
1958 if (unlikely(newfd < 0)) {
1959 err = newfd;
1960 goto out_fd_fail;
1963 newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1964 if (unlikely(IS_ERR(newfile))) {
1965 err = PTR_ERR(newfile);
1966 goto out_sock_alloc_fail;
1969 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1970 &kcm_proto, true);
1971 if (!newsk) {
1972 err = -ENOMEM;
1973 goto out_sk_alloc_fail;
1976 sock_init_data(newsock, newsk);
1977 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1979 fd_install(newfd, newfile);
1980 *newsockp = newsock;
1981 info->fd = newfd;
1983 return 0;
1985 out_sk_alloc_fail:
1986 fput(newfile);
1987 out_sock_alloc_fail:
1988 put_unused_fd(newfd);
1989 out_fd_fail:
1990 sock_release(newsock);
1991 out:
1992 return err;
1995 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1997 int err;
1999 switch (cmd) {
2000 case SIOCKCMATTACH: {
2001 struct kcm_attach info;
2003 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2004 err = -EFAULT;
2006 err = kcm_attach_ioctl(sock, &info);
2008 break;
2010 case SIOCKCMUNATTACH: {
2011 struct kcm_unattach info;
2013 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2014 err = -EFAULT;
2016 err = kcm_unattach_ioctl(sock, &info);
2018 break;
2020 case SIOCKCMCLONE: {
2021 struct kcm_clone info;
2022 struct socket *newsock = NULL;
2024 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
2025 err = -EFAULT;
2027 err = kcm_clone(sock, &info, &newsock);
2029 if (!err) {
2030 if (copy_to_user((void __user *)arg, &info,
2031 sizeof(info))) {
2032 err = -EFAULT;
2033 sys_close(info.fd);
2037 break;
2039 default:
2040 err = -ENOIOCTLCMD;
2041 break;
2044 return err;
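/* SIOCKCMCLONE hands back a new KCM socket (installed as a fresh fd in
 * info.fd) that shares this socket's mux, so several threads or processes
 * can send and receive parsed messages over the same set of attached
 * transport sockets.
 */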
2047 static void free_mux(struct rcu_head *rcu)
2049 struct kcm_mux *mux = container_of(rcu,
2050 struct kcm_mux, rcu);
2052 kmem_cache_free(kcm_muxp, mux);
2055 static void release_mux(struct kcm_mux *mux)
2057 struct kcm_net *knet = mux->knet;
2058 struct kcm_psock *psock, *tmp_psock;
2060 /* Release psocks */
2061 list_for_each_entry_safe(psock, tmp_psock,
2062 &mux->psocks, psock_list) {
2063 if (!WARN_ON(psock->unattaching))
2064 kcm_unattach(psock);
2067 if (WARN_ON(mux->psocks_cnt))
2068 return;
2070 __skb_queue_purge(&mux->rx_hold_queue);
2072 mutex_lock(&knet->mutex);
2073 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
2074 aggregate_psock_stats(&mux->aggregate_psock_stats,
2075 &knet->aggregate_psock_stats);
2076 list_del_rcu(&mux->kcm_mux_list);
2077 knet->count--;
2078 mutex_unlock(&knet->mutex);
2080 call_rcu(&mux->rcu, free_mux);
2083 static void kcm_done(struct kcm_sock *kcm)
2085 struct kcm_mux *mux = kcm->mux;
2086 struct sock *sk = &kcm->sk;
2087 int socks_cnt;
2089 spin_lock_bh(&mux->rx_lock);
2090 if (kcm->rx_psock) {
2091 /* Cleanup in unreserve_rx_kcm */
2092 WARN_ON(kcm->done);
2093 kcm->rx_disabled = 1;
2094 kcm->done = 1;
2095 spin_unlock_bh(&mux->rx_lock);
2096 return;
2099 if (kcm->rx_wait) {
2100 list_del(&kcm->wait_rx_list);
2101 kcm->rx_wait = false;
2103 /* Move any pending receive messages to other kcm sockets */
2104 requeue_rx_msgs(mux, &sk->sk_receive_queue);
2106 spin_unlock_bh(&mux->rx_lock);
2108 if (WARN_ON(sk_rmem_alloc_get(sk)))
2109 return;
2111 /* Detach from MUX */
2112 spin_lock_bh(&mux->lock);
2114 list_del(&kcm->kcm_sock_list);
2115 mux->kcm_socks_cnt--;
2116 socks_cnt = mux->kcm_socks_cnt;
2118 spin_unlock_bh(&mux->lock);
2120 if (!socks_cnt) {
2121 /* We are done with the mux now. */
2122 release_mux(mux);
2125 WARN_ON(kcm->rx_wait);
2127 sock_put(&kcm->sk);
2130 /* Called by kcm_release to close a KCM socket.
2131 * If this is the last KCM socket on the MUX, destroy the MUX. */
2133 static int kcm_release(struct socket *sock)
2135 struct sock *sk = sock->sk;
2136 struct kcm_sock *kcm;
2137 struct kcm_mux *mux;
2138 struct kcm_psock *psock;
2140 if (!sk)
2141 return 0;
2143 kcm = kcm_sk(sk);
2144 mux = kcm->mux;
2146 sock_orphan(sk);
2147 kfree_skb(kcm->seq_skb);
2149 lock_sock(sk);
2150 /* Purge queue under lock to avoid race condition with tx_work trying
2151 * to act when queue is nonempty. If tx_work runs after this point
2152 * it will just return. */
2154 __skb_queue_purge(&sk->sk_write_queue);
2155 release_sock(sk);
2157 spin_lock_bh(&mux->lock);
2158 if (kcm->tx_wait) {
2159 /* Take off the tx_wait list; after this point there should be no way
2160 * that a psock will be assigned to this kcm. */
2162 list_del(&kcm->wait_psock_list);
2163 kcm->tx_wait = false;
2165 spin_unlock_bh(&mux->lock);
2167 /* Cancel work. After this point there should be no outside references
2168 * to the kcm socket. */
2170 cancel_work_sync(&kcm->tx_work);
2172 lock_sock(sk);
2173 psock = kcm->tx_psock;
2174 if (psock) {
2175 /* A psock was reserved, so we need to kill it since it
2176 * may already have some bytes queued from a message. We
2177 * need to do this after removing kcm from tx_wait list. */
2179 kcm_abort_tx_psock(psock, EPIPE, false);
2180 unreserve_psock(kcm);
2182 release_sock(sk);
2184 WARN_ON(kcm->tx_wait);
2185 WARN_ON(kcm->tx_psock);
2187 sock->sk = NULL;
2189 kcm_done(kcm);
2191 return 0;
2194 static const struct proto_ops kcm_dgram_ops = {
2195 .family = PF_KCM,
2196 .owner = THIS_MODULE,
2197 .release = kcm_release,
2198 .bind = sock_no_bind,
2199 .connect = sock_no_connect,
2200 .socketpair = sock_no_socketpair,
2201 .accept = sock_no_accept,
2202 .getname = sock_no_getname,
2203 .poll = datagram_poll,
2204 .ioctl = kcm_ioctl,
2205 .listen = sock_no_listen,
2206 .shutdown = sock_no_shutdown,
2207 .setsockopt = kcm_setsockopt,
2208 .getsockopt = kcm_getsockopt,
2209 .sendmsg = kcm_sendmsg,
2210 .recvmsg = kcm_recvmsg,
2211 .mmap = sock_no_mmap,
2212 .sendpage = kcm_sendpage,
2215 static const struct proto_ops kcm_seqpacket_ops = {
2216 .family = PF_KCM,
2217 .owner = THIS_MODULE,
2218 .release = kcm_release,
2219 .bind = sock_no_bind,
2220 .connect = sock_no_connect,
2221 .socketpair = sock_no_socketpair,
2222 .accept = sock_no_accept,
2223 .getname = sock_no_getname,
2224 .poll = datagram_poll,
2225 .ioctl = kcm_ioctl,
2226 .listen = sock_no_listen,
2227 .shutdown = sock_no_shutdown,
2228 .setsockopt = kcm_setsockopt,
2229 .getsockopt = kcm_getsockopt,
2230 .sendmsg = kcm_sendmsg,
2231 .recvmsg = kcm_recvmsg,
2232 .mmap = sock_no_mmap,
2233 .sendpage = kcm_sendpage,
2234 .splice_read = kcm_splice_read,
2237 /* Create proto operation for kcm sockets */
2238 static int kcm_create(struct net *net, struct socket *sock,
2239 int protocol, int kern)
2241 struct kcm_net *knet = net_generic(net, kcm_net_id);
2242 struct sock *sk;
2243 struct kcm_mux *mux;
2245 switch (sock->type) {
2246 case SOCK_DGRAM:
2247 sock->ops = &kcm_dgram_ops;
2248 break;
2249 case SOCK_SEQPACKET:
2250 sock->ops = &kcm_seqpacket_ops;
2251 break;
2252 default:
2253 return -ESOCKTNOSUPPORT;
2256 if (protocol != KCMPROTO_CONNECTED)
2257 return -EPROTONOSUPPORT;
2259 sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
2260 if (!sk)
2261 return -ENOMEM;
2263 /* Allocate a kcm mux, shared between KCM sockets */
2264 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
2265 if (!mux) {
2266 sk_free(sk);
2267 return -ENOMEM;
2270 spin_lock_init(&mux->lock);
2271 spin_lock_init(&mux->rx_lock);
2272 INIT_LIST_HEAD(&mux->kcm_socks);
2273 INIT_LIST_HEAD(&mux->kcm_rx_waiters);
2274 INIT_LIST_HEAD(&mux->kcm_tx_waiters);
2276 INIT_LIST_HEAD(&mux->psocks);
2277 INIT_LIST_HEAD(&mux->psocks_ready);
2278 INIT_LIST_HEAD(&mux->psocks_avail);
2280 mux->knet = knet;
2282 /* Add new MUX to list */
2283 mutex_lock(&knet->mutex);
2284 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
2285 knet->count++;
2286 mutex_unlock(&knet->mutex);
2288 skb_queue_head_init(&mux->rx_hold_queue);
2290 /* Init KCM socket */
2291 sock_init_data(sock, sk);
2292 init_kcm_sock(kcm_sk(sk), mux);
2294 return 0;
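/* Each socket(AF_KCM, ...) call creates a fresh mux; additional KCM sockets
 * on an existing mux are obtained with SIOCKCMCLONE rather than another
 * socket() call, and the mux is released when its last KCM socket is closed
 * (see kcm_done/release_mux).
 */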
2297 static struct net_proto_family kcm_family_ops = {
2298 .family = PF_KCM,
2299 .create = kcm_create,
2300 .owner = THIS_MODULE,
2303 static __net_init int kcm_init_net(struct net *net)
2305 struct kcm_net *knet = net_generic(net, kcm_net_id);
2307 INIT_LIST_HEAD_RCU(&knet->mux_list);
2308 mutex_init(&knet->mutex);
2310 return 0;
2313 static __net_exit void kcm_exit_net(struct net *net)
2315 struct kcm_net *knet = net_generic(net, kcm_net_id);
2317 /* All KCM sockets should be closed at this point, which should mean
2318 * that all multiplexors and psocks have been destroyed. */
2320 WARN_ON(!list_empty(&knet->mux_list));
2323 static struct pernet_operations kcm_net_ops = {
2324 .init = kcm_init_net,
2325 .exit = kcm_exit_net,
2326 .id = &kcm_net_id,
2327 .size = sizeof(struct kcm_net),
2330 static int __init kcm_init(void)
2332 int err = -ENOMEM;
2334 kcm_muxp = kmem_cache_create("kcm_mux_cache",
2335 sizeof(struct kcm_mux), 0,
2336 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2337 if (!kcm_muxp)
2338 goto fail;
2340 kcm_psockp = kmem_cache_create("kcm_psock_cache",
2341 sizeof(struct kcm_psock), 0,
2342 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2343 if (!kcm_psockp)
2344 goto fail;
2346 kcm_wq = create_singlethread_workqueue("kkcmd");
2347 if (!kcm_wq)
2348 goto fail;
2350 err = proto_register(&kcm_proto, 1);
2351 if (err)
2352 goto fail;
2354 err = sock_register(&kcm_family_ops);
2355 if (err)
2356 goto sock_register_fail;
2358 err = register_pernet_device(&kcm_net_ops);
2359 if (err)
2360 goto net_ops_fail;
2362 err = kcm_proc_init();
2363 if (err)
2364 goto proc_init_fail;
2366 return 0;
2368 proc_init_fail:
2369 unregister_pernet_device(&kcm_net_ops);
2371 net_ops_fail:
2372 sock_unregister(PF_KCM);
2374 sock_register_fail:
2375 proto_unregister(&kcm_proto);
2377 fail:
2378 kmem_cache_destroy(kcm_muxp);
2379 kmem_cache_destroy(kcm_psockp);
2381 if (kcm_wq)
2382 destroy_workqueue(kcm_wq);
2384 return err;
2387 static void __exit kcm_exit(void)
2389 kcm_proc_exit();
2390 unregister_pernet_device(&kcm_net_ops);
2391 sock_unregister(PF_KCM);
2392 proto_unregister(&kcm_proto);
2393 destroy_workqueue(kcm_wq);
2395 kmem_cache_destroy(kcm_muxp);
2396 kmem_cache_destroy(kcm_psockp);
2399 module_init(kcm_init);
2400 module_exit(kcm_exit);
2402 MODULE_LICENSE("GPL");
2403 MODULE_ALIAS_NETPROTO(PF_KCM);