net/kcm/kcmsock.c
1 /*
2 * Kernel Connection Multiplexor
4 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 */
11 #include <linux/bpf.h>
12 #include <linux/errno.h>
13 #include <linux/errqueue.h>
14 #include <linux/file.h>
15 #include <linux/in.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/net.h>
19 #include <linux/netdevice.h>
20 #include <linux/poll.h>
21 #include <linux/rculist.h>
22 #include <linux/skbuff.h>
23 #include <linux/socket.h>
24 #include <linux/uaccess.h>
25 #include <linux/workqueue.h>
26 #include <linux/syscalls.h>
27 #include <net/kcm.h>
28 #include <net/netns/generic.h>
29 #include <net/sock.h>
30 #include <uapi/linux/kcm.h>
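/* Illustrative userspace usage (a minimal sketch, not part of this file;
 * assumes a connected TCP socket tcp_fd, a framing program already loaded
 * as bpf_prog_fd, and the UAPI definitions from <linux/kcm.h>):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/kcm.h>
 *
 *	int kcm_fd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *
 *	struct kcm_attach attach = {
 *		.fd = tcp_fd,			// transport to multiplex
 *		.bpf_fd = bpf_prog_fd,		// BPF_PROG_TYPE_SOCKET_FILTER
 *	};
 *	ioctl(kcm_fd, SIOCKCMATTACH, &attach);
 *
 *	// ... send()/recv() framed messages on kcm_fd ...
 *
 *	struct kcm_unattach unattach = { .fd = tcp_fd };
 *	ioctl(kcm_fd, SIOCKCMUNATTACH, &unattach);
 */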
32 unsigned int kcm_net_id;
34 static struct kmem_cache *kcm_psockp __read_mostly;
35 static struct kmem_cache *kcm_muxp __read_mostly;
36 static struct workqueue_struct *kcm_wq;
38 static inline struct kcm_sock *kcm_sk(const struct sock *sk)
40 return (struct kcm_sock *)sk;
43 static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
45 return (struct kcm_tx_msg *)skb->cb;
48 static void report_csk_error(struct sock *csk, int err)
50 csk->sk_err = EPIPE;
51 csk->sk_error_report(csk);
54 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
55 bool wakeup_kcm)
57 struct sock *csk = psock->sk;
58 struct kcm_mux *mux = psock->mux;
60 /* Unrecoverable error in transmit */
62 spin_lock_bh(&mux->lock);
64 if (psock->tx_stopped) {
65 spin_unlock_bh(&mux->lock);
66 return;
69 psock->tx_stopped = 1;
70 KCM_STATS_INCR(psock->stats.tx_aborts);
72 if (!psock->tx_kcm) {
73 /* Take off psocks_avail list */
74 list_del(&psock->psock_avail_list);
75 } else if (wakeup_kcm) {
76 /* In this case psock is being aborted while outside of
77 * write_msgs and psock is reserved. Schedule tx_work
78 * to handle the failure there. Need to commit tx_stopped
79 * before queuing work.
81 smp_mb();
83 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
86 spin_unlock_bh(&mux->lock);
88 /* Report error on lower socket */
89 report_csk_error(csk, err);
92 /* RX mux lock held. */
93 static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
94 struct kcm_psock *psock)
96 STRP_STATS_ADD(mux->stats.rx_bytes,
97 psock->strp.stats.rx_bytes -
98 psock->saved_rx_bytes);
99 mux->stats.rx_msgs +=
100 psock->strp.stats.rx_msgs - psock->saved_rx_msgs;
101 psock->saved_rx_msgs = psock->strp.stats.rx_msgs;
102 psock->saved_rx_bytes = psock->strp.stats.rx_bytes;
105 static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
106 struct kcm_psock *psock)
108 KCM_STATS_ADD(mux->stats.tx_bytes,
109 psock->stats.tx_bytes - psock->saved_tx_bytes);
110 mux->stats.tx_msgs +=
111 psock->stats.tx_msgs - psock->saved_tx_msgs;
112 psock->saved_tx_msgs = psock->stats.tx_msgs;
113 psock->saved_tx_bytes = psock->stats.tx_bytes;
116 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
118 /* KCM is ready to receive messages on its queue-- either the KCM is new or
119 * has become unblocked after being blocked on full socket buffer. Queue any
120 * pending ready messages on a psock. RX mux lock held.
122 static void kcm_rcv_ready(struct kcm_sock *kcm)
124 struct kcm_mux *mux = kcm->mux;
125 struct kcm_psock *psock;
126 struct sk_buff *skb;
128 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
129 return;
131 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
132 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
133 /* Assuming buffer limit has been reached */
134 skb_queue_head(&mux->rx_hold_queue, skb);
135 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
136 return;
140 while (!list_empty(&mux->psocks_ready)) {
141 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
142 psock_ready_list);
144 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
145 /* Assuming buffer limit has been reached */
146 WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
147 return;
150 /* Consumed the ready message on the psock. Schedule rx_work to
151 * get more messages.
153 list_del(&psock->psock_ready_list);
154 psock->ready_rx_msg = NULL;
155 /* Commit clearing of ready_rx_msg for queuing work */
156 smp_mb();
158 strp_unpause(&psock->strp);
159 strp_check_rcv(&psock->strp);
162 /* Buffer limit is okay now, add to ready list */
163 list_add_tail(&kcm->wait_rx_list,
164 &kcm->mux->kcm_rx_waiters);
165 kcm->rx_wait = true;
168 static void kcm_rfree(struct sk_buff *skb)
170 struct sock *sk = skb->sk;
171 struct kcm_sock *kcm = kcm_sk(sk);
172 struct kcm_mux *mux = kcm->mux;
173 unsigned int len = skb->truesize;
175 sk_mem_uncharge(sk, len);
176 atomic_sub(len, &sk->sk_rmem_alloc);
178 /* For reading rx_wait and rx_psock without holding lock */
179 smp_mb__after_atomic();
181 if (!kcm->rx_wait && !kcm->rx_psock &&
182 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
183 spin_lock_bh(&mux->rx_lock);
184 kcm_rcv_ready(kcm);
185 spin_unlock_bh(&mux->rx_lock);
189 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
191 struct sk_buff_head *list = &sk->sk_receive_queue;
193 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
194 return -ENOMEM;
196 if (!sk_rmem_schedule(sk, skb, skb->truesize))
197 return -ENOBUFS;
199 skb->dev = NULL;
201 skb_orphan(skb);
202 skb->sk = sk;
203 skb->destructor = kcm_rfree;
204 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
205 sk_mem_charge(sk, skb->truesize);
207 skb_queue_tail(list, skb);
209 if (!sock_flag(sk, SOCK_DEAD))
210 sk->sk_data_ready(sk);
212 return 0;
215 /* Requeue received messages for a kcm socket to other kcm sockets. This is
216 * called when a kcm socket is receive disabled.
217 * RX mux lock held.
219 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
221 struct sk_buff *skb;
222 struct kcm_sock *kcm;
224 while ((skb = __skb_dequeue(head))) {
225 /* Reset destructor to avoid calling kcm_rcv_ready */
226 skb->destructor = sock_rfree;
227 skb_orphan(skb);
228 try_again:
229 if (list_empty(&mux->kcm_rx_waiters)) {
230 skb_queue_tail(&mux->rx_hold_queue, skb);
231 continue;
234 kcm = list_first_entry(&mux->kcm_rx_waiters,
235 struct kcm_sock, wait_rx_list);
237 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
238 /* Should mean socket buffer full */
239 list_del(&kcm->wait_rx_list);
240 kcm->rx_wait = false;
242 /* Commit rx_wait to read in kcm_free */
243 smp_wmb();
245 goto try_again;
250 /* Lower sock lock held */
251 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
252 struct sk_buff *head)
254 struct kcm_mux *mux = psock->mux;
255 struct kcm_sock *kcm;
257 WARN_ON(psock->ready_rx_msg);
259 if (psock->rx_kcm)
260 return psock->rx_kcm;
262 spin_lock_bh(&mux->rx_lock);
264 if (psock->rx_kcm) {
265 spin_unlock_bh(&mux->rx_lock);
266 return psock->rx_kcm;
269 kcm_update_rx_mux_stats(mux, psock);
271 if (list_empty(&mux->kcm_rx_waiters)) {
272 psock->ready_rx_msg = head;
273 strp_pause(&psock->strp);
274 list_add_tail(&psock->psock_ready_list,
275 &mux->psocks_ready);
276 spin_unlock_bh(&mux->rx_lock);
277 return NULL;
280 kcm = list_first_entry(&mux->kcm_rx_waiters,
281 struct kcm_sock, wait_rx_list);
282 list_del(&kcm->wait_rx_list);
283 kcm->rx_wait = false;
285 psock->rx_kcm = kcm;
286 kcm->rx_psock = psock;
288 spin_unlock_bh(&mux->rx_lock);
290 return kcm;
293 static void kcm_done(struct kcm_sock *kcm);
295 static void kcm_done_work(struct work_struct *w)
297 kcm_done(container_of(w, struct kcm_sock, done_work));
300 /* Lower sock held */
301 static void unreserve_rx_kcm(struct kcm_psock *psock,
302 bool rcv_ready)
304 struct kcm_sock *kcm = psock->rx_kcm;
305 struct kcm_mux *mux = psock->mux;
307 if (!kcm)
308 return;
310 spin_lock_bh(&mux->rx_lock);
312 psock->rx_kcm = NULL;
313 kcm->rx_psock = NULL;
315 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
316 * kcm_rfree
318 smp_mb();
320 if (unlikely(kcm->done)) {
321 spin_unlock_bh(&mux->rx_lock);
323 /* Need to run kcm_done in a task since we need to acquire
324 * callback locks which may already be held here.
326 INIT_WORK(&kcm->done_work, kcm_done_work);
327 schedule_work(&kcm->done_work);
328 return;
331 if (unlikely(kcm->rx_disabled)) {
332 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
333 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
334 /* Check for degenerative race with rx_wait that all
335 * data was dequeued (accounted for in kcm_rfree).
337 kcm_rcv_ready(kcm);
339 spin_unlock_bh(&mux->rx_lock);
342 /* Lower sock lock held */
343 static void psock_data_ready(struct sock *sk)
345 struct kcm_psock *psock;
347 read_lock_bh(&sk->sk_callback_lock);
349 psock = (struct kcm_psock *)sk->sk_user_data;
350 if (likely(psock))
351 strp_data_ready(&psock->strp);
353 read_unlock_bh(&sk->sk_callback_lock);
356 /* Called with lower sock held */
357 static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
359 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
360 struct kcm_sock *kcm;
362 try_queue:
363 kcm = reserve_rx_kcm(psock, skb);
364 if (!kcm) {
365 /* Unable to reserve a KCM, message is held in psock and strp
366 * is paused.
368 return;
371 if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
372 /* Should mean socket buffer full */
373 unreserve_rx_kcm(psock, false);
374 goto try_queue;
378 static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
380 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
381 struct bpf_prog *prog = psock->bpf_prog;
383 return (*prog->bpf_func)(skb, prog->insnsi);
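/* The parse callback above just runs the attached BPF program on the
 * candidate skb; strparser takes the program's return value as the length
 * of the next message (0 means more data is needed). A minimal sketch of
 * such a framing program, assuming a 2-byte big-endian length header that
 * does not count itself (names and section are illustrative):
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_endian.h>
 *
 *	SEC("socket")
 *	int kcm_framing(struct __sk_buff *skb)
 *	{
 *		__u16 len;
 *
 *		if (bpf_skb_load_bytes(skb, 0, &len, sizeof(len)))
 *			return 0;	// header not complete yet
 *		return bpf_ntohs(len) + 2;
 *	}
 *
 * The program is loaded as BPF_PROG_TYPE_SOCKET_FILTER and its fd is passed
 * as bpf_fd in the SIOCKCMATTACH ioctl.
 */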
386 static int kcm_read_sock_done(struct strparser *strp, int err)
388 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
390 unreserve_rx_kcm(psock, true);
392 return err;
395 static void psock_state_change(struct sock *sk)
397 /* TCP only does a POLLIN for a half close. Do a POLLHUP here
398 * since application will normally not poll with POLLIN
399 * on the TCP sockets.
402 report_csk_error(sk, EPIPE);
405 static void psock_write_space(struct sock *sk)
407 struct kcm_psock *psock;
408 struct kcm_mux *mux;
409 struct kcm_sock *kcm;
411 read_lock_bh(&sk->sk_callback_lock);
413 psock = (struct kcm_psock *)sk->sk_user_data;
414 if (unlikely(!psock))
415 goto out;
416 mux = psock->mux;
418 spin_lock_bh(&mux->lock);
420 /* Check if the socket is reserved; if so, a KCM socket is waiting to send on it. */
421 kcm = psock->tx_kcm;
422 if (kcm && !unlikely(kcm->tx_stopped))
423 queue_work(kcm_wq, &kcm->tx_work);
425 spin_unlock_bh(&mux->lock);
426 out:
427 read_unlock_bh(&sk->sk_callback_lock);
430 static void unreserve_psock(struct kcm_sock *kcm);
432 /* kcm sock is locked. */
433 static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
435 struct kcm_mux *mux = kcm->mux;
436 struct kcm_psock *psock;
438 psock = kcm->tx_psock;
440 smp_rmb(); /* Must read tx_psock before tx_wait */
442 if (psock) {
443 WARN_ON(kcm->tx_wait);
444 if (unlikely(psock->tx_stopped))
445 unreserve_psock(kcm);
446 else
447 return kcm->tx_psock;
450 spin_lock_bh(&mux->lock);
452 /* Check again under lock to see if a psock was reserved for this
453 * kcm via psock_now_avail.
455 psock = kcm->tx_psock;
456 if (unlikely(psock)) {
457 WARN_ON(kcm->tx_wait);
458 spin_unlock_bh(&mux->lock);
459 return kcm->tx_psock;
462 if (!list_empty(&mux->psocks_avail)) {
463 psock = list_first_entry(&mux->psocks_avail,
464 struct kcm_psock,
465 psock_avail_list);
466 list_del(&psock->psock_avail_list);
467 if (kcm->tx_wait) {
468 list_del(&kcm->wait_psock_list);
469 kcm->tx_wait = false;
471 kcm->tx_psock = psock;
472 psock->tx_kcm = kcm;
473 KCM_STATS_INCR(psock->stats.reserved);
474 } else if (!kcm->tx_wait) {
475 list_add_tail(&kcm->wait_psock_list,
476 &mux->kcm_tx_waiters);
477 kcm->tx_wait = true;
480 spin_unlock_bh(&mux->lock);
482 return psock;
485 /* mux lock held */
486 static void psock_now_avail(struct kcm_psock *psock)
488 struct kcm_mux *mux = psock->mux;
489 struct kcm_sock *kcm;
491 if (list_empty(&mux->kcm_tx_waiters)) {
492 list_add_tail(&psock->psock_avail_list,
493 &mux->psocks_avail);
494 } else {
495 kcm = list_first_entry(&mux->kcm_tx_waiters,
496 struct kcm_sock,
497 wait_psock_list);
498 list_del(&kcm->wait_psock_list);
499 kcm->tx_wait = false;
500 psock->tx_kcm = kcm;
502 /* Commit before changing tx_psock since that is read in
503 * reserve_psock before queuing work.
505 smp_mb();
507 kcm->tx_psock = psock;
508 KCM_STATS_INCR(psock->stats.reserved);
509 queue_work(kcm_wq, &kcm->tx_work);
513 /* kcm sock is locked. */
514 static void unreserve_psock(struct kcm_sock *kcm)
516 struct kcm_psock *psock;
517 struct kcm_mux *mux = kcm->mux;
519 spin_lock_bh(&mux->lock);
521 psock = kcm->tx_psock;
523 if (WARN_ON(!psock)) {
524 spin_unlock_bh(&mux->lock);
525 return;
528 smp_rmb(); /* Read tx_psock before tx_wait */
530 kcm_update_tx_mux_stats(mux, psock);
532 WARN_ON(kcm->tx_wait);
534 kcm->tx_psock = NULL;
535 psock->tx_kcm = NULL;
536 KCM_STATS_INCR(psock->stats.unreserved);
538 if (unlikely(psock->tx_stopped)) {
539 if (psock->done) {
540 /* Deferred free */
541 list_del(&psock->psock_list);
542 mux->psocks_cnt--;
543 sock_put(psock->sk);
544 fput(psock->sk->sk_socket->file);
545 kmem_cache_free(kcm_psockp, psock);
548 /* Don't put back on available list */
550 spin_unlock_bh(&mux->lock);
552 return;
555 psock_now_avail(psock);
557 spin_unlock_bh(&mux->lock);
560 static void kcm_report_tx_retry(struct kcm_sock *kcm)
562 struct kcm_mux *mux = kcm->mux;
564 spin_lock_bh(&mux->lock);
565 KCM_STATS_INCR(mux->stats.tx_retries);
566 spin_unlock_bh(&mux->lock);
569 /* Write any messages ready on the kcm socket. Called with kcm sock lock
570 * held. Return bytes actually sent or error.
572 static int kcm_write_msgs(struct kcm_sock *kcm)
574 struct sock *sk = &kcm->sk;
575 struct kcm_psock *psock;
576 struct sk_buff *skb, *head;
577 struct kcm_tx_msg *txm;
578 unsigned short fragidx, frag_offset;
579 unsigned int sent, total_sent = 0;
580 int ret = 0;
582 kcm->tx_wait_more = false;
583 psock = kcm->tx_psock;
584 if (unlikely(psock && psock->tx_stopped)) {
585 /* A reserved psock was aborted asynchronously. Unreserve
586 * it and we'll retry the message.
588 unreserve_psock(kcm);
589 kcm_report_tx_retry(kcm);
590 if (skb_queue_empty(&sk->sk_write_queue))
591 return 0;
593 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
595 } else if (skb_queue_empty(&sk->sk_write_queue)) {
596 return 0;
599 head = skb_peek(&sk->sk_write_queue);
600 txm = kcm_tx_msg(head);
602 if (txm->sent) {
603 /* Send of first skbuff in queue already in progress */
604 if (WARN_ON(!psock)) {
605 ret = -EINVAL;
606 goto out;
608 sent = txm->sent;
609 frag_offset = txm->frag_offset;
610 fragidx = txm->fragidx;
611 skb = txm->frag_skb;
613 goto do_frag;
616 try_again:
617 psock = reserve_psock(kcm);
618 if (!psock)
619 goto out;
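	/* Walk each queued message: every frag (and any frag_list / chained
	 * skbs) is pushed to the transport with kernel_sendpage(). On -EAGAIN
	 * the current position is saved in kcm_tx_msg() so a later call
	 * resumes at do_frag.
	 */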
621 do {
622 skb = head;
623 txm = kcm_tx_msg(head);
624 sent = 0;
626 do_frag_list:
627 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
628 ret = -EINVAL;
629 goto out;
632 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
633 fragidx++) {
634 skb_frag_t *frag;
636 frag_offset = 0;
637 do_frag:
638 frag = &skb_shinfo(skb)->frags[fragidx];
639 if (WARN_ON(!frag->size)) {
640 ret = -EINVAL;
641 goto out;
644 ret = kernel_sendpage(psock->sk->sk_socket,
645 frag->page.p,
646 frag->page_offset + frag_offset,
647 frag->size - frag_offset,
648 MSG_DONTWAIT);
649 if (ret <= 0) {
650 if (ret == -EAGAIN) {
651 /* Save state to try again when there's
652 * write space on the socket
654 txm->sent = sent;
655 txm->frag_offset = frag_offset;
656 txm->fragidx = fragidx;
657 txm->frag_skb = skb;
659 ret = 0;
660 goto out;
663 /* Hard failure in sending message, abort this
664 * psock since it has lost framing
665 * synchronization and retry sending the
666 * message from the beginning.
668 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
669 true);
670 unreserve_psock(kcm);
672 txm->sent = 0;
673 kcm_report_tx_retry(kcm);
674 ret = 0;
676 goto try_again;
679 sent += ret;
680 frag_offset += ret;
681 KCM_STATS_ADD(psock->stats.tx_bytes, ret);
682 if (frag_offset < frag->size) {
683 /* Not finished with this frag */
684 goto do_frag;
688 if (skb == head) {
689 if (skb_has_frag_list(skb)) {
690 skb = skb_shinfo(skb)->frag_list;
691 goto do_frag_list;
693 } else if (skb->next) {
694 skb = skb->next;
695 goto do_frag_list;
698 /* Successfully sent the whole packet, account for it. */
699 skb_dequeue(&sk->sk_write_queue);
700 kfree_skb(head);
701 sk->sk_wmem_queued -= sent;
702 total_sent += sent;
703 KCM_STATS_INCR(psock->stats.tx_msgs);
704 } while ((head = skb_peek(&sk->sk_write_queue)));
705 out:
706 if (!head) {
707 /* Done with all queued messages. */
708 WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
709 unreserve_psock(kcm);
712 /* Check if write space is available */
713 sk->sk_write_space(sk);
715 return total_sent ? : ret;
718 static void kcm_tx_work(struct work_struct *w)
720 struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
721 struct sock *sk = &kcm->sk;
722 int err;
724 lock_sock(sk);
726 /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
727 * aborts
729 err = kcm_write_msgs(kcm);
730 if (err < 0) {
731 /* Hard failure in write, report error on KCM socket */
732 pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
733 report_csk_error(&kcm->sk, -err);
734 goto out;
737 /* Primarily for SOCK_SEQPACKET sockets */
738 if (likely(sk->sk_socket) &&
739 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
740 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
741 sk->sk_write_space(sk);
744 out:
745 release_sock(sk);
748 static void kcm_push(struct kcm_sock *kcm)
750 if (kcm->tx_wait_more)
751 kcm_write_msgs(kcm);
754 static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
755 int offset, size_t size, int flags)
758 struct sock *sk = sock->sk;
759 struct kcm_sock *kcm = kcm_sk(sk);
760 struct sk_buff *skb = NULL, *head = NULL;
761 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
762 bool eor;
763 int err = 0;
764 int i;
766 if (flags & MSG_SENDPAGE_NOTLAST)
767 flags |= MSG_MORE;
769 /* No MSG_EOR from splice, only look at MSG_MORE */
770 eor = !(flags & MSG_MORE);
772 lock_sock(sk);
774 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
776 err = -EPIPE;
777 if (sk->sk_err)
778 goto out_error;
780 if (kcm->seq_skb) {
781 /* Previously opened message */
782 head = kcm->seq_skb;
783 skb = kcm_tx_msg(head)->last_skb;
784 i = skb_shinfo(skb)->nr_frags;
786 if (skb_can_coalesce(skb, i, page, offset)) {
787 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
788 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
789 goto coalesced;
792 if (i >= MAX_SKB_FRAGS) {
793 struct sk_buff *tskb;
795 tskb = alloc_skb(0, sk->sk_allocation);
796 while (!tskb) {
797 kcm_push(kcm);
798 err = sk_stream_wait_memory(sk, &timeo);
799 if (err)
800 goto out_error;
803 if (head == skb)
804 skb_shinfo(head)->frag_list = tskb;
805 else
806 skb->next = tskb;
808 skb = tskb;
809 skb->ip_summed = CHECKSUM_UNNECESSARY;
810 i = 0;
812 } else {
813 /* Call the sk_stream functions to manage the sndbuf mem. */
814 if (!sk_stream_memory_free(sk)) {
815 kcm_push(kcm);
816 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
817 err = sk_stream_wait_memory(sk, &timeo);
818 if (err)
819 goto out_error;
822 head = alloc_skb(0, sk->sk_allocation);
823 while (!head) {
824 kcm_push(kcm);
825 err = sk_stream_wait_memory(sk, &timeo);
826 if (err)
827 goto out_error;
830 skb = head;
831 i = 0;
834 get_page(page);
835 skb_fill_page_desc(skb, i, page, offset, size);
836 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
838 coalesced:
839 skb->len += size;
840 skb->data_len += size;
841 skb->truesize += size;
842 sk->sk_wmem_queued += size;
843 sk_mem_charge(sk, size);
845 if (head != skb) {
846 head->len += size;
847 head->data_len += size;
848 head->truesize += size;
851 if (eor) {
852 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
854 /* Message complete, queue it on send buffer */
855 __skb_queue_tail(&sk->sk_write_queue, head);
856 kcm->seq_skb = NULL;
857 KCM_STATS_INCR(kcm->stats.tx_msgs);
859 if (flags & MSG_BATCH) {
860 kcm->tx_wait_more = true;
861 } else if (kcm->tx_wait_more || not_busy) {
862 err = kcm_write_msgs(kcm);
863 if (err < 0) {
864 /* We got a hard error in write_msgs but have
865 * already queued this message. Report an error
866 * in the socket, but don't affect return value
867 * from sendmsg
869 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
870 report_csk_error(&kcm->sk, -err);
873 } else {
874 /* Message not complete, save state */
875 kcm->seq_skb = head;
876 kcm_tx_msg(head)->last_skb = skb;
879 KCM_STATS_ADD(kcm->stats.tx_bytes, size);
881 release_sock(sk);
882 return size;
884 out_error:
885 kcm_push(kcm);
887 err = sk_stream_error(sk, flags, err);
889 /* make sure we wake any epoll edge trigger waiter */
890 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
891 sk->sk_write_space(sk);
893 release_sock(sk);
894 return err;
897 static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
899 struct sock *sk = sock->sk;
900 struct kcm_sock *kcm = kcm_sk(sk);
901 struct sk_buff *skb = NULL, *head = NULL;
902 size_t copy, copied = 0;
903 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
904 int eor = (sock->type == SOCK_DGRAM) ?
905 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
906 int err = -EPIPE;
908 lock_sock(sk);
910 /* Per tcp_sendmsg this should be in poll */
911 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
913 if (sk->sk_err)
914 goto out_error;
916 if (kcm->seq_skb) {
917 /* Previously opened message */
918 head = kcm->seq_skb;
919 skb = kcm_tx_msg(head)->last_skb;
920 goto start;
923 /* Call the sk_stream functions to manage the sndbuf mem. */
924 if (!sk_stream_memory_free(sk)) {
925 kcm_push(kcm);
926 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
927 err = sk_stream_wait_memory(sk, &timeo);
928 if (err)
929 goto out_error;
932 if (msg_data_left(msg)) {
933 /* New message, alloc head skb */
934 head = alloc_skb(0, sk->sk_allocation);
935 while (!head) {
936 kcm_push(kcm);
937 err = sk_stream_wait_memory(sk, &timeo);
938 if (err)
939 goto out_error;
941 head = alloc_skb(0, sk->sk_allocation);
944 skb = head;
946 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
947 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
949 skb->ip_summed = CHECKSUM_UNNECESSARY;
952 start:
953 while (msg_data_left(msg)) {
954 bool merge = true;
955 int i = skb_shinfo(skb)->nr_frags;
956 struct page_frag *pfrag = sk_page_frag(sk);
958 if (!sk_page_frag_refill(sk, pfrag))
959 goto wait_for_memory;
961 if (!skb_can_coalesce(skb, i, pfrag->page,
962 pfrag->offset)) {
963 if (i == MAX_SKB_FRAGS) {
964 struct sk_buff *tskb;
966 tskb = alloc_skb(0, sk->sk_allocation);
967 if (!tskb)
968 goto wait_for_memory;
970 if (head == skb)
971 skb_shinfo(head)->frag_list = tskb;
972 else
973 skb->next = tskb;
975 skb = tskb;
976 skb->ip_summed = CHECKSUM_UNNECESSARY;
977 continue;
979 merge = false;
982 copy = min_t(int, msg_data_left(msg),
983 pfrag->size - pfrag->offset);
985 if (!sk_wmem_schedule(sk, copy))
986 goto wait_for_memory;
988 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
989 pfrag->page,
990 pfrag->offset,
991 copy);
992 if (err)
993 goto out_error;
995 /* Update the skb. */
996 if (merge) {
997 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
998 } else {
999 skb_fill_page_desc(skb, i, pfrag->page,
1000 pfrag->offset, copy);
1001 get_page(pfrag->page);
1004 pfrag->offset += copy;
1005 copied += copy;
1006 if (head != skb) {
1007 head->len += copy;
1008 head->data_len += copy;
1011 continue;
1013 wait_for_memory:
1014 kcm_push(kcm);
1015 err = sk_stream_wait_memory(sk, &timeo);
1016 if (err)
1017 goto out_error;
1020 if (eor) {
1021 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1023 if (head) {
1024 /* Message complete, queue it on send buffer */
1025 __skb_queue_tail(&sk->sk_write_queue, head);
1026 kcm->seq_skb = NULL;
1027 KCM_STATS_INCR(kcm->stats.tx_msgs);
1030 if (msg->msg_flags & MSG_BATCH) {
1031 kcm->tx_wait_more = true;
1032 } else if (kcm->tx_wait_more || not_busy) {
1033 err = kcm_write_msgs(kcm);
1034 if (err < 0) {
1035 /* We got a hard error in write_msgs but have
1036 * already queued this message. Report an error
1037 * in the socket, but don't affect return value
1038 * from sendmsg
1040 pr_warn("KCM: Hard failure on kcm_write_msgs\n");
1041 report_csk_error(&kcm->sk, -err);
1044 } else {
1045 /* Message not complete, save state */
1046 partial_message:
1047 if (head) {
1048 kcm->seq_skb = head;
1049 kcm_tx_msg(head)->last_skb = skb;
1053 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
1055 release_sock(sk);
1056 return copied;
1058 out_error:
1059 kcm_push(kcm);
1061 if (copied && sock->type == SOCK_SEQPACKET) {
1062 /* Wrote some bytes before encountering an
1063 * error, return partial success.
1065 goto partial_message;
1068 if (head != kcm->seq_skb)
1069 kfree_skb(head);
1071 err = sk_stream_error(sk, msg->msg_flags, err);
1073 /* make sure we wake any epoll edge trigger waiter */
1074 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
1075 sk->sk_write_space(sk);
1077 release_sock(sk);
1078 return err;
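/* Userspace view of the batching above (a sketch; kcm_fd and the buffers
 * are assumed). On SOCK_SEQPACKET a message is completed by MSG_EOR;
 * MSG_BATCH queues it without kicking transmission, and the first send
 * without MSG_BATCH flushes the batch:
 *
 *	send(kcm_fd, msg1, len1, MSG_EOR | MSG_BATCH);
 *	send(kcm_fd, msg2, len2, MSG_EOR | MSG_BATCH);
 *	send(kcm_fd, msg3, len3, MSG_EOR);		// flushes all three
 *
 * On SOCK_DGRAM the end of a message is signalled by the absence of
 * MSG_MORE rather than by MSG_EOR.
 */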
1081 static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
1082 long timeo, int *err)
1084 struct sk_buff *skb;
1086 while (!(skb = skb_peek(&sk->sk_receive_queue))) {
1087 if (sk->sk_err) {
1088 *err = sock_error(sk);
1089 return NULL;
1092 if (sock_flag(sk, SOCK_DONE))
1093 return NULL;
1095 if ((flags & MSG_DONTWAIT) || !timeo) {
1096 *err = -EAGAIN;
1097 return NULL;
1100 sk_wait_data(sk, &timeo, NULL);
1102 /* Handle signals */
1103 if (signal_pending(current)) {
1104 *err = sock_intr_errno(timeo);
1105 return NULL;
1109 return skb;
1112 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
1113 size_t len, int flags)
1115 struct sock *sk = sock->sk;
1116 struct kcm_sock *kcm = kcm_sk(sk);
1117 int err = 0;
1118 long timeo;
1119 struct strp_rx_msg *rxm;
1120 int copied = 0;
1121 struct sk_buff *skb;
1123 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1125 lock_sock(sk);
1127 skb = kcm_wait_data(sk, flags, timeo, &err);
1128 if (!skb)
1129 goto out;
1131 /* Okay, have a message on the receive queue */
1133 rxm = strp_rx_msg(skb);
1135 if (len > rxm->full_len)
1136 len = rxm->full_len;
1138 err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
1139 if (err < 0)
1140 goto out;
1142 copied = len;
1143 if (likely(!(flags & MSG_PEEK))) {
1144 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1145 if (copied < rxm->full_len) {
1146 if (sock->type == SOCK_DGRAM) {
1147 /* Truncated message */
1148 msg->msg_flags |= MSG_TRUNC;
1149 goto msg_finished;
1151 rxm->offset += copied;
1152 rxm->full_len -= copied;
1153 } else {
1154 msg_finished:
1155 /* Finished with message */
1156 msg->msg_flags |= MSG_EOR;
1157 KCM_STATS_INCR(kcm->stats.rx_msgs);
1158 skb_unlink(skb, &sk->sk_receive_queue);
1159 kfree_skb(skb);
1163 out:
1164 release_sock(sk);
1166 return copied ? : err;
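/* Receive-side sketch (kcm_fd and iov are assumed): on SOCK_SEQPACKET a
 * short read returns part of a message and leaves the remainder queued;
 * MSG_EOR in msg_flags marks that the whole message has been consumed.
 * SOCK_DGRAM instead truncates and sets MSG_TRUNC.
 *
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(kcm_fd, &mh, 0);
 *	int done = mh.msg_flags & MSG_EOR;
 */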
1169 static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
1170 struct pipe_inode_info *pipe, size_t len,
1171 unsigned int flags)
1173 struct sock *sk = sock->sk;
1174 struct kcm_sock *kcm = kcm_sk(sk);
1175 long timeo;
1176 struct strp_rx_msg *rxm;
1177 int err = 0;
1178 ssize_t copied;
1179 struct sk_buff *skb;
1181 /* Only support splice for SOCK_SEQPACKET */
1183 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1185 lock_sock(sk);
1187 skb = kcm_wait_data(sk, flags, timeo, &err);
1188 if (!skb)
1189 goto err_out;
1191 /* Okay, have a message on the receive queue */
1193 rxm = strp_rx_msg(skb);
1195 if (len > rxm->full_len)
1196 len = rxm->full_len;
1198 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
1199 if (copied < 0) {
1200 err = copied;
1201 goto err_out;
1204 KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
1206 rxm->offset += copied;
1207 rxm->full_len -= copied;
1209 /* We have no way to return MSG_EOR. If all the bytes have been
1210 * read we still leave the message in the receive socket buffer.
1211 * A subsequent recvmsg needs to be done to return MSG_EOR and
1212 * finish reading the message.
1215 release_sock(sk);
1217 return copied;
1219 err_out:
1220 release_sock(sk);
1222 return err;
1225 /* kcm sock lock held */
1226 static void kcm_recv_disable(struct kcm_sock *kcm)
1228 struct kcm_mux *mux = kcm->mux;
1230 if (kcm->rx_disabled)
1231 return;
1233 spin_lock_bh(&mux->rx_lock);
1235 kcm->rx_disabled = 1;
1237 /* If a psock is reserved we'll do cleanup in unreserve */
1238 if (!kcm->rx_psock) {
1239 if (kcm->rx_wait) {
1240 list_del(&kcm->wait_rx_list);
1241 kcm->rx_wait = false;
1244 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
1247 spin_unlock_bh(&mux->rx_lock);
1250 /* kcm sock lock held */
1251 static void kcm_recv_enable(struct kcm_sock *kcm)
1253 struct kcm_mux *mux = kcm->mux;
1255 if (!kcm->rx_disabled)
1256 return;
1258 spin_lock_bh(&mux->rx_lock);
1260 kcm->rx_disabled = 0;
1261 kcm_rcv_ready(kcm);
1263 spin_unlock_bh(&mux->rx_lock);
1266 static int kcm_setsockopt(struct socket *sock, int level, int optname,
1267 char __user *optval, unsigned int optlen)
1269 struct kcm_sock *kcm = kcm_sk(sock->sk);
1270 int val, valbool;
1271 int err = 0;
1273 if (level != SOL_KCM)
1274 return -ENOPROTOOPT;
1276 if (optlen < sizeof(int))
1277 return -EINVAL;
1279 if (get_user(val, (int __user *)optval))
1280 return -EINVAL;
1282 valbool = val ? 1 : 0;
1284 switch (optname) {
1285 case KCM_RECV_DISABLE:
1286 lock_sock(&kcm->sk);
1287 if (valbool)
1288 kcm_recv_disable(kcm);
1289 else
1290 kcm_recv_enable(kcm);
1291 release_sock(&kcm->sk);
1292 break;
1293 default:
1294 err = -ENOPROTOOPT;
1297 return err;
1300 static int kcm_getsockopt(struct socket *sock, int level, int optname,
1301 char __user *optval, int __user *optlen)
1303 struct kcm_sock *kcm = kcm_sk(sock->sk);
1304 int val, len;
1306 if (level != SOL_KCM)
1307 return -ENOPROTOOPT;
1309 if (get_user(len, optlen))
1310 return -EFAULT;
1312 len = min_t(unsigned int, len, sizeof(int));
1313 if (len < 0)
1314 return -EINVAL;
1316 switch (optname) {
1317 case KCM_RECV_DISABLE:
1318 val = kcm->rx_disabled;
1319 break;
1320 default:
1321 return -ENOPROTOOPT;
1324 if (put_user(len, optlen))
1325 return -EFAULT;
1326 if (copy_to_user(optval, &val, len))
1327 return -EFAULT;
1328 return 0;
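/* Userspace toggles receive on a KCM socket with KCM_RECV_DISABLE (sketch;
 * SOL_KCM is 281 per linux/socket.h if the libc headers lack it):
 *
 *	int on = 1;
 *	setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 *	// this socket no longer takes messages from the mux
 *	int off = 0;
 *	setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE, &off, sizeof(off));
 */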
1331 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
1333 struct kcm_sock *tkcm;
1334 struct list_head *head;
1335 int index = 0;
1337 /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
1338 * we set sk_state, otherwise epoll_wait always returns right away with
1339 * POLLHUP
1341 kcm->sk.sk_state = TCP_ESTABLISHED;
1343 /* Add to mux's kcm sockets list */
1344 kcm->mux = mux;
1345 spin_lock_bh(&mux->lock);
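	/* Pick the lowest unused index: kcm_socks is kept sorted by index, so
	 * the first gap (or the end of the list) is the insertion point.
	 */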
1347 head = &mux->kcm_socks;
1348 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
1349 if (tkcm->index != index)
1350 break;
1351 head = &tkcm->kcm_sock_list;
1352 index++;
1355 list_add(&kcm->kcm_sock_list, head);
1356 kcm->index = index;
1358 mux->kcm_socks_cnt++;
1359 spin_unlock_bh(&mux->lock);
1361 INIT_WORK(&kcm->tx_work, kcm_tx_work);
1363 spin_lock_bh(&mux->rx_lock);
1364 kcm_rcv_ready(kcm);
1365 spin_unlock_bh(&mux->rx_lock);
1368 static int kcm_attach(struct socket *sock, struct socket *csock,
1369 struct bpf_prog *prog)
1371 struct kcm_sock *kcm = kcm_sk(sock->sk);
1372 struct kcm_mux *mux = kcm->mux;
1373 struct sock *csk;
1374 struct kcm_psock *psock = NULL, *tpsock;
1375 struct list_head *head;
1376 int index = 0;
1377 struct strp_callbacks cb;
1378 int err = 0;
1380 csk = csock->sk;
1381 if (!csk)
1382 return -EINVAL;
1384 lock_sock(csk);
1386 /* Only allow TCP sockets to be attached for now */
1387 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1388 csk->sk_protocol != IPPROTO_TCP) {
1389 err = -EOPNOTSUPP;
1390 goto out;
1393 /* Don't allow listeners or closed sockets */
1394 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1395 err = -EOPNOTSUPP;
1396 goto out;
1399 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1400 if (!psock) {
1401 err = -ENOMEM;
1402 goto out;
1405 psock->mux = mux;
1406 psock->sk = csk;
1407 psock->bpf_prog = prog;
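	/* Wire up strparser: parse_msg runs the BPF program to find message
	 * boundaries and rcv_msg hands each complete message to a KCM socket.
	 */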
1409 cb.rcv_msg = kcm_rcv_strparser;
1410 cb.abort_parser = NULL;
1411 cb.parse_msg = kcm_parse_func_strparser;
1412 cb.read_sock_done = kcm_read_sock_done;
1414 err = strp_init(&psock->strp, csk, &cb);
1415 if (err) {
1416 kmem_cache_free(kcm_psockp, psock);
1417 goto out;
1420 write_lock_bh(&csk->sk_callback_lock);
1422 /* Check if sk_user_data is already in use by KCM or someone else.
1423 * Must be done under lock to prevent race conditions.
1425 if (csk->sk_user_data) {
1426 write_unlock_bh(&csk->sk_callback_lock);
1427 strp_stop(&psock->strp);
1428 strp_done(&psock->strp);
1429 kmem_cache_free(kcm_psockp, psock);
1430 err = -EALREADY;
1431 goto out;
1434 psock->save_data_ready = csk->sk_data_ready;
1435 psock->save_write_space = csk->sk_write_space;
1436 psock->save_state_change = csk->sk_state_change;
1437 csk->sk_user_data = psock;
1438 csk->sk_data_ready = psock_data_ready;
1439 csk->sk_write_space = psock_write_space;
1440 csk->sk_state_change = psock_state_change;
1442 write_unlock_bh(&csk->sk_callback_lock);
1444 sock_hold(csk);
1446 /* Finished initialization, now add the psock to the MUX. */
1447 spin_lock_bh(&mux->lock);
1448 head = &mux->psocks;
1449 list_for_each_entry(tpsock, &mux->psocks, psock_list) {
1450 if (tpsock->index != index)
1451 break;
1452 head = &tpsock->psock_list;
1453 index++;
1456 list_add(&psock->psock_list, head);
1457 psock->index = index;
1459 KCM_STATS_INCR(mux->stats.psock_attach);
1460 mux->psocks_cnt++;
1461 psock_now_avail(psock);
1462 spin_unlock_bh(&mux->lock);
1464 /* Schedule RX work in case there are already bytes queued */
1465 strp_check_rcv(&psock->strp);
1467 out:
1468 release_sock(csk);
1470 return err;
1473 static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1475 struct socket *csock;
1476 struct bpf_prog *prog;
1477 int err;
1479 csock = sockfd_lookup(info->fd, &err);
1480 if (!csock)
1481 return -ENOENT;
1483 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1484 if (IS_ERR(prog)) {
1485 err = PTR_ERR(prog);
1486 goto out;
1489 err = kcm_attach(sock, csock, prog);
1490 if (err) {
1491 bpf_prog_put(prog);
1492 goto out;
1495 /* Keep reference on file also */
1497 return 0;
1498 out:
1499 fput(csock->file);
1500 return err;
1503 static void kcm_unattach(struct kcm_psock *psock)
1505 struct sock *csk = psock->sk;
1506 struct kcm_mux *mux = psock->mux;
1508 lock_sock(csk);
1510 /* Stop getting callbacks from TCP socket. After this there should
1511 * be no way to reserve a kcm for this psock.
1513 write_lock_bh(&csk->sk_callback_lock);
1514 csk->sk_user_data = NULL;
1515 csk->sk_data_ready = psock->save_data_ready;
1516 csk->sk_write_space = psock->save_write_space;
1517 csk->sk_state_change = psock->save_state_change;
1518 strp_stop(&psock->strp);
1520 if (WARN_ON(psock->rx_kcm)) {
1521 write_unlock_bh(&csk->sk_callback_lock);
1522 release_sock(csk);
1523 return;
1526 spin_lock_bh(&mux->rx_lock);
1528 /* Stop receiver activities. After this point psock should not be
1529 * able to get onto ready list either through callbacks or work.
1531 if (psock->ready_rx_msg) {
1532 list_del(&psock->psock_ready_list);
1533 kfree_skb(psock->ready_rx_msg);
1534 psock->ready_rx_msg = NULL;
1535 KCM_STATS_INCR(mux->stats.rx_ready_drops);
1538 spin_unlock_bh(&mux->rx_lock);
1540 write_unlock_bh(&csk->sk_callback_lock);
1542 /* Call strp_done without sock lock */
1543 release_sock(csk);
1544 strp_done(&psock->strp);
1545 lock_sock(csk);
1547 bpf_prog_put(psock->bpf_prog);
1549 spin_lock_bh(&mux->lock);
1551 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1552 save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1554 KCM_STATS_INCR(mux->stats.psock_unattach);
1556 if (psock->tx_kcm) {
1557 /* psock was reserved. Just mark it finished and we will clean
1558 * up in the kcm paths, we need kcm lock which can not be
1559 * acquired here.
1561 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
1562 spin_unlock_bh(&mux->lock);
1564 /* We are unattaching a socket that is reserved. Abort the
1565 * socket since we may be out of sync in sending on it. We need
1566 * to do this without the mux lock.
1568 kcm_abort_tx_psock(psock, EPIPE, false);
1570 spin_lock_bh(&mux->lock);
1571 if (!psock->tx_kcm) {
1572 /* psock was unreserved in the window where the mux was unlocked */
1573 goto no_reserved;
1575 psock->done = 1;
1577 /* Commit done before queuing work to process it */
1578 smp_mb();
1580 /* Queue tx work to make sure psock->done is handled */
1581 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1582 spin_unlock_bh(&mux->lock);
1583 } else {
1584 no_reserved:
1585 if (!psock->tx_stopped)
1586 list_del(&psock->psock_avail_list);
1587 list_del(&psock->psock_list);
1588 mux->psocks_cnt--;
1589 spin_unlock_bh(&mux->lock);
1591 sock_put(csk);
1592 fput(csk->sk_socket->file);
1593 kmem_cache_free(kcm_psockp, psock);
1596 release_sock(csk);
1599 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1601 struct kcm_sock *kcm = kcm_sk(sock->sk);
1602 struct kcm_mux *mux = kcm->mux;
1603 struct kcm_psock *psock;
1604 struct socket *csock;
1605 struct sock *csk;
1606 int err;
1608 csock = sockfd_lookup(info->fd, &err);
1609 if (!csock)
1610 return -ENOENT;
1612 csk = csock->sk;
1613 if (!csk) {
1614 err = -EINVAL;
1615 goto out;
1618 err = -ENOENT;
1620 spin_lock_bh(&mux->lock);
1622 list_for_each_entry(psock, &mux->psocks, psock_list) {
1623 if (psock->sk != csk)
1624 continue;
1626 /* Found the matching psock */
1628 if (psock->unattaching || WARN_ON(psock->done)) {
1629 err = -EALREADY;
1630 break;
1633 psock->unattaching = 1;
1635 spin_unlock_bh(&mux->lock);
1637 /* kcm_unattach() takes the lower socket lock itself */
1638 kcm_unattach(psock);
1640 err = 0;
1641 goto out;
1644 spin_unlock_bh(&mux->lock);
1646 out:
1647 fput(csock->file);
1648 return err;
1651 static struct proto kcm_proto = {
1652 .name = "KCM",
1653 .owner = THIS_MODULE,
1654 .obj_size = sizeof(struct kcm_sock),
1657 /* Clone a kcm socket. */
1658 static struct file *kcm_clone(struct socket *osock)
1660 struct socket *newsock;
1661 struct sock *newsk;
1662 struct file *file;
1664 newsock = sock_alloc();
1665 if (!newsock)
1666 return ERR_PTR(-ENFILE);
1668 newsock->type = osock->type;
1669 newsock->ops = osock->ops;
1671 __module_get(newsock->ops->owner);
1673 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1674 &kcm_proto, false);
1675 if (!newsk) {
1676 sock_release(newsock);
1677 return ERR_PTR(-ENOMEM);
1679 sock_init_data(newsock, newsk);
1680 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1682 file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1683 if (IS_ERR(file))
1684 sock_release(newsock);
1686 return file;
1689 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1691 int err;
1693 switch (cmd) {
1694 case SIOCKCMATTACH: {
1695 struct kcm_attach info;
1697 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1698 return -EFAULT;
1700 err = kcm_attach_ioctl(sock, &info);
1702 break;
1704 case SIOCKCMUNATTACH: {
1705 struct kcm_unattach info;
1707 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1708 return -EFAULT;
1710 err = kcm_unattach_ioctl(sock, &info);
1712 break;
1714 case SIOCKCMCLONE: {
1715 struct kcm_clone info;
1716 struct file *file;
1718 info.fd = get_unused_fd_flags(0);
1719 if (unlikely(info.fd < 0))
1720 return info.fd;
1722 file = kcm_clone(sock);
1723 if (IS_ERR(file)) {
1724 put_unused_fd(info.fd);
1725 return PTR_ERR(file);
1727 if (copy_to_user((void __user *)arg, &info,
1728 sizeof(info))) {
1729 put_unused_fd(info.fd);
1730 fput(file);
1731 return -EFAULT;
1733 fd_install(info.fd, file);
1734 err = 0;
1735 break;
1737 default:
1738 err = -ENOIOCTLCMD;
1739 break;
1742 return err;
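/* Cloning from userspace (sketch): the returned fd is another KCM socket on
 * the same mux, e.g. one per worker thread:
 *
 *	struct kcm_clone info;
 *	if (ioctl(kcm_fd, SIOCKCMCLONE, &info) == 0)
 *		worker_fd = info.fd;
 */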
1745 static void free_mux(struct rcu_head *rcu)
1747 struct kcm_mux *mux = container_of(rcu,
1748 struct kcm_mux, rcu);
1750 kmem_cache_free(kcm_muxp, mux);
1753 static void release_mux(struct kcm_mux *mux)
1755 struct kcm_net *knet = mux->knet;
1756 struct kcm_psock *psock, *tmp_psock;
1758 /* Release psocks */
1759 list_for_each_entry_safe(psock, tmp_psock,
1760 &mux->psocks, psock_list) {
1761 if (!WARN_ON(psock->unattaching))
1762 kcm_unattach(psock);
1765 if (WARN_ON(mux->psocks_cnt))
1766 return;
1768 __skb_queue_purge(&mux->rx_hold_queue);
1770 mutex_lock(&knet->mutex);
1771 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1772 aggregate_psock_stats(&mux->aggregate_psock_stats,
1773 &knet->aggregate_psock_stats);
1774 aggregate_strp_stats(&mux->aggregate_strp_stats,
1775 &knet->aggregate_strp_stats);
1776 list_del_rcu(&mux->kcm_mux_list);
1777 knet->count--;
1778 mutex_unlock(&knet->mutex);
1780 call_rcu(&mux->rcu, free_mux);
1783 static void kcm_done(struct kcm_sock *kcm)
1785 struct kcm_mux *mux = kcm->mux;
1786 struct sock *sk = &kcm->sk;
1787 int socks_cnt;
1789 spin_lock_bh(&mux->rx_lock);
1790 if (kcm->rx_psock) {
1791 /* Cleanup in unreserve_rx_kcm */
1792 WARN_ON(kcm->done);
1793 kcm->rx_disabled = 1;
1794 kcm->done = 1;
1795 spin_unlock_bh(&mux->rx_lock);
1796 return;
1799 if (kcm->rx_wait) {
1800 list_del(&kcm->wait_rx_list);
1801 kcm->rx_wait = false;
1803 /* Move any pending receive messages to other kcm sockets */
1804 requeue_rx_msgs(mux, &sk->sk_receive_queue);
1806 spin_unlock_bh(&mux->rx_lock);
1808 if (WARN_ON(sk_rmem_alloc_get(sk)))
1809 return;
1811 /* Detach from MUX */
1812 spin_lock_bh(&mux->lock);
1814 list_del(&kcm->kcm_sock_list);
1815 mux->kcm_socks_cnt--;
1816 socks_cnt = mux->kcm_socks_cnt;
1818 spin_unlock_bh(&mux->lock);
1820 if (!socks_cnt) {
1821 /* We are done with the mux now. */
1822 release_mux(mux);
1825 WARN_ON(kcm->rx_wait);
1827 sock_put(&kcm->sk);
1830 /* Called by kcm_release to close a KCM socket.
1831 * If this is the last KCM socket on the MUX, destroy the MUX.
1833 static int kcm_release(struct socket *sock)
1835 struct sock *sk = sock->sk;
1836 struct kcm_sock *kcm;
1837 struct kcm_mux *mux;
1838 struct kcm_psock *psock;
1840 if (!sk)
1841 return 0;
1843 kcm = kcm_sk(sk);
1844 mux = kcm->mux;
1846 sock_orphan(sk);
1847 kfree_skb(kcm->seq_skb);
1849 lock_sock(sk);
1850 /* Purge queue under lock to avoid race condition with tx_work trying
1851 * to act when queue is nonempty. If tx_work runs after this point
1852 * it will just return.
1854 __skb_queue_purge(&sk->sk_write_queue);
1856 /* Set tx_stopped. This is checked when psock is bound to a kcm and we
1857 * get a writespace callback. This prevents further work being queued
1858 * from the callback (unbinding the psock occurs after canceling work).
1860 kcm->tx_stopped = 1;
1862 release_sock(sk);
1864 spin_lock_bh(&mux->lock);
1865 if (kcm->tx_wait) {
1866 /* Take off the tx_wait list; after this point there should be no way
1867 * that a psock will be assigned to this kcm.
1869 list_del(&kcm->wait_psock_list);
1870 kcm->tx_wait = false;
1872 spin_unlock_bh(&mux->lock);
1874 /* Cancel work. After this point there should be no outside references
1875 * to the kcm socket.
1877 cancel_work_sync(&kcm->tx_work);
1879 lock_sock(sk);
1880 psock = kcm->tx_psock;
1881 if (psock) {
1882 /* A psock was reserved, so we need to kill it since it
1883 * may already have some bytes queued from a message. We
1884 * need to do this after removing kcm from tx_wait list.
1886 kcm_abort_tx_psock(psock, EPIPE, false);
1887 unreserve_psock(kcm);
1889 release_sock(sk);
1891 WARN_ON(kcm->tx_wait);
1892 WARN_ON(kcm->tx_psock);
1894 sock->sk = NULL;
1896 kcm_done(kcm);
1898 return 0;
1901 static const struct proto_ops kcm_dgram_ops = {
1902 .family = PF_KCM,
1903 .owner = THIS_MODULE,
1904 .release = kcm_release,
1905 .bind = sock_no_bind,
1906 .connect = sock_no_connect,
1907 .socketpair = sock_no_socketpair,
1908 .accept = sock_no_accept,
1909 .getname = sock_no_getname,
1910 .poll = datagram_poll,
1911 .ioctl = kcm_ioctl,
1912 .listen = sock_no_listen,
1913 .shutdown = sock_no_shutdown,
1914 .setsockopt = kcm_setsockopt,
1915 .getsockopt = kcm_getsockopt,
1916 .sendmsg = kcm_sendmsg,
1917 .recvmsg = kcm_recvmsg,
1918 .mmap = sock_no_mmap,
1919 .sendpage = kcm_sendpage,
1922 static const struct proto_ops kcm_seqpacket_ops = {
1923 .family = PF_KCM,
1924 .owner = THIS_MODULE,
1925 .release = kcm_release,
1926 .bind = sock_no_bind,
1927 .connect = sock_no_connect,
1928 .socketpair = sock_no_socketpair,
1929 .accept = sock_no_accept,
1930 .getname = sock_no_getname,
1931 .poll = datagram_poll,
1932 .ioctl = kcm_ioctl,
1933 .listen = sock_no_listen,
1934 .shutdown = sock_no_shutdown,
1935 .setsockopt = kcm_setsockopt,
1936 .getsockopt = kcm_getsockopt,
1937 .sendmsg = kcm_sendmsg,
1938 .recvmsg = kcm_recvmsg,
1939 .mmap = sock_no_mmap,
1940 .sendpage = kcm_sendpage,
1941 .splice_read = kcm_splice_read,
1944 /* Create proto operation for kcm sockets */
1945 static int kcm_create(struct net *net, struct socket *sock,
1946 int protocol, int kern)
1948 struct kcm_net *knet = net_generic(net, kcm_net_id);
1949 struct sock *sk;
1950 struct kcm_mux *mux;
1952 switch (sock->type) {
1953 case SOCK_DGRAM:
1954 sock->ops = &kcm_dgram_ops;
1955 break;
1956 case SOCK_SEQPACKET:
1957 sock->ops = &kcm_seqpacket_ops;
1958 break;
1959 default:
1960 return -ESOCKTNOSUPPORT;
1963 if (protocol != KCMPROTO_CONNECTED)
1964 return -EPROTONOSUPPORT;
1966 sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1967 if (!sk)
1968 return -ENOMEM;
1970 /* Allocate a kcm mux, shared between KCM sockets */
1971 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1972 if (!mux) {
1973 sk_free(sk);
1974 return -ENOMEM;
1977 spin_lock_init(&mux->lock);
1978 spin_lock_init(&mux->rx_lock);
1979 INIT_LIST_HEAD(&mux->kcm_socks);
1980 INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1981 INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1983 INIT_LIST_HEAD(&mux->psocks);
1984 INIT_LIST_HEAD(&mux->psocks_ready);
1985 INIT_LIST_HEAD(&mux->psocks_avail);
1987 mux->knet = knet;
1989 /* Add new MUX to list */
1990 mutex_lock(&knet->mutex);
1991 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1992 knet->count++;
1993 mutex_unlock(&knet->mutex);
1995 skb_queue_head_init(&mux->rx_hold_queue);
1997 /* Init KCM socket */
1998 sock_init_data(sock, sk);
1999 init_kcm_sock(kcm_sk(sk), mux);
2001 return 0;
2004 static struct net_proto_family kcm_family_ops = {
2005 .family = PF_KCM,
2006 .create = kcm_create,
2007 .owner = THIS_MODULE,
2010 static __net_init int kcm_init_net(struct net *net)
2012 struct kcm_net *knet = net_generic(net, kcm_net_id);
2014 INIT_LIST_HEAD_RCU(&knet->mux_list);
2015 mutex_init(&knet->mutex);
2017 return 0;
2020 static __net_exit void kcm_exit_net(struct net *net)
2022 struct kcm_net *knet = net_generic(net, kcm_net_id);
2024 /* All KCM sockets should be closed at this point, which should mean
2025 * that all multiplexors and psocks have been destroyed.
2027 WARN_ON(!list_empty(&knet->mux_list));
2030 static struct pernet_operations kcm_net_ops = {
2031 .init = kcm_init_net,
2032 .exit = kcm_exit_net,
2033 .id = &kcm_net_id,
2034 .size = sizeof(struct kcm_net),
2037 static int __init kcm_init(void)
2039 int err = -ENOMEM;
2041 kcm_muxp = kmem_cache_create("kcm_mux_cache",
2042 sizeof(struct kcm_mux), 0,
2043 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2044 if (!kcm_muxp)
2045 goto fail;
2047 kcm_psockp = kmem_cache_create("kcm_psock_cache",
2048 sizeof(struct kcm_psock), 0,
2049 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2050 if (!kcm_psockp)
2051 goto fail;
2053 kcm_wq = create_singlethread_workqueue("kkcmd");
2054 if (!kcm_wq)
2055 goto fail;
2057 err = proto_register(&kcm_proto, 1);
2058 if (err)
2059 goto fail;
2061 err = sock_register(&kcm_family_ops);
2062 if (err)
2063 goto sock_register_fail;
2065 err = register_pernet_device(&kcm_net_ops);
2066 if (err)
2067 goto net_ops_fail;
2069 err = kcm_proc_init();
2070 if (err)
2071 goto proc_init_fail;
2073 return 0;
2075 proc_init_fail:
2076 unregister_pernet_device(&kcm_net_ops);
2078 net_ops_fail:
2079 sock_unregister(PF_KCM);
2081 sock_register_fail:
2082 proto_unregister(&kcm_proto);
2084 fail:
2085 kmem_cache_destroy(kcm_muxp);
2086 kmem_cache_destroy(kcm_psockp);
2088 if (kcm_wq)
2089 destroy_workqueue(kcm_wq);
2091 return err;
2094 static void __exit kcm_exit(void)
2096 kcm_proc_exit();
2097 unregister_pernet_device(&kcm_net_ops);
2098 sock_unregister(PF_KCM);
2099 proto_unregister(&kcm_proto);
2100 destroy_workqueue(kcm_wq);
2102 kmem_cache_destroy(kcm_muxp);
2103 kmem_cache_destroy(kcm_psockp);
2106 module_init(kcm_init);
2107 module_exit(kcm_exit);
2109 MODULE_LICENSE("GPL");
2110 MODULE_ALIAS_NETPROTO(PF_KCM);