/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;
static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}
static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}
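/* Per-message transmit state (bytes sent, current fragment and offset) is
 * kept in the skb control buffer via kcm_tx_msg() while a message sits on
 * the KCM socket's write queue; kcm_write_msgs() below uses that state to
 * resume a partially sent message after -EAGAIN.
 */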
static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	csk->sk_error_report(csk);
}
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}
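/* There are two abort paths above: an unreserved psock is simply pulled off
 * psocks_avail, while a psock that is reserved and aborted from outside
 * kcm_write_msgs (wakeup_kcm) has tx_work queued so the owning kcm socket
 * observes tx_stopped and retries its message on another psock.
 */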
/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.rx_bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.rx_msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.rx_msgs;
	psock->saved_rx_bytes = psock->strp.stats.rx_bytes;
}
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}
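/* Both updaters above aggregate by delta: the mux-level counter advances by
 * (current psock counter - saved snapshot) and the snapshot is then
 * refreshed, so per-psock stats can be folded into the mux repeatedly
 * without double counting.
 */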
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;

		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	kcm->rx_wait = true;
}
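/* kcm_rcv_ready() is the receive-side unblocking point: it first drains the
 * mux's rx_hold_queue, then any messages parked on ready psocks (unpausing
 * their strparsers), and only if the socket buffer still has room does the
 * kcm go back on kcm_rx_waiters to be handed new messages.
 */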
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!kcm->rx_wait && !kcm->rx_psock &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
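/* Receive memory accounting is destructor driven: kcm_queue_rcv_skb()
 * charges sk_rmem_alloc when queuing and kcm_rfree() uncharges when the skb
 * is consumed, at which point a kcm that had been blocked on a full buffer
 * is put back into rotation via kcm_rcv_ready().
 */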
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 *
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = __skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}
/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	kcm->rx_wait = false;

	psock->rx_kcm = kcm;
	kcm->rx_psock = psock;

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}
static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}
/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	kcm->rx_psock = NULL;

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}
	spin_unlock_bh(&mux->rx_lock);
}
/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}
/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;

	return (*prog->bpf_func)(skb, prog->insnsi);
}
static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}
static void psock_state_change(struct sock *sk)
{
	/* TCP only does a POLLIN for a half close. Do a POLLHUP here
	 * since application will normally not poll with POLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}
static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved so someone is waiting for sending. */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
static void unreserve_psock(struct kcm_sock *kcm);
/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if psock was reserved for this
	 * psock via psock_unreserve.
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}
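/* Transmit-side pairing: reserve_psock() either takes an available psock or
 * parks the kcm on kcm_tx_waiters; psock_now_avail() below is the other half
 * of the handshake, handing a freed psock directly to the first waiter and
 * scheduling its tx_work.
 */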
/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}
/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}
static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}
/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *skb, *head;
	struct kcm_tx_msg *txm;
	unsigned short fragidx, frag_offset;
	unsigned int sent, total_sent = 0;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

	} else if (skb_queue_empty(&sk->sk_write_queue)) {
		return 0;
	}

	head = skb_peek(&sk->sk_write_queue);
	txm = kcm_tx_msg(head);

	if (txm->sent) {
		/* Send of first skbuff in queue already in progress */
		if (WARN_ON(!psock)) {
			ret = -EINVAL;
			goto out;
		}
		sent = txm->sent;
		frag_offset = txm->frag_offset;
		fragidx = txm->fragidx;
		skb = txm->frag_skb;

		goto do_frag;
	}

try_again:
	psock = reserve_psock(kcm);
	if (!psock)
		goto out;

	do {
		skb = head;
		txm = kcm_tx_msg(head);
		sent = 0;

do_frag_list:
		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
		     fragidx++) {
			skb_frag_t *frag;

			frag_offset = 0;
do_frag:
			frag = &skb_shinfo(skb)->frags[fragidx];
			if (WARN_ON(!frag->size)) {
				ret = -EINVAL;
				goto out;
			}

			ret = kernel_sendpage(psock->sk->sk_socket,
					      frag->page.p,
					      frag->page_offset + frag_offset,
					      frag->size - frag_offset,
					      MSG_DONTWAIT);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->sent = sent;
					txm->frag_offset = frag_offset;
					txm->fragidx = fragidx;
					txm->frag_skb = skb;

					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);

				txm->sent = 0;
				kcm_report_tx_retry(kcm);
				ret = 0;

				goto try_again;
			}

			sent += ret;
			frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
			if (frag_offset < frag->size) {
				/* Not finished with this frag */
				goto do_frag;
			}
		}

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}

		/* Successfully sent the whole packet, account for it. */
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		sk->sk_wmem_queued -= sent;
		total_sent += sent;
		KCM_STATS_INCR(psock->stats.tx_msgs);
	} while ((head = skb_peek(&sk->sk_write_queue)));

out:
	if (!head) {
		/* Done with all queued messages. */
		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
		unreserve_psock(kcm);
	}

	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}
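/* kcm_write_msgs() walks each queued message frag by frag (including any
 * frag_list chain) with kernel_sendpage(). On -EAGAIN the position is saved
 * in kcm_tx_msg(head) and the send resumes from do_frag once write space
 * returns; on a hard error the psock has lost framing sync, so it is
 * aborted and the whole message is retried on another psock.
 */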
static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}
static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}
static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	int err = 0;
	int i;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & MSG_MORE);

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	err = -EPIPE;
	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
			goto coalesced;
		}

		if (i >= MAX_SKB_FRAGS) {
			struct sk_buff *tskb;

			tskb = alloc_skb(0, sk->sk_allocation);
			while (!tskb) {
				kcm_push(kcm);
				err = sk_stream_wait_memory(sk, &timeo);
				if (err)
					goto out_error;

				tskb = alloc_skb(0, sk->sk_allocation);
			}

			if (head == skb)
				skb_shinfo(head)->frag_list = tskb;
			else
				skb->next = tskb;

			skb = tskb;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			i = 0;
		}
	} else {
		/* Call the sk_stream functions to manage the sndbuf mem. */
		if (!sk_stream_memory_free(sk)) {
			kcm_push(kcm);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;
		i = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, i, page, offset, size);
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

coalesced:
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	sk->sk_wmem_queued += size;
	sk_mem_charge(sk, size);

	if (head != skb) {
		head->len += size;
		head->data_len += size;
		head->truesize += size;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, size);

	release_sock(sk);
	return size;

out_error:
	kcm_push(kcm);

	err = sk_stream_error(sk, flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		copy = min_t(int, msg_data_left(msg),
			     pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page,
					       pfrag->offset,
					       copy);
		if (err)
			goto out_error;

		/* Update the skb. */
		if (merge) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			skb_fill_page_desc(skb, i, pfrag->page,
					   pfrag->offset, copy);
			get_page(pfrag->page);
		}

		pfrag->offset += copy;
		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (copied && sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		goto partial_message;
	}

	if (head != kcm->seq_skb)
		kfree_skb(head);

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
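/* On the send path MSG_MORE (SOCK_DGRAM) or the absence of MSG_EOR
 * (SOCK_SEQPACKET) keeps a message open across sendmsg() calls via
 * kcm->seq_skb, while MSG_BATCH queues completed messages and defers
 * kcm_write_msgs() until a later send without MSG_BATCH flushes the batch.
 */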
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		sk_wait_data(sk, &timeo, NULL);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	long timeo;
	struct strp_rx_msg *rxm;
	int copied = 0;
	struct sk_buff *skb;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	rxm = strp_rx_msg(skb);

	if (len > rxm->full_len)
		len = rxm->full_len;

	err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < rxm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			rxm->offset += copied;
			rxm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
			skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);
		}
	}

out:
	release_sock(sk);

	return copied ? : err;
}
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	long timeo;
	struct strp_rx_msg *rxm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCKSEQPACKET */

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	rxm = strp_rx_msg(skb);

	if (len > rxm->full_len)
		len = rxm->full_len;

	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	rxm->offset += copied;
	rxm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	release_sock(sk);

	return copied;

err_out:
	release_sock(sk);

	return err;
}
/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			kcm->rx_wait = false;
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EINVAL;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}
static int kcm_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, len;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * POLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	struct strp_callbacks cb;
	int err;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock)
		return -ENOMEM;

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	cb.rcv_msg = kcm_rcv_strparser;
	cb.abort_parser = NULL;
	cb.parse_msg = kcm_parse_func_strparser;
	cb.read_sock_done = kcm_read_sock_done;

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		kmem_cache_free(kcm_psockp, psock);
		return err;
	}

	write_lock_bh(&csk->sk_callback_lock);
	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;
	write_unlock_bh(&csk->sk_callback_lock);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

	return 0;
}
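/* The attached BPF program gives strparser the length of the message at the
 * head of the stream. As a rough sketch in the spirit of the example in
 * Documentation/networking/kcm.txt (assuming records whose first two bytes
 * hold the payload length in network byte order, and a load_half-style
 * helper as used in the BPF samples):
 *
 *	int bpf_prog1(struct __sk_buff *skb)
 *	{
 *		// total message length = 2-byte header + payload
 *		return load_half(skb, 0) + 2;
 *	}
 *
 * i.e. the program returns the full message length, header included.
 */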
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	fput(csock->file);
	return err;
}
static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths, we need kcm lock which can not be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in window mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}

	release_sock(csk);
}
static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct socket *csock;
	struct sock *csk;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	csk = csock->sk;
	if (!csk) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOENT;

	spin_lock_bh(&mux->lock);

	list_for_each_entry(psock, &mux->psocks, psock_list) {
		if (psock->sk != csk)
			continue;

		/* Found the matching psock */

		if (psock->unattaching || WARN_ON(psock->done)) {
			err = -EALREADY;
			break;
		}

		psock->unattaching = 1;

		spin_unlock_bh(&mux->lock);

		/* Lower socket lock should already be held */
		kcm_unattach(psock);

		err = 0;
		goto out;
	}

	spin_unlock_bh(&mux->lock);

out:
	fput(csock->file);
	return err;
}
static struct proto kcm_proto = {
	.name = "KCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};
/* Clone a kcm socket. */
static int kcm_clone(struct socket *osock, struct kcm_clone *info,
		     struct socket **newsockp)
{
	struct socket *newsock;
	struct sock *newsk;
	struct file *newfile;
	int err, newfd;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out;

	newsock->type = osock->type;
	newsock->ops = osock->ops;

	__module_get(newsock->ops->owner);

	newfd = get_unused_fd_flags(0);
	if (unlikely(newfd < 0)) {
		err = newfd;
		goto out_fd_fail;
	}

	newfile = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
	if (unlikely(IS_ERR(newfile))) {
		err = PTR_ERR(newfile);
		goto out_sock_alloc_fail;
	}

	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
			 &kcm_proto, true);
	if (!newsk) {
		err = -ENOMEM;
		goto out_sk_alloc_fail;
	}

	sock_init_data(newsock, newsk);
	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

	fd_install(newfd, newfile);
	*newsockp = newsock;
	info->fd = newfd;

	return 0;

out_sk_alloc_fail:
	fput(newfile);
out_sock_alloc_fail:
	put_unused_fd(newfd);
out_fd_fail:
	sock_release(newsock);
out:
	return err;
}
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct socket *newsock = NULL;

		err = kcm_clone(sock, &info, &newsock);
		if (!err) {
			if (copy_to_user((void __user *)arg, &info,
					 sizeof(info)))
				err = -EFAULT;
		}

		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
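/* Rough userspace sketch of the ioctl flow handled above (illustrative
 * only; error handling omitted). A KCM socket is created, a connected TCP
 * socket plus a parser program are attached, and further KCM sockets can be
 * cloned onto the same mux:
 *
 *	int kcmfd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *
 *	struct kcm_attach attach = {
 *		.fd = tcp_fd,		// established TCP socket
 *		.bpf_fd = parser_fd,	// BPF_PROG_TYPE_SOCKET_FILTER program
 *	};
 *	ioctl(kcmfd, SIOCKCMATTACH, &attach);
 *
 *	struct kcm_clone clone = {};
 *	ioctl(kcmfd, SIOCKCMCLONE, &clone);	// clone.fd is a new KCM socket
 */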
static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
	    struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}
static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}
static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		kcm->rx_wait = false;
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}
/* Called by kcm_release to close a KCM socket.
 * If this is the last KCM socket on the MUX, destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	lock_sock(sk);
	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a writespace callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off tx_wait list, after this point there should be no
		 * way that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}
static const struct proto_ops kcm_dgram_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
};
static const struct proto_ops kcm_seqpacket_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
	.splice_read = kcm_splice_read,
};
/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
		      int protocol, int kern)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);
	struct sock *sk;
	struct kcm_mux *mux;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &kcm_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &kcm_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (protocol != KCMPROTO_CONNECTED)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Allocate a kcm mux, shared between KCM sockets */
	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
	if (!mux) {
		sk_free(sk);
		return -ENOMEM;
	}

	spin_lock_init(&mux->lock);
	spin_lock_init(&mux->rx_lock);
	INIT_LIST_HEAD(&mux->kcm_socks);
	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
	INIT_LIST_HEAD(&mux->kcm_tx_waiters);

	INIT_LIST_HEAD(&mux->psocks);
	INIT_LIST_HEAD(&mux->psocks_ready);
	INIT_LIST_HEAD(&mux->psocks_avail);

	mux->knet = knet;

	/* Add new MUX to list */
	mutex_lock(&knet->mutex);
	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
	knet->count++;
	mutex_unlock(&knet->mutex);

	skb_queue_head_init(&mux->rx_hold_queue);

	/* Init KCM socket */
	sock_init_data(sock, sk);
	init_kcm_sock(kcm_sk(sk), mux);

	return 0;
}
= {
1990 .create
= kcm_create
,
1991 .owner
= THIS_MODULE
,
static __net_init int kcm_init_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	INIT_LIST_HEAD_RCU(&knet->mux_list);
	mutex_init(&knet->mutex);

	return 0;
}
static __net_exit void kcm_exit_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	/* All KCM sockets should be closed at this point, which should mean
	 * that all multiplexors and psocks have been destroyed.
	 */
	WARN_ON(!list_empty(&knet->mux_list));
}
static struct pernet_operations kcm_net_ops = {
	.init = kcm_init_net,
	.exit = kcm_exit_net,
	.id = &kcm_net_id,
	.size = sizeof(struct kcm_net),
};
static int __init kcm_init(void)
{
	int err = -ENOMEM;

	kcm_muxp = kmem_cache_create("kcm_mux_cache",
				     sizeof(struct kcm_mux), 0,
				     SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (!kcm_muxp)
		goto fail;

	kcm_psockp = kmem_cache_create("kcm_psock_cache",
				       sizeof(struct kcm_psock), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (!kcm_psockp)
		goto fail;

	kcm_wq = create_singlethread_workqueue("kkcmd");
	if (!kcm_wq)
		goto fail;

	err = proto_register(&kcm_proto, 1);
	if (err)
		goto fail;

	err = sock_register(&kcm_family_ops);
	if (err)
		goto sock_register_fail;

	err = register_pernet_device(&kcm_net_ops);
	if (err)
		goto net_ops_fail;

	err = kcm_proc_init();
	if (err)
		goto proc_init_fail;

	return 0;

proc_init_fail:
	unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
	sock_unregister(PF_KCM);

sock_register_fail:
	proto_unregister(&kcm_proto);

fail:
	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);

	if (kcm_wq)
		destroy_workqueue(kcm_wq);

	return err;
}
static void __exit kcm_exit(void)
{
	kcm_proc_exit();
	unregister_pernet_device(&kcm_net_ops);
	sock_unregister(PF_KCM);
	proto_unregister(&kcm_proto);
	destroy_workqueue(kcm_wq);

	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);
}
module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);