// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <net/inet_common.h>
/*
 * State transitions and actions for close.  Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:    action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};
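
/*
 * Allocate and initialise the per-connection offload state (struct
 * chtls_sock): a cached skb for control messages, the Tx queue, the
 * work-request list pointers and the TLS hardware state.  Allocations use
 * GFP_ATOMIC, so this may be called from atomic context.
 */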
static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
{
	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);

	if (!csk)
		return NULL;

	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
	if (!csk->txdata_skb_cache) {
		kfree(csk);
		return NULL;
	}

	kref_init(&csk->kref);
	csk->cdev = cdev;
	skb_queue_head_init(&csk->txq);
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
	csk->tlshws.txkey = -1;
	csk->tlshws.rxkey = -1;
	csk->tlshws.mfs = TLS_MFS;
	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
	return csk;
}
static void chtls_sock_release(struct kref *ref)
{
	struct chtls_sock *csk =
		container_of(ref, struct chtls_sock, kref);

	kfree(csk);
}

static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
					    struct sock *sk)
{
	struct net_device *ndev = cdev->ports[0];

	if (likely(!inet_sk(sk)->inet_rcv_saddr))
		return ndev;

	ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
	if (!ndev)
		return NULL;

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);
	return ndev;
}
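
/*
 * Decode the TCP options echoed by the hardware in the 'opt' word of the
 * PASS_ESTABLISH message and mirror them into the in-kernel tcp_sock:
 * MSS clamp, timestamps, window scaling and the advertised send window
 * scale.  If timestamps were negotiated, the TCP header length and MSS
 * clamp are adjusted accordingly.
 */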
static void assign_rxopt(struct sock *sk, unsigned int opt)
{
	const struct chtls_dev *cdev;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);
	cdev = csk->cdev;

	tp->tcp_header_len = sizeof(struct tcphdr);
	tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
	tp->mss_cache = tp->rx_opt.mss_clamp;
	tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
	tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
	tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
	SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
	if (!tp->rx_opt.wscale_ok)
		tp->rx_opt.rcv_wscale = 0;
	if (tp->rx_opt.tstamp_ok) {
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
	} else if (csk->opt2 & TSTAMPS_EN_F) {
		csk->opt2 &= ~TSTAMPS_EN_F;
		csk->mtu_idx = TCPOPT_MSS_G(opt);
	}
}
static void chtls_purge_receive_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		skb_dst_set(skb, (void *)NULL);
		kfree_skb(skb);
	}
}

static void chtls_purge_write_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->txq))) {
		sk->sk_wmem_queued -= skb->truesize;
		kfree_skb(skb);
	}
}

static void chtls_purge_recv_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *tlsk = &csk->tlshws;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
		skb_dst_set(skb, NULL);
		kfree_skb(skb);
	}
}
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);
	struct chtls_dev *cdev;

	cdev = (struct chtls_dev *)handle;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
}

static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
		__skb_trim(skb, 0);
		refcount_add(2, &skb->users);
	} else {
		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	}
	return skb;
}
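
/*
 * Build a CPL_ABORT_REQ work request for this connection and hand it to
 * send_or_defer().  chtls_send_reset() below first emits a FLOWC work
 * request if no Tx data has yet been sent on the connection, then marks
 * the abort as pending before issuing the request.
 */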
static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct cpl_abort_req *req;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (!skb)
		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));

	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	req->rsvd0 = htonl(tp->snd_nxt);
	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
	req->cmd = mode;
	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
}

static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
		     !csk->cdev)) {
		if (sk->sk_state == TCP_SYN_RECV)
			csk_set_flag(csk, CSK_RST_ABORTED);
		goto out;
	}

	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send tx flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
	chtls_purge_write_queue(sk);

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
	if (sk->sk_state != TCP_SYN_RECV)
		chtls_send_abort(sk, mode, skb);
	else
		goto out;

	return;
out:
	kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
{
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);
}

static void tcp_uncork(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->nonagle & TCP_NAGLE_CORK) {
		tp->nonagle &= ~TCP_NAGLE_CORK;
		chtls_tcp_push(sk, 0);
	}
}
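
/*
 * Queue a CPL_CLOSE_CON_REQ work request on the connection's Tx queue to
 * ask the adapter to send a FIN, and push any pending frames unless the
 * connection is still in SYN_SENT.
 */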
static void chtls_close_conn(struct sock *sk)
{
	struct cpl_close_con_req *req;
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int tid;
	unsigned int len;

	len = roundup(sizeof(struct cpl_close_con_req), 16);
	csk = rcu_dereference_sk_user_data(sk);
	tid = csk->tid;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
			      FW_WR_IMMDLEN_V(sizeof(*req) -
					      sizeof(req->wr)));
	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
			       FW_WR_FLOWID_V(tid));

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	tcp_uncork(sk);
	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
	if (sk->sk_state != TCP_SYN_SENT)
		chtls_push_frames(csk, 1);
}
/*
 * Perform a state transition during close and return the actions indicated
 * for the transition.  Do not make this function inline, the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
static int make_close_transition(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];

	tcp_set_state(sk, next & TCP_STATE_MASK);
	return next & TCP_ACTION_FIN;
}
void chtls_close(struct sock *sk, long timeout)
{
	int data_lost, prev_state;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	lock_sock(sk);
	sk->sk_shutdown |= SHUTDOWN_MASK;

	data_lost = skb_queue_len(&sk->sk_receive_queue);
	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);

	if (sk->sk_state == TCP_CLOSE) {
		goto wait;
	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		release_tcp_port(sk);
		goto unlock;
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		sk->sk_prot->disconnect(sk, 0);
		release_tcp_port(sk);
		goto unlock;
	} else if (make_close_transition(sk)) {
		chtls_close_conn(sk);
	}

wait:
	if (timeout)
		sk_stream_wait_close(sk, timeout);

unlock:
	prev_state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);

	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
		if (skb)
			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
/*
 * Wait until a socket enters one of the given states.
 */
static int wait_for_states(struct sock *sk, unsigned int states)
{
	DECLARE_WAITQUEUE(wait, current);
	struct socket_wq _sk_wq;
	long current_timeo;
	int err = 0;

	current_timeo = 200;

	/*
	 * We want this to work even when there's no associated struct socket.
	 * In that case we provide a temporary wait_queue_head_t.
	 */
	if (!sk->sk_wq) {
		init_waitqueue_head(&_sk_wq.wait);
		_sk_wq.fasync_list = NULL;
		init_rcu_head_on_stack(&_sk_wq.rcu);
		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (!sk_in_state(sk, states)) {
		if (!current_timeo) {
			err = -EBUSY;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(current_timeo);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		release_sock(sk);
		if (!sk_in_state(sk, states))
			current_timeo = schedule_timeout(current_timeo);
		__set_current_state(TASK_RUNNING);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
		sk->sk_wq = NULL;
	return err;
}
int chtls_disconnect(struct sock *sk, int flags)
{
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);

	if (sk->sk_state != TCP_CLOSE) {
		sk->sk_err = ECONNRESET;
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		err = wait_for_states(sk, TCPF_CLOSE);
		if (err)
			return err;
	}
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
	return tcp_disconnect(sk, flags);
}

#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
void chtls_shutdown(struct sock *sk, int how)
{
	if ((how & SEND_SHUTDOWN) &&
	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
	    make_close_transition(sk))
		chtls_close_conn(sk);
}

void chtls_destroy_sock(struct sock *sk)
{
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	csk->ulp_mode = ULP_MODE_NONE;
	chtls_purge_write_queue(sk);
	kref_put(&csk->kref, chtls_sock_release);
	sk->sk_prot = &tcp_prot;
	sk->sk_prot->destroy(sk);
}
static void reset_listen_child(struct sock *child)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
	struct sk_buff *skb;

	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
			     sizeof(struct cpl_abort_req));

	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
	sock_orphan(child);
	INC_ORPHAN_COUNT(child);
	if (child->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(child);
}

static void chtls_disconnect_acceptq(struct sock *listen_sk)
{
	struct request_sock **pprev;

	pprev = ACCEPT_QUEUE(listen_sk);
	while (*pprev) {
		struct request_sock *req = *pprev;

		if (req->rsk_ops == &chtls_rsk_ops) {
			struct sock *child = req->sk;

			*pprev = req->dl_next;
			sk_acceptq_removed(listen_sk);
			reqsk_put(req);

			sock_hold(child);
			local_bh_disable();
			bh_lock_sock(child);
			release_tcp_port(child);
			reset_listen_child(child);
			bh_unlock_sock(child);
			local_bh_enable();
			sock_put(child);
		} else {
			pprev = &req->dl_next;
		}
	}
}
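
/*
 * Offloaded listening sockets are tracked in a small hash table keyed on
 * the socket pointer; each entry maps a listening socket to the server
 * TID (stid) allocated for it in the adapter.
 */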
static int listen_hashfn(const struct sock *sk)
{
	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
}

static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
					   struct sock *sk,
					   unsigned int stid)
{
	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int key = listen_hashfn(sk);

		p->sk = sk;
		p->stid = stid;
		spin_lock(&cdev->listen_lock);
		p->next = cdev->listen_hash_tab[key];
		cdev->listen_hash_tab[key] = p;
		spin_unlock(&cdev->listen_lock);
	}
	return p;
}

static int listen_hash_find(struct chtls_dev *cdev,
			    struct sock *sk)
{
	struct listen_info *p;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);

	spin_lock(&cdev->listen_lock);
	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

static int listen_hash_del(struct chtls_dev *cdev,
			   struct sock *sk)
{
	struct listen_info *p, **prev;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);
	prev = &cdev->listen_hash_tab[key];

	spin_lock(&cdev->listen_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}
static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
{
	struct request_sock *req;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(child);
	req = csk->passive_reap_next;

	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
	chtls_reqsk_free(req);
	csk->passive_reap_next = NULL;
}

static void chtls_reset_synq(struct listen_ctx *listen_ctx)
{
	struct sock *listen_sk = listen_ctx->lsk;

	while (!skb_queue_empty(&listen_ctx->synq)) {
		struct chtls_sock *csk =
			container_of((struct synq *)__skb_dequeue
				(&listen_ctx->synq), struct chtls_sock, synq);
		struct sock *child = csk->sk;

		cleanup_syn_rcv_conn(child, listen_sk);
		sock_hold(child);
		local_bh_disable();
		bh_lock_sock(child);
		release_tcp_port(child);
		reset_listen_child(child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}
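
/*
 * Offload an IPv4 listening socket: allocate a server TID (stid), record
 * the socket in the listen hash table and ask the adapter to create a
 * hardware server for the bound address and port.
 */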
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
	struct net_device *ndev;
	struct listen_ctx *ctx;
	struct adapter *adap;
	struct port_info *pi;
	int stid;
	int ret;

	if (sk->sk_family != PF_INET)
		return -EAGAIN;

	ndev = chtls_ipv4_netdev(cdev, sk);
	if (!ndev)
		return -EBADF;

	pi = netdev_priv(ndev);
	adap = pi->adapter;
	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
		return -EBADF;

	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
		return -EADDRINUSE;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	ctx->lsk = sk;
	ctx->cdev = cdev;
	ctx->state = T4_LISTEN_START_PENDING;
	skb_queue_head_init(&ctx->synq);

	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
	if (stid < 0)
		goto free_ctx;

	sock_hold(sk);
	if (!listen_hash_add(cdev, sk, stid))
		goto free_stid;

	ret = cxgb4_create_server(ndev, stid,
				  inet_sk(sk)->inet_rcv_saddr,
				  inet_sk(sk)->inet_sport, 0,
				  cdev->lldi->rxq_ids[0]);
	if (ret > 0)
		ret = net_xmit_errno(ret);
	if (ret)
		goto del_hash;
	return 0;
del_hash:
	listen_hash_del(cdev, sk);
free_stid:
	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
	sock_put(sk);
free_ctx:
	kfree(ctx);
	module_put(THIS_MODULE);
	return -EBADF;
}

void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *listen_ctx;
	int stid;

	stid = listen_hash_del(cdev, sk);
	if (stid < 0)
		return;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	chtls_reset_synq(listen_ctx);

	cxgb4_remove_server(cdev->lldi->ports[0], stid,
			    cdev->lldi->rxq_ids[0], 0);
	chtls_disconnect_acceptq(sk);
}
static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int stid = GET_TID(rpl);
	struct listen_ctx *listen_ctx;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	if (!listen_ctx)
		return CPL_RET_BUF_DONE;

	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
		listen_ctx->state = T4_LISTEN_STARTED;
		return CPL_RET_BUF_DONE;
	}

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}
	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return 0;
}

static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *listen_ctx;
	unsigned int stid;
	void *data;

	stid = GET_TID(rpl);
	data = lookup_stid(cdev->tids, stid);
	listen_ctx = (struct listen_ctx *)data;

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}

	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return 0;
}
static void chtls_purge_wr_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = dequeue_wr(sk)) != NULL)
		kfree_skb(skb);
}

static void chtls_release_resources(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	unsigned int tid = csk->tid;
	struct tid_info *tids;

	if (!cdev)
		return;

	tids = cdev->tids;
	kfree_skb(csk->txdata_skb_cache);
	csk->txdata_skb_cache = NULL;

	if (csk->wr_credits != csk->wr_max_credits) {
		chtls_purge_wr_queue(sk);
		chtls_reset_wr_list(csk);
	}

	if (csk->l2t_entry) {
		cxgb4_l2t_release(csk->l2t_entry);
		csk->l2t_entry = NULL;
	}

	cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
	sock_put(sk);
}

static void chtls_conn_done(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		chtls_purge_receive_queue(sk);
	sk_wakeup_sleepers(sk, 0);
	tcp_done(sk);
}
static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
{
	/*
	 * If the server is still open we clean up the child connection,
	 * otherwise the server already did the clean up as it was purging
	 * its SYN queue and the skb was just sitting in its backlog.
	 */
	if (likely(parent->sk_state == TCP_LISTEN)) {
		cleanup_syn_rcv_conn(child, parent);
		/* Without the below call to sock_orphan,
		 * we leak the socket resource with syn_flood test
		 * as inet_csk_destroy_sock will not be called
		 * in tcp_done since SOCK_DEAD flag is not set.
		 * Kernel handles this differently where new socket is
		 * created only after 3 way handshake is done.
		 */
		sock_orphan(child);
		percpu_counter_inc((child)->sk_prot->orphan_count);
		chtls_release_resources(child);
		chtls_conn_done(child);
	} else {
		if (csk_flag(child, CSK_RST_ABORTED)) {
			chtls_release_resources(child);
			chtls_conn_done(child);
		}
	}
}

static void pass_open_abort(struct sock *child, struct sock *parent,
			    struct sk_buff *skb)
{
	do_abort_syn_rcv(child, parent);
	kfree_skb(skb);
}

static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
{
	pass_open_abort(skb->sk, lsk, skb);
}

static void chtls_pass_open_arp_failure(struct sock *sk,
					struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *parent;
	void *data;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	/*
	 * If the connection is being aborted due to the parent listening
	 * socket going away there's nothing to do, the ABORT_REQ will close
	 * the connection.
	 */
	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
		kfree_skb(skb);
		return;
	}

	oreq = csk->passive_reap_next;
	data = lookup_stid(cdev->tids, oreq->ts_recent);
	parent = ((struct listen_ctx *)data)->lsk;

	bh_lock_sock(parent);
	if (!sock_owned_by_user(parent)) {
		pass_open_abort(sk, parent, skb);
	} else {
		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
		__sk_add_backlog(parent, skb);
	}
	bh_unlock_sock(parent);
}
static void chtls_accept_rpl_arp_failure(void *handle,
					 struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)handle;

	sock_hold(sk);
	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
	sock_put(sk);
}
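
/*
 * Pick the MTU index to advertise for a passive-open connection.  The
 * advertised MSS is clamped against the peer's MSS option, the path MTU
 * and any user-imposed limit, then matched against the adapter's MTU
 * table with cxgb4_best_aligned_mtu().
 */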
static unsigned int chtls_select_mss(const struct chtls_sock *csk,
				     unsigned int pmtu,
				     struct cpl_pass_accept_req *req)
{
	struct chtls_dev *cdev;
	struct dst_entry *dst;
	unsigned int tcpoptsz;
	unsigned int iphdrsz;
	unsigned int mtu_idx;
	struct tcp_sock *tp;
	unsigned int mss;
	struct sock *sk;

	mss = ntohs(req->tcpopt.mss);
	sk = csk->sk;
	dst = __sk_dst_get(sk);
	cdev = csk->cdev;
	tp = tcp_sk(sk);
	tcpoptsz = 0;

	iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
	if (req->tcpopt.tstamp)
		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);

	tp->advmss = dst_metric_advmss(dst);
	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
		tp->advmss = USER_MSS(tp);
	if (tp->advmss > pmtu - iphdrsz)
		tp->advmss = pmtu - iphdrsz;
	if (mss && tp->advmss > mss)
		tp->advmss = mss;

	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
					    iphdrsz + tcpoptsz,
					    tp->advmss - tcpoptsz,
					    8, &mtu_idx);
	tp->advmss -= iphdrsz;

	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
	return mtu_idx;
}
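
/*
 * Choose a receive window scale so that the available buffer space,
 * capped at MAX_RCV_WND and the window clamp, fits in a 16-bit window.
 */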
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;
	if (win_clamp && win_clamp < space)
		space = win_clamp;

	if (wscale_ok) {
		while (wscale < 14 && (65535 << wscale) < space)
			wscale++;
	}
	return wscale;
}
static void chtls_pass_accept_rpl(struct sk_buff *skb,
				  struct cpl_pass_accept_req *req,
				  unsigned int tid)
{
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi;
	const struct tcphdr *tcph;
	const struct tcp_sock *tp;
	struct chtls_sock *csk;
	unsigned int len;
	struct sock *sk;
	u32 opt2, hlen;
	u64 opt0;

	sk = skb->sk;
	csk = sk->sk_user_data;
	tp = tcp_sk(sk);
	lldi = csk->cdev->lldi;
	len = roundup(sizeof(*rpl5), 16);

	rpl5 = __skb_put_zero(skb, len);
	INIT_TP_WR(rpl5, tid);

	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     tid));
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TLS) |
	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	hlen = ntohl(req->hdr_len);

	tcph = (struct tcphdr *)((u8 *)(req + 1) +
			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
	opt2 |= T5_OPT_2_VALID_F;
	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
static void inet_inherit_port(struct inet_hashinfo *hash_info,
			      struct sock *lsk, struct sock *newsk)
{
	local_bh_disable();
	__inet_inherit_port(lsk, newsk);
	local_bh_enable();
}

static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol) {
		kfree_skb(skb);
		return 0;
	}
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}
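
/*
 * Size the send and receive windows from the port's link speed: the
 * 256KB baseline is multiplied by the link speed expressed in units of
 * 10Gb/s.
 */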
static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}
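
/*
 * Create the child socket for an offloaded passive open: clone the
 * listener via tcp_create_openreq_child(), resolve the L2T entry for the
 * peer, attach a new chtls_sock and copy addresses, window and queue
 * parameters from the incoming CPL_PASS_ACCEPT_REQ.
 */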
static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct tls_context *ctx;
	struct net_device *ndev;
	struct chtls_sock *csk;
	struct dst_entry *dst;
	struct neighbour *n;
	struct tcp_sock *tp;
	struct sock *newsk;
	u16 port_id;
	int rxq_idx;
	int step;

	iph = (const struct iphdr *)network_hdr;
	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
	if (!newsk)
		goto free_oreq;

	dst = inet_csk_route_child_sock(lsk, newsk, oreq);
	if (!dst)
		goto free_sk;

	n = dst_neigh_lookup(dst, &iph->saddr);
	if (!n)
		goto free_sk;

	ndev = n->dev;
	if (!ndev)
		goto free_dst;
	port_id = cxgb4_port_idx(ndev);

	csk = chtls_sock_create(cdev);
	if (!csk)
		goto free_dst;

	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
	if (!csk->l2t_entry)
		goto free_csk;

	newsk->sk_user_data = csk;
	newsk->sk_backlog_rcv = chtls_backlog_rcv;

	tp = tcp_sk(newsk);
	newinet = inet_sk(newsk);

	newinet->inet_daddr = iph->saddr;
	newinet->inet_rcv_saddr = iph->daddr;
	newinet->inet_saddr = iph->daddr;

	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	sk_setup_caps(newsk, dst);
	ctx = tls_get_ctx(lsk);
	newsk->sk_destruct = ctx->sk_destruct;
	csk->sk = newsk;
	csk->passive_reap_next = oreq;
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
	rxq_idx = port_id * step;
	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
			port_id * step;
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   sock_net(newsk)->
						ipv4.sysctl_tcp_window_scaling,
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
	csk_set_flag(csk, CSK_CONN_INLINE);
	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */

	return newsk;
free_csk:
	chtls_sock_release(&csk->kref);
free_dst:
	dst_release(dst);
free_sk:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
free_oreq:
	chtls_reqsk_free(oreq);
	return NULL;
}

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static void mk_tid_release(struct sk_buff *skb,
			   unsigned int chan, unsigned int tid)
{
	struct cpl_tid_release *req;
	unsigned int len;

	len = roundup(sizeof(struct cpl_tid_release), 16);
	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
}

static int chtls_get_module(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!try_module_get(icsk->icsk_ulp_ops->owner))
		return -1;

	return 0;
}
static void chtls_pass_accept_request(struct sock *sk,
				      struct sk_buff *skb)
{
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct vlan_ethhdr *vlan_eh;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct tcphdr *tcph;
	struct sock *newsk;
	struct ethhdr *eh;
	struct iphdr *iph;
	void *network_hdr;
	unsigned int stid;
	unsigned int len;
	unsigned int tid;
	bool th_ecn, ect;
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	u16 eth_hdr_len;
	bool ecn_ok;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
	cdev = BLOG_SKB_CB(skb)->cdev;
	newsk = lookup_tid(cdev->tids, tid);
	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	if (newsk) {
		pr_info("tid (%d) already in use\n", tid);
		return;
	}

	len = roundup(sizeof(*rpl), 16);
	reply_skb = alloc_skb(len, GFP_ATOMIC);
	if (!reply_skb) {
		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
		kfree_skb(skb);
		return;
	}

	if (sk->sk_state != TCP_LISTEN)
		goto reject;

	if (inet_csk_reqsk_queue_is_full(sk))
		goto reject;

	if (sk_acceptq_is_full(sk))
		goto reject;

	oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
	if (!oreq)
		goto reject;

	oreq->rsk_rcv_wnd = 0;
	oreq->rsk_window_clamp = 0;
	oreq->cookie_ts = 0;
	oreq->ts_recent = 0;

	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
		network_hdr = (void *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		network_hdr = (void *)(vlan_eh + 1);
	}
	if (iph->version != 0x4)
		goto free_oreq;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)req);

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
	ip_dsfield = ipv4_get_dsfield(iph);
	if (req->tcpopt.wsf <= 14 &&
	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
	th_ecn = tcph->ece && tcph->cwr;
	if (th_ecn) {
		ect = !INET_ECN_is_not_ect(ip_dsfield);
		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
			inet_rsk(oreq)->ecn_ok = 1;
	}

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
		goto reject;

	if (chtls_get_module(newsk))
		goto reject;
	inet_csk_reqsk_queue_added(sk);
	reply_skb->sk = newsk;
	chtls_install_cpl_ops(newsk);
	cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family);
	csk = rcu_dereference_sk_user_data(newsk);
	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	csk->listen_ctx = listen_ctx;
	__skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq);
	chtls_pass_accept_rpl(reply_skb, req, tid);
	kfree_skb(skb);
	return;

free_oreq:
	chtls_reqsk_free(oreq);
reject:
	mk_tid_release(reply_skb, 0, tid);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

/*
 * Handle a CPL_PASS_ACCEPT_REQ message.
 */
static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *ctx;
	unsigned int stid;
	unsigned int tid;
	struct sock *lsk;
	void *data;

	stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	tid = GET_TID(req);

	data = lookup_stid(cdev->tids, stid);
	if (!data)
		return 1;

	ctx = (struct listen_ctx *)data;
	lsk = ctx->lsk;

	if (unlikely(tid_out_of_range(cdev->tids, tid))) {
		pr_info("passive open TID %u too large\n", tid);
		return 1;
	}

	BLOG_SKB_CB(skb)->cdev = cdev;
	process_cpl_msg(chtls_pass_accept_request, lsk, skb);
	return 0;
}

/*
 * Completes some final bits of initialization for just established
 * connections and changes their state to TCP_ESTABLISHED.
 *
 * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1.
 */
static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->pushed_seq = snd_isn;
	tp->write_seq = snd_isn;
	tp->snd_nxt = snd_isn;
	tp->snd_una = snd_isn;
	inet_sk(sk)->inet_id = prandom_u32();
	assign_rxopt(sk, opt);

	if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
		tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10);

	smp_mb();
	tcp_set_state(sk, TCP_ESTABLISHED);
}
static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *abort_skb;

	abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
	if (abort_skb)
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
}

static struct sock *reap_list;
static DEFINE_SPINLOCK(reap_list_lock);

/*
 * Process the reap list.
 */
DECLARE_TASK_FUNC(process_reap_list, task_param)
{
	spin_lock_bh(&reap_list_lock);
	while (reap_list) {
		struct sock *sk = reap_list;
		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

		reap_list = csk->passive_reap_next;
		csk->passive_reap_next = NULL;
		spin_unlock(&reap_list_lock);
		sock_hold(sk);

		bh_lock_sock(sk);
		chtls_abort_conn(sk, NULL);
		sock_orphan(sk);
		if (sk->sk_state == TCP_CLOSE)
			inet_csk_destroy_sock(sk);
		bh_unlock_sock(sk);
		sock_put(sk);

		spin_lock(&reap_list_lock);
	}
	spin_unlock_bh(&reap_list_lock);
}

static DECLARE_WORK(reap_task, process_reap_list);

static void add_to_reap_list(struct sock *sk)
{
	struct chtls_sock *csk = sk->sk_user_data;

	local_bh_disable();
	bh_lock_sock(sk);
	release_tcp_port(sk); /* release the port immediately */

	spin_lock(&reap_list_lock);
	csk->passive_reap_next = reap_list;
	reap_list = sk;
	if (!csk->passive_reap_next)
		schedule_work(&reap_task);
	spin_unlock(&reap_list_lock);
	bh_unlock_sock(sk);
	local_bh_enable();
}
static void add_pass_open_to_parent(struct sock *child, struct sock *lsk,
				    struct chtls_dev *cdev)
{
	struct request_sock *oreq;
	struct chtls_sock *csk;

	if (lsk->sk_state != TCP_LISTEN)
		return;

	csk = child->sk_user_data;
	oreq = csk->passive_reap_next;
	csk->passive_reap_next = NULL;

	reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);

	if (sk_acceptq_is_full(lsk)) {
		chtls_reqsk_free(oreq);
		add_to_reap_list(child);
	} else {
		refcount_set(&oreq->rsk_refcnt, 1);
		inet_csk_reqsk_queue_add(lsk, oreq, child);
		lsk->sk_data_ready(lsk);
	}
}

static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb)
{
	struct sock *child = skb->sk;

	skb->sk = NULL;
	add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev);
	kfree_skb(skb);
}
static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct sock *lsk, *sk;
	unsigned int hwtid;
	unsigned int stid;
	void *data;

	hwtid = GET_TID(req);
	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		kfree_skb(skb);
	} else {
		csk = sk->sk_user_data;
		csk->wr_max_credits = 64;
		csk->wr_credits = 64;
		csk->wr_unacked = 0;
		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
		stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
		sk->sk_state_change(sk);
		if (unlikely(sk->sk_socket))
			sk_wake_async(sk, 0, POLL_OUT);

		data = lookup_stid(cdev->tids, stid);
		lsk = ((struct listen_ctx *)data)->lsk;

		bh_lock_sock(lsk);
		if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) {
			/* removed from synq */
			bh_unlock_sock(lsk);
			kfree_skb(skb);
			goto unlock;
		}

		if (likely(!sock_owned_by_user(lsk))) {
			kfree_skb(skb);
			add_pass_open_to_parent(sk, lsk, cdev);
		} else {
			skb->sk = sk;
			BLOG_SKB_CB(skb)->cdev = cdev;
			BLOG_SKB_CB(skb)->backlog_rcv =
						bl_add_pass_open_to_parent;
			__sk_add_backlog(lsk, skb);
		}
		bh_unlock_sock(lsk);
	}
unlock:
	bh_unlock_sock(sk);
	return 0;
}

/*
 * Handle receipt of an urgent pointer.
 */
static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);

	urg_seq--;
	if (tp->urg_data && !after(urg_seq, tp->urg_seq))
		return;	/* duplicate pointer */

	sk_send_sigurg(sk);
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) &&
	    tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		tp->copied_seq++;
		if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len)
			chtls_free_skb(sk, skb);
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = urg_seq;
}
static void check_sk_callbacks(struct chtls_sock *csk)
{
	struct sock *sk = csk->sk;

	if (unlikely(sk->sk_user_data &&
		     !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD)))
		csk_set_flag(csk, CSK_CALLBACKS_CHKD);
}

/*
 * Handles Rx data that arrives in a state where the socket isn't accepting
 * new data.
 */
static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
{
	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
		chtls_abort_conn(sk, skb);

	kfree_skb(skb);
}
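
/*
 * Deliver a plain (non-TLS) CPL_RX_DATA payload to the socket receive
 * queue, updating rcv_nxt, urgent-data state and the delayed-ACK mode
 * from the CPL header before waking any reader.
 */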
static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->psh = hdr->psh;
	skb_ulp_mode(skb) = ULP_MODE_NONE;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr) + RSS_HDR);
	if (!skb->data_len)
		__skb_trim(skb, ntohs(hdr->len));

	if (unlikely(hdr->urg))
		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
		     tp->urg_seq - tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	if (unlikely(hdr->dack_mode != csk->delack_mode)) {
		csk->delack_mode = hdr->dack_mode;
		csk->delack_seq = tp->rcv_nxt;
	}

	tcp_hdr(skb)->fin = 0;
	tp->rcv_nxt += skb->len;

	__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}

static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_data, sk, skb);
	return 0;
}
static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_tls_data *hdr = cplhdr(skb);
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
		handle_excess_rx(sk, skb);
		return;
	}

	ULP_SKB_CB(skb)->seq = ntohl(hdr->seq);
	ULP_SKB_CB(skb)->flags = 0;
	skb_ulp_mode(skb) = ULP_MODE_TLS;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*hdr));
	if (!skb->data_len)
		__skb_trim(skb,
			   CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)));

	if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq -
		     tp->rcv_nxt < skb->len))
		tp->urg_data = TCP_URG_VALID |
			       skb->data[tp->urg_seq - tp->rcv_nxt];

	tcp_hdr(skb)->fin = 0;
	tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd));
	__skb_queue_tail(&tlsk->sk_recv_queue, skb);
}

static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_tls_data *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_recv_pdu, sk, skb);
	return 0;
}

static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen)
{
	struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb);

	skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length);
	tls_cmp_hdr->length = ntohs((__force __be16)nlen);
}
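
/*
 * Completion handler for a received TLS record: the CPL_RX_TLS_CMP
 * carries the record header, which is paired with the payload previously
 * queued on the per-connection sk_recv_queue; both are then moved to the
 * socket receive queue.
 */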
static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
{
	struct tlsrx_cmp_hdr *tls_hdr_pkt;
	struct cpl_rx_tls_cmp *cmp_cpl;
	struct sk_buff *skb_rec;
	struct chtls_sock *csk;
	struct chtls_hws *tlsk;
	struct tcp_sock *tp;

	cmp_cpl = cplhdr(skb);
	csk = rcu_dereference_sk_user_data(sk);
	tlsk = &csk->tlshws;
	tp = tcp_sk(sk);

	ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq);
	ULP_SKB_CB(skb)->flags = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cmp_cpl));
	tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
	if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
		tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
	if (!skb->data_len)
		__skb_trim(skb, TLS_HEADER_LENGTH);

	tp->rcv_nxt +=
		CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));

	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
	skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
	if (!skb_rec) {
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	} else {
		chtls_set_hdrlen(skb, tlsk->pldlen);
		tlsk->pldlen = 0;
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		check_sk_callbacks(csk);
		sk->sk_data_ready(sk);
	}
}

static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_rx_tls_cmp *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	skb_dst_set(skb, NULL);
	process_cpl_msg(chtls_rx_hdr, sk, skb);

	return 0;
}
static void chtls_timewait(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_nxt++;
	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
	tp->srtt_us = 0;
	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
}

static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		tcp_set_state(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	default:
		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		/* Do not send POLL_HUP for half duplex close. */

		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
	kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	tp->snd_una = ntohl(rpl->snd_nxt) - 1;  /* exclude FIN */

	switch (sk->sk_state) {
	case TCP_CLOSING:
		chtls_release_resources(sk);
		if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
			chtls_conn_done(sk);
		else
			chtls_timewait(sk);
		break;
	case TCP_LAST_ACK:
		chtls_release_resources(sk);
		chtls_conn_done(sk);
		break;
	case TCP_FIN_WAIT1:
		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		else if (tcp_sk(sk)->linger2 < 0 &&
			 !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
			chtls_abort_conn(sk, skb);
		break;
	default:
		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
	}
	kfree_skb(skb);
}
static struct sk_buff *get_cpl_skb(struct sk_buff *skb,
				   size_t len, gfp_t gfp)
{
	if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) {
		WARN_ONCE(skb->len < len, "skb alloc error");
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid,
			     int cmd)
{
	struct cpl_abort_rpl *rpl = cplhdr(skb);

	INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid);
	rpl->cmd = cmd;
}

static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL | __GFP_NOFAIL);
	__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
	set_abort_rpl_wr(reply_skb, GET_TID(req),
			 (req->status & CPL_ABORT_NO_RST));
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1);
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
	kfree_skb(skb);
}

/*
 * Add an skb to the deferred skb queue for processing from process context.
 */
static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
			   defer_handler_t handler)
{
	DEFERRED_SKB_CB(skb)->handler = handler;
	spin_lock_bh(&cdev->deferq.lock);
	__skb_queue_tail(&cdev->deferq, skb);
	if (skb_queue_len(&cdev->deferq) == 1)
		schedule_work(&cdev->deferq_task);
	spin_unlock_bh(&cdev->deferq.lock);
}

static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
			   struct chtls_dev *cdev, int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
			      GFP_KERNEL);
	if (!reply_skb) {
		req->status = (queue << 1);
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
	kfree_skb(skb);

	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}

static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
				 struct chtls_dev *cdev,
				 int status, int queue)
{
	struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
	unsigned int tid;

	csk = rcu_dereference_sk_user_data(sk);
	tid = GET_TID(req);

	reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any());
	if (!reply_skb) {
		req->status = (queue << 1) | status;
		t4_defer_reply(skb, cdev, send_defer_abort_rpl);
		return;
	}

	set_abort_rpl_wr(reply_skb, tid, status);
	kfree_skb(skb);
	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
	if (csk_conn_inline(csk)) {
		struct l2t_entry *e = csk->l2t_entry;

		if (e && sk->sk_state != TCP_SYN_RECV) {
			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
			return;
		}
	}
	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
}

/*
 * This is run from a listener's backlog to abort a child connection in
 * SYN_RCV state (i.e., one on the listener's SYN queue).
 */
static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
{
	struct chtls_sock *csk;
	struct sock *child;
	int queue;

	child = skb->sk;
	csk = rcu_dereference_sk_user_data(child);
	queue = csk->txq_idx;

	skb->sk	= NULL;
	do_abort_syn_rcv(child, lsk);
	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
		       CPL_ABORT_NO_RST, queue);
}

static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct listen_ctx *listen_ctx;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *psk;
	void *ctx;

	csk = sk->sk_user_data;
	oreq = csk->passive_reap_next;
	cdev = csk->cdev;

	if (!oreq)
		return -1;

	ctx = lookup_stid(cdev->tids, oreq->ts_recent);
	if (!ctx)
		return -1;

	listen_ctx = (struct listen_ctx *)ctx;
	psk = listen_ctx->lsk;

	bh_lock_sock(psk);
	if (!sock_owned_by_user(psk)) {
		int queue = csk->txq_idx;

		do_abort_syn_rcv(sk, psk);
		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
	} else {
		skb->sk = sk;
		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
		__sk_add_backlog(psk, skb);
	}
	bh_unlock_sock(psk);
	return 0;
}
static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	int rst_status = CPL_ABORT_NO_RST;
	int queue = csk->txq_idx;

	if (is_neg_adv(req->status)) {
		if (sk->sk_state == TCP_SYN_RECV)
			chtls_set_tcb_tflag(sk, 0, 0);

		kfree_skb(skb);
		return;
	}

	csk_reset_flag(csk, CSK_ABORT_REQ_RCVD);

	if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) &&
	    !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send_tx_flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);

	if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		sk->sk_err = ETIMEDOUT;

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
			return;

		chtls_release_resources(sk);
		chtls_conn_done(sk);
	}

	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
}

static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) {
		csk_reset_flag(csk, CSK_ABORT_RPL_PENDING);
		if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) {
			if (sk->sk_state == TCP_SYN_SENT) {
				cxgb4_remove_tid(cdev->tids,
						 csk->port_id,
						 GET_TID(rpl),
						 sk->sk_family);
				sock_put(sk);
			}
			chtls_release_resources(sk);
			chtls_conn_done(sk);
		}
	}
	kfree_skb(skb);
}
static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
	void (*fn)(struct sock *sk, struct sk_buff *skb);
	unsigned int hwtid = GET_TID(req);
	struct sock *sk;
	u8 opcode;

	opcode = ((const struct rss_header *)cplhdr(skb))->opcode;

	sk = lookup_tid(cdev->tids, hwtid);
	if (!sk)
		goto rel_skb;

	switch (opcode) {
	case CPL_PEER_CLOSE:
		fn = chtls_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		fn = chtls_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		fn = chtls_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		fn = chtls_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	process_cpl_msg(fn, sk, skb);
	return 0;

rel_skb:
	kfree_skb(skb);
	return 0;
}
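
/*
 * Process a CPL_FW4_ACK credit return: free work-request skbs that have
 * been fully acknowledged, advance snd_una when the sequence number is
 * valid, and restart transmission if the Tx queue has pending data.
 */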
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;
	struct chtls_sock *csk = sk->sk_user_data;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 credits = hdr->credits;
	u32 snd_una;

	snd_una = ntohl(hdr->snd_una);
	csk->wr_credits += credits;

	if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits)
		csk->wr_unacked = csk->wr_max_credits - csk->wr_credits;

	while (credits) {
		struct sk_buff *pskb = csk->wr_skb_head;
		u32 csum;

		if (unlikely(!pskb)) {
			if (csk->wr_nondata)
				csk->wr_nondata -= credits;
			break;
		}
		csum = (__force u32)pskb->csum;
		if (unlikely(credits < csum)) {
			pskb->csum = (__force __wsum)(csum - credits);
			break;
		}
		dequeue_wr(sk);
		credits -= csum;
		kfree_skb(pskb);
	}
	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, tp->snd_una))) {
			kfree_skb(skb);
			return;
		}

		if (tp->snd_una != snd_una) {
			tp->snd_una = snd_una;
			tp->rcv_tstamp = tcp_time_stamp(tp);
			if (tp->snd_una == tp->snd_nxt &&
			    !csk_flag_nochk(csk, CSK_TX_FAILOVER))
				csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		}
	}

	if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) {
		unsigned int fclen16 = roundup(failover_flowc_wr_len, 16);

		csk->wr_credits -= fclen16;
		csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
		csk_reset_flag(csk, CSK_TX_FAILOVER);
	}
	if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0))
		sk->sk_write_space(sk);

	kfree_skb(skb);
}

static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int hwtid = GET_TID(rpl);
	struct sock *sk;

	sk = lookup_tid(cdev->tids, hwtid);
	if (unlikely(!sk)) {
		pr_err("can't find conn. for hwtid %u.\n", hwtid);
		return -EINVAL;
	}
	process_cpl_msg(chtls_rx_ack, sk, skb);

	return 0;
}
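
/*
 * CPL message dispatch table used by the upper-layer driver; incoming
 * messages are demultiplexed on the CPL opcode.
 */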
chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]   = chtls_pass_accept_req,
	[CPL_PASS_ESTABLISH]    = chtls_pass_establish,
	[CPL_RX_DATA]           = chtls_rx_data,
	[CPL_TLS_DATA]          = chtls_rx_pdu,
	[CPL_RX_TLS_CMP]        = chtls_rx_cmp,
	[CPL_PEER_CLOSE]        = chtls_conn_cpl,
	[CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
	[CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
	[CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
	[CPL_FW4_ACK]           = chtls_wr_ack,
};