/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 */

#ifndef __CHTLS_CM_H__
#define __CHTLS_CM_H__

/* TCB field shifts/masks; ULP_TYPE occupies bits 3:0 of TCB word 0 */
#define TCB_ULP_TYPE_W    0
#define TCB_ULP_TYPE_S    0
#define TCB_ULP_TYPE_M    0xfULL
#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)

/* ULP_RAW occupies bits 11:4; the TF_TLS_* flags below sit inside it */
#define TCB_ULP_RAW_W    0
#define TCB_ULP_RAW_S    4
#define TCB_ULP_RAW_M    0xffULL
#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)

#define TF_TLS_KEY_SIZE_S    7
#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)

#define TF_TLS_CONTROL_S    2
#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)

#define TF_TLS_ACTIVE_S    1
#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)

#define TF_TLS_ENABLE_S    0
#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)

#define TF_RX_QUIESCE_S    15
#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
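
/*
 * Composition sketch (hypothetical helper, not in the original header):
 * the TF_TLS_* macros position values inside the ULP_RAW field, and
 * TCB_ULP_RAW_V() then shifts the whole field into its TCB position; a
 * set-TCB-field style caller would pair this value with the field mask
 * TCB_ULP_RAW_V(TCB_ULP_RAW_M).
 */
static inline u64 chtls_ulp_raw_tls_enable_sketch(void)
{
	return TCB_ULP_RAW_V(TF_TLS_ENABLE_V(1ULL));
}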

/*
 * Max receive window supported by HW in bytes. Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)

/*
 * Min receive window. We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
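
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * per the comments above, a negotiated receive window would be kept
 * within [MIN_RCV_WND, MAX_RCV_WND].
 */
static inline unsigned int chtls_clamp_rcv_wnd_sketch(unsigned int wnd)
{
	return clamp(wnd, MIN_RCV_WND, MAX_RCV_WND);
}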

/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
#define TX_TLSHDR_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
	 sizeof(struct sge_opaque_hdr))
#define TXDATA_SKB_LEN 128
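
/*
 * Illustrative sketch (hypothetical helper): honoring the headroom rule
 * above, a TX allocation reserves TX_HEADER_LEN up front so the offload
 * work request and opaque SGE header can later be pushed in front of the
 * payload.
 */
static inline struct sk_buff *chtls_alloc_tx_skb_sketch(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + TX_HEADER_LEN, GFP_KERNEL);

	if (skb)
		skb_reserve(skb, TX_HEADER_LEN);
	return skb;
}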

enum {
	CPL_TX_TLS_SFO_TYPE_CCS,
	CPL_TX_TLS_SFO_TYPE_ALERT,
	CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
	CPL_TX_TLS_SFO_TYPE_DATA,
	CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
};

enum {
	TLS_HDR_TYPE_CCS = 20,
	TLS_HDR_TYPE_ALERT,
	TLS_HDR_TYPE_HANDSHAKE,
	TLS_HDR_TYPE_RECORD,
	TLS_HDR_TYPE_HEARTBEAT,
};
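
/*
 * Illustrative sketch (hypothetical helper): TLS_HDR_TYPE_* follow the
 * on-the-wire TLS ContentType numbering (20 = change_cipher_spec,
 * 21 = alert, 22 = handshake, 23 = application data, 24 = heartbeat),
 * while CPL_TX_TLS_SFO_TYPE_* are the hardware's own encoding, so a
 * translation between the two might look like this.
 */
static inline unsigned int chtls_sfo_type_sketch(unsigned char rec_type)
{
	switch (rec_type) {
	case TLS_HDR_TYPE_CCS:
		return CPL_TX_TLS_SFO_TYPE_CCS;
	case TLS_HDR_TYPE_ALERT:
		return CPL_TX_TLS_SFO_TYPE_ALERT;
	case TLS_HDR_TYPE_HANDSHAKE:
		return CPL_TX_TLS_SFO_TYPE_HANDSHAKE;
	case TLS_HDR_TYPE_HEARTBEAT:
		return CPL_TX_TLS_SFO_TYPE_HEARTBEAT;
	default:
		return CPL_TX_TLS_SFO_TYPE_DATA;
	}
}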

typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;
extern struct request_sock_ops chtls_rsk_opsv6;

struct deferred_skb_cb {
	defer_handler_t handler;
	struct chtls_dev *dev;
};

#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)

#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)

#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
#define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)

void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
		       defer_handler_t handler);
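
/*
 * Usage note (summary only; the queueing details live in the .c file):
 * chtls_defer_reply() records @handler in the skb's control block via
 * DEFERRED_SKB_CB() and defers the skb so a worker can later invoke
 * DEFERRED_SKB_CB(skb)->handler(dev, skb) in process context.
 */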

/*
 * Returns true if the socket is in one of the supplied states.
 */
static inline unsigned int sk_in_state(const struct sock *sk,
				       unsigned int states)
{
	return states & (1 << sk->sk_state);
}
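
/*
 * Usage sketch: the @states mask is built from TCPF_* flags, which are
 * already "1 << TCP_<state>" values, e.g.
 *
 *	sk_in_state(sk, TCPF_CLOSE | TCPF_CLOSE_WAIT)
 *
 * is non-zero when the socket is in either of those states.
 */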

static void chtls_rsk_destructor(struct request_sock *req)
{
	/* no chtls-private state to release; intentionally empty */
}
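
/*
 * Build a per-family request_sock_ops for chtls that reuses the stock TCP
 * request-sock slab cache; family, object size, and destructor are filled
 * in by hand after zeroing the ops.
 */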
static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
				      struct request_sock_ops *chtls_tcp_ops,
				      struct proto *tcp_prot, int family)
{
	memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
	chtls_tcp_ops->family = family;
	chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
	chtls_tcp_ops->destructor = chtls_rsk_destructor;
	chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
	chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
}

static inline void chtls_reqsk_free(struct request_sock *req)
{
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kmem_cache_free(req->rsk_ops->slab, req);
}

#define DECLARE_TASK_FUNC(task, task_param) \
		static void task(struct work_struct *task_param)
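
/*
 * Expansion example (hypothetical name):
 *
 *	DECLARE_TASK_FUNC(deferq_task_sketch, task_param)
 *	{
 *		...
 *	}
 *
 * defines "static void deferq_task_sketch(struct work_struct *task_param)",
 * i.e. a workqueue callback suitable for INIT_WORK().
 */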

static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq)) {
		if (interruptable)
			wake_up_interruptible(sk_sleep(sk));
		else
			wake_up_all(sk_sleep(sk));
	}
	rcu_read_unlock();
}

static inline void chtls_set_req_port(struct request_sock *oreq,
				      __be16 source, __be16 dest)
{
	inet_rsk(oreq)->ir_rmt_port = source;
	inet_rsk(oreq)->ir_num = ntohs(dest);	/* ir_num is host byte order */
}

static inline void chtls_set_req_addr(struct request_sock *oreq,
				      __be32 local_ip, __be32 peer_ip)
{
	inet_rsk(oreq)->ir_loc_addr = local_ip;
	inet_rsk(oreq)->ir_rmt_addr = peer_ip;
}
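
/*
 * Both helpers below unlink an skb from the socket receive queue and drop
 * its dst; chtls_free_skb() then frees with __kfree_skb() (caller known to
 * hold the last reference), while chtls_kfree_skb() uses the refcount-aware
 * kfree_skb().
 */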
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}

static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_set(skb, NULL);
	__skb_unlink(skb, &sk->sk_receive_queue);
	kfree_skb(skb);
}

static inline void chtls_reset_wr_list(struct chtls_sock *csk)
{
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
}

static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
	WR_SKB_CB(skb)->next_wr = NULL;

	/* hold an extra reference while the work request is outstanding */
	skb_get(skb);

	if (!csk->wr_skb_head)
		csk->wr_skb_head = skb;
	else
		WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
	csk->wr_skb_tail = skb;
}
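
/*
 * Usage sketch (hypothetical caller): work requests complete in submission
 * order, so a WR-ack path pops them FIFO and drops the extra reference
 * taken in enqueue_wr():
 *
 *	while (credits--) {
 *		struct sk_buff *wskb = dequeue_wr(sk);
 *
 *		if (!wskb)
 *			break;
 *		kfree_skb(wskb);
 *	}
 */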

static inline struct sk_buff *dequeue_wr(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb = NULL;

	skb = csk->wr_skb_head;

	if (likely(skb)) {
		/* Don't bother clearing the tail */
		csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
		WR_SKB_CB(skb)->next_wr = NULL;
	}
	return skb;
}
#endif /* __CHTLS_CM_H__ */