/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 */
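
/* TCB field definitions: word offset (_W), bit shift (_S), field mask (_M), and value builder (_V) */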
#define TCB_ULP_TYPE_W    0
#define TCB_ULP_TYPE_S    0
#define TCB_ULP_TYPE_M    0xfULL
#define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)

#define TCB_ULP_RAW_W    0
#define TCB_ULP_RAW_S    4
#define TCB_ULP_RAW_M    0xffULL
#define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)
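
/* TLS-related flag bits written into the TCB to enable and control inline TLS processing */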
#define TF_TLS_KEY_SIZE_S    7
#define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)

#define TF_TLS_CONTROL_S    2
#define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)

#define TF_TLS_ACTIVE_S    1
#define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)

#define TF_TLS_ENABLE_S    0
#define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)

#define TF_RX_QUIESCE_S    15
#define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)

/*
 * Max receive window supported by HW in bytes. Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)

/*
 * Min receive window. We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))

/* ulp_mem_io + ulptx_idata + payload + padding */
#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)

/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
        (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
#define TX_TLSHDR_LEN \
        (sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
         sizeof(struct sge_opaque_hdr))
#define TXDATA_SKB_LEN 128
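
/* TLS record types carried in the CPL_TX_TLS_SFO work request */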
enum {
        CPL_TX_TLS_SFO_TYPE_CCS,
        CPL_TX_TLS_SFO_TYPE_ALERT,
        CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
        CPL_TX_TLS_SFO_TYPE_DATA,
        CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
};
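
/* TLS record header content types (values follow the on-the-wire TLS ContentType codes) */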
enum {
        TLS_HDR_TYPE_CCS = 20,
        TLS_HDR_TYPE_HANDSHAKE = 22,
        TLS_HDR_TYPE_HEARTBEAT = 24,
};

typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
extern struct request_sock_ops chtls_rsk_ops;
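
/* Per-skb state, kept in skb->cb, for replies whose handling is deferred to process context */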
struct deferred_skb_cb {
        defer_handler_t handler;
        struct chtls_dev *dev;
};

#define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
#define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
#define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
#define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)

#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)

#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
#define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)
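
/* Queue @skb for deferred handling: @handler is called with @dev and @skb later, from process context */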
void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
                       defer_handler_t handler);

/*
 * Returns true if the socket is in one of the supplied states.
 */
static inline unsigned int sk_in_state(const struct sock *sk,
                                       unsigned int states)
{
        return states & (1 << sk->sk_state);
}

static void chtls_rsk_destructor(struct request_sock *req)
{
}
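
/* Set up the chtls request_sock_ops from the base TCP proto and attach it to the chtls proto */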
static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
                                      struct request_sock_ops *chtls_tcp_ops,
                                      struct proto *tcp_prot, int family)
{
        memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
        chtls_tcp_ops->family = family;
        chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
        chtls_tcp_ops->destructor = chtls_rsk_destructor;
        chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
        chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
}
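
/* Free a request_sock allocated from the chtls slab, dropping the reference it holds on its listener */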
static inline void chtls_reqsk_free(struct request_sock *req)
{
        if (req->rsk_listener)
                sock_put(req->rsk_listener);
        kmem_cache_free(req->rsk_ops->slab, req);
}
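
/* Declare a work-queue handler with the signature expected by the workqueue API */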
#define DECLARE_TASK_FUNC(task, task_param) \
        static void task(struct work_struct *task_param)
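
/* Wake up any threads sleeping on the socket's wait queue */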
static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq)) {
                if (interruptable)
                        wake_up_interruptible(sk_sleep(sk));
                else
                        wake_up_all(sk_sleep(sk));
        }
        rcu_read_unlock();
}
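
/* Record the peer's port and our local port number in the request_sock */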
static inline void chtls_set_req_port(struct request_sock *oreq,
                                      __be16 source, __be16 dest)
{
        inet_rsk(oreq)->ir_rmt_port = source;
        inet_rsk(oreq)->ir_num = ntohs(dest);
}
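
/* Record the local and remote IPv4 addresses in the request_sock */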
static inline void chtls_set_req_addr(struct request_sock *oreq,
                                      __be32 local_ip, __be32 peer_ip)
{
        inet_rsk(oreq)->ir_loc_addr = local_ip;
        inet_rsk(oreq)->ir_rmt_addr = peer_ip;
}
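
/* Unlink @skb from the socket's receive queue and free it immediately */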
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
        skb_dst_set(skb, NULL);
        __skb_unlink(skb, &sk->sk_receive_queue);
        __kfree_skb(skb);
}
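
/* Unlink @skb from the socket's receive queue and drop a reference with kfree_skb() */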
static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
        skb_dst_set(skb, NULL);
        __skb_unlink(skb, &sk->sk_receive_queue);
        kfree_skb(skb);
}
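
/* Empty the connection's list of outstanding work-request skbs */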
static inline void chtls_reset_wr_list(struct chtls_sock *csk)
{
        csk->wr_skb_head = NULL;
        csk->wr_skb_tail = NULL;
}
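
/* Append @skb to the tail of the connection's outstanding work-request list */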
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
        WR_SKB_CB(skb)->next_wr = NULL;

        if (!csk->wr_skb_head)
                csk->wr_skb_head = skb;
        else
                WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
        csk->wr_skb_tail = skb;
}
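
/* Remove and return the skb at the head of the work-request list, or NULL if the list is empty */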
static inline struct sk_buff *dequeue_wr(struct sock *sk)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct sk_buff *skb = NULL;

        skb = csk->wr_skb_head;

        if (likely(skb)) {
                /* Don't bother clearing the tail */
                csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
                WR_SKB_CB(skb)->next_wr = NULL;
        }
        return skb;
}