/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "chcr_algo.h"
#include "chcr_core.h"
#include "chcr_crypto.h"
#define MAX_IVS_PAGE		256
#define TLS_KEY_CONTEXT_SZ	64
#define CIPHER_BLOCK_SIZE	16
#define GCM_TAG_SIZE		16
#define KEY_ON_MEM_SZ		16
#define AEAD_EXPLICIT_DATA_SIZE	8
#define TLS_HEADER_LENGTH	5
#define SCMD_CIPH_MODE_AES_GCM	2
/* Any MFS size should work; this one matches OpenSSL's maximum record size */
#define TLS_MFS			16384
#define RSS_HDR sizeof(struct rss_header)
#define TLS_WR_CPL_LEN \
	(sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo))
enum {
	CHTLS_KEY_CONTEXT_DSGL,
	CHTLS_KEY_CONTEXT_IMM,
	CHTLS_KEY_CONTEXT_DDR,
};
/* Flags for return value of CPL message handlers */
enum {
	CPL_RET_BUF_DONE    = 1, /* buffer processing done */
	CPL_RET_BAD_MSG     = 2, /* bad CPL message */
	CPL_RET_UNKNOWN_TID = 4  /* unexpected unknown TID */
};
#define LISTEN_INFO_HASH_SIZE	32
#define RSPQ_HASH_BITS		5
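/*
 * LISTEN_INFO_HASH_SIZE buckets back the per-adapter listener lookup table
 * (listen_hash_tab below); 1 << RSPQ_HASH_BITS slots back the cache of
 * response-queue skbs (rspq_skb_cache).  Both are powers of two, presumably
 * so a hash value can be reduced with a simple mask.
 */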
struct listen_info {
	struct listen_info *next;  /* Link to next entry */
	struct sock *sk;           /* The listening socket */
	unsigned int stid;         /* The server TID */
};
enum {
	T4_LISTEN_START_PENDING,
	T4_LISTEN_STARTED
};
enum csk_flags {
	CSK_CALLBACKS_CHKD,	/* socket callbacks have been sanitized */
	CSK_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CSK_TX_MORE_DATA,	/* sending ULP data; don't set SHOVE bit */
	CSK_TX_WAIT_IDLE,	/* suspend Tx until in-flight data is ACKed */
	CSK_ABORT_SHUTDOWN,	/* shouldn't send more abort requests */
	CSK_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CSK_CLOSE_CON_REQUESTED,/* we've sent a close_conn_req */
	CSK_TX_DATA_SENT,	/* sent a TX_DATA WR on this connection */
	CSK_TX_FAILOVER,	/* Tx traffic failing over */
	CSK_UPDATE_RCV_WND,	/* need to update the receive window */
	CSK_RST_ABORTED,	/* outgoing RST was aborted */
	CSK_TLS_HANDSHK,	/* TLS handshake in progress */
	CSK_CONN_INLINE,	/* connection is offloaded to hardware */
};
enum chtls_cdev_state {
	CHTLS_CDEV_STATE_UP = 1
};
struct listen_ctx {
	struct sock *lsk;
	struct chtls_dev *cdev;
	struct sk_buff_head synq;
	u32 state;
};
struct key_map {
	unsigned long *addr;
	unsigned int start;
	unsigned int available;
	unsigned int size;
	spinlock_t lock; /* lock for key id request from map */
} __packed;
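/*
 * Key ids are handed out from the bitmap above under 'lock'; an id is
 * turned into an adapter memory address with keyid_to_addr(), declared
 * near the end of this header.
 */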
struct chtls_dev {
	struct tls_device tlsdev;
	struct list_head list;
	struct cxgb4_lld_info *lldi;
	struct pci_dev *pdev;
	struct listen_info *listen_hash_tab[LISTEN_INFO_HASH_SIZE];
	spinlock_t listen_lock; /* lock for listen list */
	struct net_device **ports;
	struct tid_info *tids;
	unsigned int pfvf;
	const unsigned short *mtus;

	struct idr hwtid_idr;
	struct idr stid_idr;

	spinlock_t idr_lock ____cacheline_aligned_in_smp;

	struct net_device *egr_dev[NCHAN * 2];
	struct sk_buff *rspq_skb_cache[1 << RSPQ_HASH_BITS];
	struct sk_buff *askb;

	struct sk_buff_head deferq;
	struct work_struct deferq_task;

	struct list_head list_node;
	struct list_head rcu_node;
	struct list_head na_node;
	unsigned int send_page_order;
	struct key_map kmap;
	unsigned int cdev_state;
};
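/*
 * CPL messages that cannot be handled directly in the receive path are
 * queued on deferq and drained later by deferq_task in process context.
 */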
struct chtls_hws {
	struct sk_buff_head sk_recv_queue;
	/* ... */
	struct tls_scmd scmd;
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
};
struct chtls_sock {
	struct sock *sk;
	struct chtls_dev *cdev;
	struct l2t_entry *l2t_entry;    /* pointer to the L2T entry */
	struct net_device *egress_dev;  /* TX_CHAN for act open retry */

	struct sk_buff_head txq;
	struct sk_buff *wr_skb_head;
	struct sk_buff *wr_skb_tail;
	struct sk_buff *ctrl_skb_cache;
	struct sk_buff *txdata_skb_cache; /* abort path messages */
	struct kref kref;
	unsigned long flags;
	/* ... */
	u32 hwtid;              /* TCP Control Block ID */
	u32 mtu_idx;            /* MTU table index */
	/* ... */
	void *passive_reap_next;        /* placeholder for passive */
	struct chtls_hws tlshws;
	struct synq {
		struct sk_buff *next;
		struct sk_buff *prev;
	} synq;
	struct listen_ctx *listen_ctx;
};
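/*
 * A chtls_sock is reference counted through 'kref'; see __chtls_sock_get()
 * and __chtls_sock_put() below.  It hangs off the owning struct sock via
 * sk_user_data and is recovered with rcu_dereference_sk_user_data().
 */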
struct tlsrx_cmp_hdr {
	u8 type;
	u8 version;
	__be16 length;

	__be64 tls_seq;
	__be16 reserved1;
	u8 res_to_mac_error;
} __packed;

/* res_to_mac_error fields */
#define TLSRX_HDR_PKT_INT_ERROR_S	4
#define TLSRX_HDR_PKT_INT_ERROR_M	0x1
#define TLSRX_HDR_PKT_INT_ERROR_V(x) \
	((x) << TLSRX_HDR_PKT_INT_ERROR_S)
#define TLSRX_HDR_PKT_INT_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_INT_ERROR_S) & TLSRX_HDR_PKT_INT_ERROR_M)
#define TLSRX_HDR_PKT_INT_ERROR_F	TLSRX_HDR_PKT_INT_ERROR_V(1U)

#define TLSRX_HDR_PKT_SPP_ERROR_S	3
#define TLSRX_HDR_PKT_SPP_ERROR_M	0x1
#define TLSRX_HDR_PKT_SPP_ERROR_V(x)	((x) << TLSRX_HDR_PKT_SPP_ERROR_S)
#define TLSRX_HDR_PKT_SPP_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_SPP_ERROR_S) & TLSRX_HDR_PKT_SPP_ERROR_M)
#define TLSRX_HDR_PKT_SPP_ERROR_F	TLSRX_HDR_PKT_SPP_ERROR_V(1U)

#define TLSRX_HDR_PKT_CCDX_ERROR_S	2
#define TLSRX_HDR_PKT_CCDX_ERROR_M	0x1
#define TLSRX_HDR_PKT_CCDX_ERROR_V(x)	((x) << TLSRX_HDR_PKT_CCDX_ERROR_S)
#define TLSRX_HDR_PKT_CCDX_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_CCDX_ERROR_S) & TLSRX_HDR_PKT_CCDX_ERROR_M)
#define TLSRX_HDR_PKT_CCDX_ERROR_F	TLSRX_HDR_PKT_CCDX_ERROR_V(1U)

#define TLSRX_HDR_PKT_PAD_ERROR_S	1
#define TLSRX_HDR_PKT_PAD_ERROR_M	0x1
#define TLSRX_HDR_PKT_PAD_ERROR_V(x)	((x) << TLSRX_HDR_PKT_PAD_ERROR_S)
#define TLSRX_HDR_PKT_PAD_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_PAD_ERROR_S) & TLSRX_HDR_PKT_PAD_ERROR_M)
#define TLSRX_HDR_PKT_PAD_ERROR_F	TLSRX_HDR_PKT_PAD_ERROR_V(1U)

#define TLSRX_HDR_PKT_MAC_ERROR_S	0
#define TLSRX_HDR_PKT_MAC_ERROR_M	0x1
#define TLSRX_HDR_PKT_MAC_ERROR_V(x)	((x) << TLSRX_HDR_PKT_MAC_ERROR_S)
#define TLSRX_HDR_PKT_MAC_ERROR_G(x) \
	(((x) >> TLSRX_HDR_PKT_MAC_ERROR_S) & TLSRX_HDR_PKT_MAC_ERROR_M)
#define TLSRX_HDR_PKT_MAC_ERROR_F	TLSRX_HDR_PKT_MAC_ERROR_V(1U)
#define TLSRX_HDR_PKT_ERROR_M		0x1F
#define CONTENT_TYPE_ERROR		0x7F
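/*
 * The _S/_M/_V/_G/_F macros follow the usual cxgb4 field-access pattern:
 * _V(x) shifts a value into the field, _G(x) extracts it, and _F is the
 * one-bit field mask.  For example (illustrative only, not driver code):
 *
 *	struct tlsrx_cmp_hdr *hdr = cplhdr(skb);
 *
 *	if (hdr->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
 *		;	// some integrity/pad/MAC error bit is set
 *	if (TLSRX_HDR_PKT_MAC_ERROR_G(hdr->res_to_mac_error))
 *		;	// this record failed MAC verification
 */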
struct tls_key_wr {
	__be32 op_to_compl;
	__be32 len16;		/* command length */
	__be32 dlen;		/* data length in 32-byte units */
	/* ... */
	u8 reneg_to_write_rx;
};
struct tls_key_req {
	struct tls_key_wr wr;
	struct ulp_mem_rw req;
	struct ulptx_idata sc_imm;
};
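/*
 * A key-programming request is thus a single work request: the tls_key_wr
 * header, a ULP memory-write command addressing the key region in adapter
 * memory, and the key material itself carried as immediate data.
 */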
/*
 * This lives in skb->cb and is used to chain WRs in a linked list.
 */
struct wr_skb_cb {
	struct l2t_skb_cb l2t;          /* reserve space for l2t CB */
	struct sk_buff *next_wr;        /* next write request */
};
/* Per-skb backlog handler.  Run when a socket's backlog is processed. */
struct blog_skb_cb {
	void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
	struct chtls_dev *cdev;
};
/*
 * Similar to tcp_skb_cb but with ULP elements added to support TLS,
 * etc.
 */
struct ulp_skb_cb {
	struct wr_skb_cb wr;	/* reserve space for write request */
	u16 flags;		/* TCP-like flags */
	u8 psh;
	u8 ulp_mode;		/* ULP mode/submode of sk_buff */
	u32 seq;		/* TCP sequence number */
	union { /* ULP-specific fields */
		struct {
			u8  type;
			u8  ofld;
			u8  iv;
		} tls;
	} ulp;
};
#define ULP_SKB_CB(skb)  ((struct ulp_skb_cb *)&((skb)->cb[0]))
#define BLOG_SKB_CB(skb) ((struct blog_skb_cb *)(skb)->cb)
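/*
 * All of the control blocks above overlay skb->cb[] and must therefore fit
 * within its 48 bytes.  A build-time check could assert this (sketch, not
 * part of the original header):
 *
 *	BUILD_BUG_ON(sizeof(struct ulp_skb_cb) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */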
/*
 * Flags for ulp_skb_cb.flags.
 */
enum {
	ULPCB_FLAG_NEED_HDR  = 1 << 0, /* packet needs a TX_DATA_WR header */
	ULPCB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
	ULPCB_FLAG_BARRIER   = 1 << 2, /* set TX_WAIT_IDLE after sending */
	ULPCB_FLAG_HOLD      = 1 << 3, /* skb not ready for Tx yet */
	ULPCB_FLAG_COMPL     = 1 << 4, /* request WR completion */
	ULPCB_FLAG_URG       = 1 << 5, /* urgent data */
	ULPCB_FLAG_TLS_HDR   = 1 << 6, /* payload carries a TLS header */
	ULPCB_FLAG_NO_HDR    = 1 << 7, /* not an offload WR */
};
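/*
 * Typical use (illustrative): mark an skb that still needs its TX_DATA_WR
 * header and must not be coalesced with later sends:
 *
 *	ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NEED_HDR | ULPCB_FLAG_NO_APPEND;
 */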
/* The ULP mode/submode of an skbuff */
#define skb_ulp_mode(skb) (ULP_SKB_CB(skb)->ulp_mode)

#define TCP_PAGE(sk) ((sk)->sk_frag.page)
#define TCP_OFF(sk)  ((sk)->sk_frag.offset)
static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
{
	return container_of(tlsdev, struct chtls_dev, tlsdev);
}
static inline void csk_set_flag(struct chtls_sock *csk,
				enum csk_flags flag)
{
	__set_bit(flag, &csk->flags);
}
static inline void csk_reset_flag(struct chtls_sock *csk,
				  enum csk_flags flag)
{
	__clear_bit(flag, &csk->flags);
}
static inline bool csk_conn_inline(const struct chtls_sock *csk)
{
	return test_bit(CSK_CONN_INLINE, &csk->flags);
}
static inline int csk_flag(const struct sock *sk, enum csk_flags flag)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (!csk_conn_inline(csk))
		return 0;
	return test_bit(flag, &csk->flags);
}
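/*
 * csk_flag() refuses to report flags unless the connection is actually
 * offloaded (CSK_CONN_INLINE); csk_flag_nochk() below skips that guard
 * for callers that already hold a known-valid csk.
 */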
static inline int csk_flag_nochk(const struct chtls_sock *csk,
				 enum csk_flags flag)
{
	return test_bit(flag, &csk->flags);
}
static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}
static inline int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
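/*
 * "Negative advice" statuses are the hardware's way of reporting transient
 * trouble (retransmit, keepalive, or persist limits) on a connection; they
 * are advisory and should not by themselves tear the connection down.
 */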
static inline void process_cpl_msg(void (*fn)(struct sock *, struct sk_buff *),
				   struct sock *sk,
				   struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		BLOG_SKB_CB(skb)->backlog_rcv = fn;
		__sk_add_backlog(sk, skb);
	} else {
		fn(sk, skb);
	}
	bh_unlock_sock(sk);
}
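/*
 * This is the classic socket-backlog pattern: if a process currently owns
 * the socket lock, the CPL handler is stashed in the skb's control block
 * and the skb is parked on the backlog; the lock owner invokes
 * backlog_rcv() when it releases the lock.  Otherwise the handler runs
 * immediately in softirq context.
 */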
static inline void chtls_sock_free(struct kref *ref)
{
	struct chtls_sock *csk = container_of(ref, struct chtls_sock,
					      kref);
	kfree(csk);
}
static inline void __chtls_sock_put(const char *fn, struct chtls_sock *csk)
{
	kref_put(&csk->kref, chtls_sock_free);
}
static inline void __chtls_sock_get(const char *fn,
				    struct chtls_sock *csk)
{
	kref_get(&csk->kref);
}
static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
				 struct sk_buff *skb, int through_l2t)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (through_l2t) {
		/* send through L2T */
		cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
	} else {
		/* send directly */
		cxgb4_ofld_send(csk->egress_dev, skb);
	}
}
typedef int (*chtls_handler_func)(struct chtls_dev *, struct sk_buff *);
extern chtls_handler_func chtls_handlers[NUM_CPL_CMDS];
void chtls_install_cpl_ops(struct sock *sk);
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi);
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk);
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk);
void chtls_close(struct sock *sk, long timeout);
int chtls_disconnect(struct sock *sk, int flags);
void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
		  size_t len, int nonblock, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
		   int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt);
void chtls_tcp_push(struct sock *sk, int flags);
int chtls_push_frames(struct chtls_sock *csk, int comp);
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
unsigned int keyid_to_addr(int start_addr, int keyid);
void free_tls_keyid(struct sock *sk);