// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/tls.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

static void __set_tcb_field_direct(struct chtls_sock *csk,
				   struct cpl_set_tcb_field *req, u16 word,
				   u64 mask, u64 val, u8 cookie, int no_reply)
{
	struct ulptx_idata *sc;

	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
	req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
				QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
	sc->len = htonl(0);
}

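/* Build a CPL_SET_TCB_FIELD request in @skb for this connection and steer
 * it to the control queue of the port the connection egresses on.
 */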
static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
			    u64 mask, u64 val, u8 cookie, int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct chtls_sock *csk;
	struct ulptx_idata *sc;
	unsigned int wrlen;

	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
	csk = rcu_dereference_sk_user_data(sk);

	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	__set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
}

/*
 * Send a control message to the HW; the message goes as immediate data and
 * the packet is freed immediately.
 */
static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
{
	struct cpl_set_tcb_field *req;
	unsigned int credits_needed;
	struct chtls_sock *csk;
	struct ulptx_idata *sc;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);

	skb = alloc_skb(wrlen, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	credits_needed = DIV_ROUND_UP(wrlen, 16);
	csk = rcu_dereference_sk_user_data(sk);

	__set_tcb_field(sk, skb, word, mask, val, 0, 1);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	csk->wr_credits -= credits_needed;
	csk->wr_unacked += credits_needed;
	enqueue_wr(csk, skb);
	ret = cxgb4_ofld_send(csk->egress_dev, skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

/*
 * Set one of the t_flags bits in the TCB.
 */
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
	return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
				   (u64)val << bit_pos);
}

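/* The helpers below update individual TCB words for the connection:
 * word 31 holds the key id, word 28 the receive sequence number, and the
 * RX quiesce bit lives in the t_flags word.
 */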
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
	return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
}

static int chtls_set_tcb_seqno(struct sock *sk)
{
	return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
}

static int chtls_set_tcb_quiesce(struct sock *sk, int val)
{
	return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
				   TF_RX_QUIESCE_V(val));
}

/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
	unsigned int num_key_ctx, bsize;
	int ksize;

	num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
	bsize = BITS_TO_LONGS(num_key_ctx);

	cdev->kmap.size = num_key_ctx;
	cdev->kmap.available = bsize;
	ksize = sizeof(*cdev->kmap.addr) * bsize;
	cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
	if (!cdev->kmap.addr)
		return -ENOMEM;

	cdev->kmap.start = lldi->vr->key.start;
	spin_lock_init(&cdev->kmap.lock);
	return 0;
}

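/* Allocate the first free key-context slot from the bitmap (under
 * kmap.lock), record it in the connection's TLS state and bump the key
 * counter; returns a negative value when no slot is available.
 */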
static int get_new_keyid(struct chtls_sock *csk, u32 optname)
{
	struct net_device *dev = csk->egress_dev;
	struct chtls_dev *cdev = csk->cdev;
	struct chtls_hws *hws;
	struct adapter *adap;
	int keyid;

	adap = netdev2adap(dev);
	hws = &csk->tlshws;

	spin_lock_bh(&cdev->kmap.lock);
	keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
	if (keyid < cdev->kmap.size) {
		__set_bit(keyid, cdev->kmap.addr);
		if (optname == TLS_RX)
			hws->rxkey = keyid;
		else
			hws->txkey = keyid;
		atomic_inc(&adap->chcr_stats.tls_key);
	} else {
		keyid = -1;
	}
	spin_unlock_bh(&cdev->kmap.lock);
	return keyid;
}

void free_tls_keyid(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct net_device *dev = csk->egress_dev;
	struct chtls_dev *cdev = csk->cdev;
	struct chtls_hws *hws;
	struct adapter *adap;

	if (!cdev->kmap.addr)
		return;

	adap = netdev2adap(dev);
	hws = &csk->tlshws;

	spin_lock_bh(&cdev->kmap.lock);
	if (hws->rxkey >= 0) {
		__clear_bit(hws->rxkey, cdev->kmap.addr);
		atomic_dec(&adap->chcr_stats.tls_key);
		hws->rxkey = -1;
	}

	if (hws->txkey >= 0) {
		__clear_bit(hws->txkey, cdev->kmap.addr);
		atomic_dec(&adap->chcr_stats.tls_key);
		hws->txkey = -1;
	}
	spin_unlock_bh(&cdev->kmap.lock);
}

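/* Convert a key-context index into the address used by the ULP_TX memory
 * write: byte offset from the start of the key region, in 32-byte units.
 */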
unsigned int keyid_to_addr(int start_addr, int keyid)
{
	return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
}

static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
{
	kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
				       KEYCTX_TX_WR_AAD_V(1ULL) |
				       KEYCTX_TX_WR_AADST_V(5ULL) |
				       KEYCTX_TX_WR_CIPHER_V(14ULL) |
				       KEYCTX_TX_WR_CIPHERST_V(0ULL) |
				       KEYCTX_TX_WR_AUTH_V(14ULL) |
				       KEYCTX_TX_WR_AUTHST_V(16ULL) |
				       KEYCTX_TX_WR_AUTHIN_V(16ULL));
}

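/* Populate the hardware key context from the socket's TLS crypto_info:
 * context header, implicit-IV salt, cipher key and the derived GHASH H
 * value (AES encryption of a zero block).
 */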
static int chtls_key_info(struct chtls_sock *csk,
			  struct _key_ctx *kctx,
			  u32 keylen, u32 optname,
			  int cipher_type)
{
	unsigned char key[AES_MAX_KEY_SIZE];
	unsigned char *key_p, *salt;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ck_size, key_ctx_size, kctx_mackey_size, salt_size;
	struct crypto_aes_ctx aes;
	int ret;

	key_ctx_size = sizeof(struct _key_ctx) +
		       roundup(keylen, 16) + AEAD_H_SIZE;

	/* GCM mode of AES supports 128 and 256 bit encryption, so
	 * prepare the key context based on the GCM cipher type
	 */
	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm_ctx_128 =
			(struct tls12_crypto_info_aes_gcm_128 *)
					&csk->tlshws.crypto_info;
		memcpy(key, gcm_ctx_128->key, keylen);

		key_p            = gcm_ctx_128->key;
		salt             = gcm_ctx_128->salt;
		ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		salt_size        = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm_ctx_256 =
			(struct tls12_crypto_info_aes_gcm_256 *)
					&csk->tlshws.crypto_info;
		memcpy(key, gcm_ctx_256->key, keylen);

		key_p            = gcm_ctx_256->key;
		salt             = gcm_ctx_256->salt;
		ck_size          = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		salt_size        = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		kctx_mackey_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		break;
	}
	default:
		pr_err("GCM: Invalid key length %d\n", keylen);
		return -EINVAL;
	}

	/* Calculate H = CIPH(K, 0 repeated 16 times).
	 * It will go in the key context.
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;

	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));
	csk->tlshws.keylen = key_ctx_size;

	/* Copy the Key context */
	if (optname == TLS_RX) {
		int key_ctx;

		key_ctx = ((key_ctx_size >> 4) << 3);
		kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
						 kctx_mackey_size,
						 0, 0, key_ctx);
		chtls_rxkey_ivauth(kctx);
	} else {
		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 kctx_mackey_size,
						 0, 0, key_ctx_size >> 4);
	}

	memcpy(kctx->salt, salt, salt_size);
	memcpy(kctx->key, key_p, keylen);
	memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
	/* erase key info from driver */
	memset(key_p, 0, keylen);

	return 0;
}

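/* Session-command defaults for the TX path: hardware-generated IVs, key
 * context fetched from the key region (not inline) and TLS fragmentation
 * enabled.
 */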
static void chtls_set_scmd(struct chtls_sock *csk)
{
	struct chtls_hws *hws = &csk->tlshws;

	hws->scmd.seqno_numivs =
		SCMD_SEQ_NO_CTRL_V(3) |
		SCMD_PROTO_VERSION_V(0) |
		SCMD_ENC_DEC_CTRL_V(0) |
		SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
		SCMD_CIPH_MODE_V(2) |
		SCMD_AUTH_MODE_V(4) |
		SCMD_HMAC_CTRL_V(0) |
		SCMD_IV_SIZE_V(4) |
		SCMD_NUM_IVS_V(1);

	hws->scmd.ivgen_hdrlen =
		SCMD_IV_GEN_CTRL_V(1) |
		SCMD_KEY_CTX_INLINE_V(0) |
		SCMD_TLS_FRAG_ENABLE_V(1);
}

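/* Program a new TX/RX key for the connection: reserve a key id, write the
 * key context into the adapter's key region with an FW_ULPTX_WR memory
 * write carrying the context as immediate data, then (for RX) point the
 * TCB at the new key, reset the sequence number and clear quiesce.
 */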
int chtls_setkey(struct chtls_sock *csk, u32 keylen,
		 u32 optname, int cipher_type)
{
	struct tls_key_req *kwr;
	struct chtls_dev *cdev;
	struct _key_ctx *kctx;
	int wrlen, klen, len;
	struct sk_buff *skb;
	struct sock *sk;
	int keyid;
	int kaddr;
	int ret;

	cdev = csk->cdev;
	sk = csk->sk;

	klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
	wrlen = roundup(sizeof(*kwr), 16);
	len = klen + wrlen;

	/* Flush outstanding data before the new key takes effect */
	if (optname == TLS_TX) {
		lock_sock(sk);
		if (skb_queue_len(&csk->txq))
			chtls_push_frames(csk, 0);
		release_sock(sk);
	}

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	keyid = get_new_keyid(csk, optname);
	if (keyid < 0) {
		ret = -ENOSPC;
		goto out_nokey;
	}

	kaddr = keyid_to_addr(cdev->kmap.start, keyid);
	kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
	kwr->wr.op_to_compl =
		cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
			    FW_WR_ATOMIC_V(1U));
	kwr->wr.flowid_len16 =
		cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16) |
					  FW_WR_FLOWID_V(csk->tid)));
	kwr->wr.protocol = 0;
	kwr->wr.mfs = htons(TLS_MFS);
	kwr->wr.reneg_to_write_rx = optname;

	/* ulptx command */
	kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
				   T5_ULP_MEMIO_ORDER_V(1) |
				   T5_ULP_MEMIO_IMM_V(1));
	kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
				     DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
	kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
	kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));

	/* sub command */
	kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
	kwr->sc_imm.len = cpu_to_be32(klen);

	lock_sock(sk);
	/* key info */
	kctx = (struct _key_ctx *)(kwr + 1);
	ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
	if (ret)
		goto out_notcb;

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
	csk->wr_credits -= DIV_ROUND_UP(len, 16);
	csk->wr_unacked += DIV_ROUND_UP(len, 16);
	enqueue_wr(csk, skb);
	cxgb4_ofld_send(csk->egress_dev, skb);

	chtls_set_scmd(csk);
	/* Clear quiesce for Rx key */
	if (optname == TLS_RX) {
		ret = chtls_set_tcb_keyid(sk, keyid);
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_field(sk, 0,
					  TCB_ULP_RAW_V(TCB_ULP_RAW_M),
					  TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
							 TF_TLS_CONTROL_V(1) |
							 TF_TLS_ACTIVE_V(1) |
							 TF_TLS_ENABLE_V(1))));
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_seqno(sk);
		if (ret)
			goto out_notcb;
		ret = chtls_set_tcb_quiesce(sk, 0);
		if (ret)
			goto out_notcb;
		csk->tlshws.rxkey = keyid;
	} else {
		csk->tlshws.tx_seq_no = 0;
		csk->tlshws.txkey = keyid;