/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/tls.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

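/*
 * Build a CPL_SET_TCB_FIELD work request in place: address one TCB word
 * of the connection with a mask/value pair, select the reply behaviour,
 * and pad the request with a ULP_TX no-op sub-command.
 */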
static void __set_tcb_field_direct(struct chtls_sock *csk,
                                   struct cpl_set_tcb_field *req, u16 word,
                                   u64 mask, u64 val, u8 cookie, int no_reply)
{
        struct ulptx_idata *sc;

        INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, csk->tid);
        req->wr.wr_mid |= htonl(FW_WR_FLOWID_V(csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(no_reply) |
                                QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
        sc->len = htonl(0);
}

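/*
 * Reserve room in a caller-allocated skb, build the SET_TCB_FIELD request
 * there, and steer it to the control queue of the connection's port.
 */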
static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
                            u64 mask, u64 val, u8 cookie, int no_reply)
{
        struct cpl_set_tcb_field *req;
        struct chtls_sock *csk;
        struct ulptx_idata *sc;
        unsigned int wrlen;

        wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
        csk = rcu_dereference_sk_user_data(sk);

        req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
        __set_tcb_field_direct(csk, req, word, mask, val, cookie, no_reply);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
}

/*
 * Send a control message to the HW; the message goes out as immediate data
 * and the packet is freed immediately.
 */
static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
{
        struct cpl_set_tcb_field *req;
        unsigned int credits_needed;
        struct chtls_sock *csk;
        struct ulptx_idata *sc;
        struct sk_buff *skb;
        unsigned int wrlen;
        int ret;

        wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);

        skb = alloc_skb(wrlen, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        credits_needed = DIV_ROUND_UP(wrlen, 16);
        csk = rcu_dereference_sk_user_data(sk);

        __set_tcb_field(sk, skb, word, mask, val, 0, 1);
        skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
        csk->wr_credits -= credits_needed;
        csk->wr_unacked += credits_needed;
        enqueue_wr(csk, skb);
        ret = cxgb4_ofld_send(csk->egress_dev, skb);

        return ret < 0 ? ret : 0;
}

/*
 * Set one of the t_flags bits in the TCB.
 */
int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
{
        return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
                                   (u64)val << bit_pos);
}

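/* Program the TLS key-context id for this connection into TCB word 31. */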
static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
{
        return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
}

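/* Reset the sequence-number state kept in TCB word 28. */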
static int chtls_set_tcb_seqno(struct sock *sk)
{
        return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
}

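/* Set or clear the RX_QUIESCE t_flag to pause or resume ingress processing. */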
static int chtls_set_tcb_quiesce(struct sock *sk, int val)
{
        return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
                                   TF_RX_QUIESCE_V(val));
}

/* TLS Key bitmap processing */
int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
{
        unsigned int num_key_ctx, bsize;
        int ksize;

        num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
        bsize = BITS_TO_LONGS(num_key_ctx);

        cdev->kmap.size = num_key_ctx;
        cdev->kmap.available = bsize;
        ksize = sizeof(*cdev->kmap.addr) * bsize;
        cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL);
        if (!cdev->kmap.addr)
                return -ENOMEM;

        cdev->kmap.start = lldi->vr->key.start;
        spin_lock_init(&cdev->kmap.lock);
        return 0;
}

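/*
 * Allocate a free key-context id from the device bitmap, record it as the
 * connection's Rx or Tx key, and bump the adapter's key-usage counter.
 * Returns -1 if no id is available.
 */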
static int get_new_keyid(struct chtls_sock *csk, u32 optname)
{
        struct net_device *dev = csk->egress_dev;
        struct chtls_dev *cdev = csk->cdev;
        struct chtls_hws *hws;
        struct adapter *adap;
        int keyid;

        adap = netdev2adap(dev);
        hws = &csk->tlshws;

        spin_lock_bh(&cdev->kmap.lock);
        keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
        if (keyid < cdev->kmap.size) {
                __set_bit(keyid, cdev->kmap.addr);
                if (optname == TLS_RX)
                        hws->rxkey = keyid;
                else
                        hws->txkey = keyid;
                atomic_inc(&adap->chcr_stats.tls_key);
        } else {
                keyid = -1;
        }
        spin_unlock_bh(&cdev->kmap.lock);
        return keyid;
}

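/* Return any Rx/Tx key-context ids held by this connection to the bitmap. */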
void free_tls_keyid(struct sock *sk)
{
        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
        struct net_device *dev = csk->egress_dev;
        struct chtls_dev *cdev = csk->cdev;
        struct chtls_hws *hws;
        struct adapter *adap;

        if (!cdev->kmap.addr)
                return;

        adap = netdev2adap(dev);
        hws = &csk->tlshws;

        spin_lock_bh(&cdev->kmap.lock);
        if (hws->rxkey >= 0) {
                __clear_bit(hws->rxkey, cdev->kmap.addr);
                atomic_dec(&adap->chcr_stats.tls_key);
                hws->rxkey = -1;
        }

        if (hws->txkey >= 0) {
                __clear_bit(hws->txkey, cdev->kmap.addr);
                atomic_dec(&adap->chcr_stats.tls_key);
                hws->txkey = -1;
        }
        spin_unlock_bh(&cdev->kmap.lock);
}

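/* Translate a key-context id to its location in key memory, in 32-byte units. */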
unsigned int keyid_to_addr(int start_addr, int keyid)
{
        return (start_addr + (keyid * TLS_KEY_CONTEXT_SZ)) >> 5;
}

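/*
 * Fixed offsets and lengths describing where the IV, AAD, cipher payload
 * and authentication regions sit in received TLS records.
 */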
static void chtls_rxkey_ivauth(struct _key_ctx *kctx)
{
        kctx->iv_to_auth = cpu_to_be64(KEYCTX_TX_WR_IV_V(6ULL) |
                                       KEYCTX_TX_WR_AAD_V(1ULL) |
                                       KEYCTX_TX_WR_AADST_V(5ULL) |
                                       KEYCTX_TX_WR_CIPHER_V(14ULL) |
                                       KEYCTX_TX_WR_CIPHERST_V(0ULL) |
                                       KEYCTX_TX_WR_AUTH_V(14ULL) |
                                       KEYCTX_TX_WR_AUTHST_V(16ULL) |
                                       KEYCTX_TX_WR_AUTHIN_V(16ULL));
}

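/*
 * Fill the key context that is written to adapter memory: derive the GHASH
 * subkey H by encrypting an all-zero block with the session key, lay out
 * header, salt, key and H, then wipe the key material held by the driver.
 */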
static int chtls_key_info(struct chtls_sock *csk,
                          struct _key_ctx *kctx,
                          u32 keylen, u32 optname)
{
        unsigned char key[AES_KEYSIZE_128];
        struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
        unsigned char ghash_h[AEAD_H_SIZE];
        struct crypto_cipher *cipher;
        int ck_size, key_ctx_size;
        u32 key_ctx;
        int ret;

        gcm_ctx = (struct tls12_crypto_info_aes_gcm_128 *)
                  &csk->tlshws.crypto_info;

        key_ctx_size = sizeof(struct _key_ctx) +
                       roundup(keylen, 16) + AEAD_H_SIZE;

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                return -EINVAL;
        }
        memcpy(key, gcm_ctx->key, keylen);

        /* Calculate the H = CIPH(K, 0 repeated 16 times).
         * It will go into the key context.
         */
        cipher = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(cipher)) {
                ret = -ENOMEM;
                goto out;
        }

        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out1;

        memset(ghash_h, 0, AEAD_H_SIZE);
        crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
        csk->tlshws.keylen = key_ctx_size;

        /* Copy the Key context */
        if (optname == TLS_RX) {
                key_ctx = ((key_ctx_size >> 4) << 3);
                kctx->ctx_hdr = FILL_KEY_CRX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0, key_ctx);
                chtls_rxkey_ivauth(kctx);
        } else {
                kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0, key_ctx_size >> 4);
        }

        memcpy(kctx->salt, gcm_ctx->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(kctx->key, gcm_ctx->key, keylen);
        memcpy(kctx->key + keylen, ghash_h, AEAD_H_SIZE);
        /* erase key info from driver */
        memset(gcm_ctx->key, 0, keylen);

out1:
        crypto_free_cipher(cipher);
out:
        return ret;
}

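/*
 * Initialize the security command (SCMD) fields that describe the cipher
 * and authentication processing applied to this session's records.
 */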
static void chtls_set_scmd(struct chtls_sock *csk)
{
        struct chtls_hws *hws = &csk->tlshws;

        hws->scmd.seqno_numivs =
                SCMD_SEQ_NO_CTRL_V(3) |
                SCMD_PROTO_VERSION_V(0) |
                SCMD_ENC_DEC_CTRL_V(0) |
                SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
                SCMD_CIPH_MODE_V(2) |
                SCMD_AUTH_MODE_V(4) |
                SCMD_HMAC_CTRL_V(0) |
                SCMD_IV_SIZE_V(4) |
                SCMD_NUM_IVS_V(1);

        hws->scmd.ivgen_hdrlen =
                SCMD_IV_GEN_CTRL_V(1) |
                SCMD_KEY_CTX_INLINE_V(0) |
                SCMD_TLS_FRAG_ENABLE_V(1);
}

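/*
 * Write a TLS session key into on-adapter key memory with a ULP_TX
 * memory-write work request, then update the connection's TCB so the key
 * takes effect: for Rx, program the key id, ULP mode bits and sequence
 * number, and clear quiesce; for Tx, reset the sequence number.
 */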
int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
{
        struct tls_key_req *kwr;
        struct chtls_dev *cdev;
        struct _key_ctx *kctx;
        int wrlen, klen, len;
        struct sk_buff *skb;
        struct sock *sk;
        int keyid;
        int kaddr;
        int ret;

        cdev = csk->cdev;
        sk = csk->sk;

        klen = roundup((keylen + AEAD_H_SIZE) + sizeof(*kctx), 32);
        wrlen = roundup(sizeof(*kwr), 16);
        len = klen + wrlen;

        /* Flush outstanding data before the new key takes effect */
        if (optname == TLS_TX) {
                lock_sock(sk);
                if (skb_queue_len(&csk->txq))
                        chtls_push_frames(csk, 0);
                release_sock(sk);
        }

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        keyid = get_new_keyid(csk, optname);
        if (keyid < 0) {
                ret = -ENOSPC;
                goto out_nokey;
        }

        kaddr = keyid_to_addr(cdev->kmap.start, keyid);
        kwr = (struct tls_key_req *)__skb_put_zero(skb, len);
        kwr->wr.op_to_compl =
                cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | FW_WR_COMPL_F |
                            FW_WR_ATOMIC_V(1U));
        kwr->wr.flowid_len16 =
                cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16) |
                                          FW_WR_FLOWID_V(csk->tid)));
        kwr->wr.protocol = 0;
        kwr->wr.mfs = htons(TLS_MFS);
        kwr->wr.reneg_to_write_rx = optname;

        /* ulptx command */
        kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
                                   T5_ULP_MEMIO_ORDER_V(1) |
                                   T5_ULP_MEMIO_IMM_V(1));
        kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
                                     DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
        kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
        kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(kaddr));

        /* sub command */
        kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
        kwr->sc_imm.len = cpu_to_be32(klen);

        /* key info */
        kctx = (struct _key_ctx *)(kwr + 1);
        ret = chtls_key_info(csk, kctx, keylen, optname);
        if (ret)
                goto out_notcb;

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
        csk->wr_credits -= DIV_ROUND_UP(len, 16);
        csk->wr_unacked += DIV_ROUND_UP(len, 16);
        enqueue_wr(csk, skb);
        cxgb4_ofld_send(csk->egress_dev, skb);

        chtls_set_scmd(csk);
        /* Clear quiesce for Rx key */
        if (optname == TLS_RX) {
                ret = chtls_set_tcb_keyid(sk, keyid);
                if (ret)
                        goto out_notcb;
                ret = chtls_set_tcb_field(sk, 0,
                                          TCB_ULP_RAW_V(TCB_ULP_RAW_M),
                                          TCB_ULP_RAW_V((TF_TLS_KEY_SIZE_V(1) |
                                                         TF_TLS_CONTROL_V(1) |
                                                         TF_TLS_ACTIVE_V(1) |
                                                         TF_TLS_ENABLE_V(1))));
                if (ret)
                        goto out_notcb;
                ret = chtls_set_tcb_seqno(sk);
                if (ret)
                        goto out_notcb;
                ret = chtls_set_tcb_quiesce(sk, 0);
                if (ret)
                        goto out_notcb;
                csk->tlshws.rxkey = keyid;
        } else {
                csk->tlshws.tx_seq_no = 0;
                csk->tlshws.txkey = keyid;
        }

        return ret;
out_notcb:
        free_tls_keyid(sk);
out_nokey:
        kfree_skb(skb);
        return ret;
}