// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/net.h>
#include <linux/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"
/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
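
/*
 * Listening sockets are added to and removed from the hardware listen
 * state via a notifier chain: chtls_start_listen()/chtls_stop_listen()
 * post CHTLS_LISTEN_START/STOP events on this chain, and
 * listen_notify_handler() services them under notify_mutex.
 */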
static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}
static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}
static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
}
static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};
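
/*
 * Backlog receive for a listening socket: skbs that still carry distinct
 * network/transport headers go through the normal tcp_v4_do_rcv() path,
 * while synthesized CPL messages are dispatched to the handler stashed in
 * the skb control block.
 */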
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);

	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return 0;
}
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}
static int chtls_inline_feature(struct tls_toe_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}
static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(cdev, sk);
	return 0;
}
static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(cdev, sk);
}
static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_toe_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}
static inline void chtls_dev_release(struct kref *kref)
{
	struct tls_toe_device *dev;
	struct chtls_dev *cdev;

	dev = container_of(kref, struct tls_toe_device, kref);
	cdev = to_chtls_dev(dev);
	chtls_free_uld(cdev);
}
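
/*
 * Register this adapter with the TLS TOE core. The device name is the
 * driver name with the first port's netdev name appended, and the
 * feature/hash/unhash/release callbacks wire TLS listen setup and
 * teardown back into this driver.
 */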
static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_toe_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_TOE_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_toe_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
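
/*
 * Work item that drains the deferred-skb queue. The queue lock is dropped
 * around each handler invocation so the handler runs without holding the
 * deferq lock.
 */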
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
					      struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}
static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev) + info->nports *
		       (sizeof(struct net_device *)), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;

out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}
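
/*
 * Drop the final reference on every registered chtls device; called on
 * module unload. Devices still in CHTLS_CDEV_STATE_UP are unlinked from
 * cdev_list and released through their kref release callback.
 */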
static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
	}
	return 0;
}
static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for cpl_pass_accept_req which will be synthesized by
	 * the driver. Once the driver synthesizes cpl_pass_accept_req the skb
	 * will go through the regular cpl_pass_accept_req processing in TOM.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);
	/* For now we will copy cpl_rx_pkt in the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}
static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}
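
/*
 * Handle a CPL message delivered entirely in the response descriptor.
 * A per-bin cached skb is reused when it is linear, unshared and has
 * enough tailroom; otherwise a fresh skb is allocated before the payload
 * is copied out and dispatched to the opcode handler.
 */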
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}
static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}
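
/*
 * cxgb4 ULD receive entry point. CPL_RX_PKT (an incoming SYN) is copied
 * into a new skb so a cpl_pass_accept_req can be synthesized; messages
 * without a packet gather list are handled from the response descriptor,
 * and everything else is converted to an skb and dispatched.
 */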
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}
static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}
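
/*
 * do_chtls_setsockopt() consumes the same layout user space passes to the
 * kernel TLS API: a struct tls_crypto_info header followed by the
 * cipher-specific key material. A minimal sketch of the user-space side
 * (standard kTLS usage, not specific to this driver) looks roughly like:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = { 0 };
 *
 *	ci.info.version = TLS_1_2_VERSION;
 *	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	// fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * The driver copies the header first to learn the cipher type, then copies
 * the remainder of the cipher-specific structure.
 */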
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int cipher_type;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (!optval || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	/* GCM mode of AES supports 128 and 256 bit encryption, so
	 * copy keys from user based on GCM cipher type.
	 */
	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* Obtain version and type from previous copy */
		crypto_info[0] = tmp_crypto_info;
		/* Now copy the following data */
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_128)
				    - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		crypto_info[0] = tmp_crypto_info;
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_256)
				    - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_256;
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
	rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
	return rc;
}
static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}
static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};
void chtls_install_cpl_ops(struct sock *sk)
{
	sk->sk_prot = &chtls_cpl_prot;
}
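
/*
 * Build the chtls socket proto: start from tcp_prot and override the
 * operations that must go through the TLS offload path.
 */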
static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
}
static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}
static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}
module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);