drivers/crypto/chelsio/chtls/chtls_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
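
/*
 * Listen start/stop requests are fanned out through a module-local raw
 * notifier chain (listen_notify_list), serialized by notify_mutex.
 */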
static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};
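
/*
 * Backlog receive for listening sockets: CPL messages queued on the socket
 * backlog carry their handler in the skb control block; ordinary TCP
 * segments fall through to tcp_v4_do_rcv().
 */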
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}
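
/*
 * Ask for offload of a listening TCP socket. The real work happens in the
 * listen notifier handler; this only validates the socket and posts a
 * CHTLS_LISTEN_START event.
 */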
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return 0;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}
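
/* Return 1 if any port of this adapter advertises inline TLS support. */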
static int chtls_inline_feature(struct tls_toe_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(cdev, sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(cdev, sk);
}
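
/* Release all resources held by a chtls device. */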
static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_toe_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
	struct tls_toe_device *dev;
	struct chtls_dev *cdev;

	dev = container_of(kref, struct tls_toe_device, kref);
	cdev = to_chtls_dev(dev);
	chtls_free_uld(cdev);
}
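
/* Register the adapter with the TLS TOE framework and mark it usable. */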
static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_toe_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_TOE_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_toe_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
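
/*
 * Work function that drains the deferred-skb queue, invoking each skb's
 * stored handler with the queue lock dropped.
 */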
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
					      struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}
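
/*
 * ULD "add" callback: allocate and initialize a chtls device for a cxgb4
 * adapter and add it to the global cdev_list.
 */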
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev) + info->nports *
		       (sizeof(struct net_device *)), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->tids = lldi->tids;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
			<< FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}

static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}
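
/* ULD state-change callback: register the device on UP, drop it on DETACH. */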
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once driver synthesizes cpl_pass_accept_req the skb will go
	 * through the regular cpl_pass_accept_req processing in TOM.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
			- pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);
	/* For now we will copy cpl_rx_pkt in the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}
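
/*
 * Handle a response without a gather list: copy the inline response data
 * into a (possibly cached) skb and dispatch it by CPL opcode.
 */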
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}
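
/*
 * Main ULD receive handler: dispatch CPL messages as raw ingress packets,
 * inline responses, or gather-list payloads.
 */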
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}
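
/* getsockopt(SOL_TLS): report TLS 1.2 as the protocol version in use. */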
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}
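
/*
 * setsockopt(SOL_TLS): copy AES-GCM-128/256 crypto parameters from user
 * space and hand the key material to chtls_setkey().
 */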
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int cipher_type;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (!optval || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	/* GCM mode of AES supports 128 and 256 bit encryption, so
	 * copy keys from user based on GCM cipher type.
	 */
	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* Obtain version and type from previous copy */
		crypto_info[0] = tmp_crypto_info;
		/* Now copy the following data */
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_128)
				    - sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_128;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		crypto_info[0] = tmp_crypto_info;
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_256)
				    - sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		cipher_type = TLS_CIPHER_AES_GCM_256;
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
	rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
	sk->sk_prot = &chtls_cpl_prot;
}
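
/* Clone tcp_prot and override the operations that chtls implements. */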
static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);