/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "chcr_core.h"
#include "cxgb4_uld.h"

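/*
 * Global driver state: every adapter that offers crypto offload gets a
 * uld_ctx on uld_ctx_list. dev_mutex protects the list and the
 * round-robin cursor ctx_rr used by assign_chcr_device(); dev_count
 * tracks how many chcr devices are currently registered.
 */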
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;
static struct uld_ctx *ctx_rr;

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

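/*
 * Dispatch table indexed by CPL opcode; this driver only expects
 * CPL_FW6_PLD messages from the firmware.
 */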
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

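/*
 * Hooks handed to cxgb4 when this module registers as the
 * CXGB4_ULD_CRYPTO upper-layer driver.
 */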
static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
};

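/*
 * assign_chcr_device - pick a crypto device for a new session. The
 * context returned here should be used for the whole session; see the
 * ordering note inside.
 */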
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A single
	 * session, however, must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&dev_mutex);
	if (!list_empty(&uld_ctx_list)) {
		u_ctx = ctx_rr;
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx, entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	mutex_unlock(&dev_mutex);
	return u_ctx;
}

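/*
 * Allocate the chcr_dev for @u_ctx and publish the context on the
 * global list; called on the CXGB4_STATE_UP transition.
 */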
static int chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENXIO;

	spin_lock_init(&dev->lock_chcr_dev);
	u_ctx->dev = dev;
	dev->u_ctx = u_ctx;
	atomic_inc(&dev_count);
	mutex_lock(&dev_mutex);
	list_add_tail(&u_ctx->entry, &uld_ctx_list);
	if (!ctx_rr)
		ctx_rr = u_ctx;
	mutex_unlock(&dev_mutex);
	return 0;
}

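/*
 * Tear down the chcr_dev behind @u_ctx. The round-robin cursor is moved
 * off @u_ctx first so assign_chcr_device() never hands out a context
 * that is about to disappear. Callers hold dev_mutex.
 */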
static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
	if (ctx_rr == u_ctx) {
		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
			ctx_rr = list_first_entry(&uld_ctx_list,
						  struct uld_ctx, entry);
		else
			ctx_rr = list_next_entry(ctx_rr, entry);
	}
	list_del(&u_ctx->entry);
	if (list_empty(&uld_ctx_list))
		ctx_rr = NULL;
	kfree(u_ctx->dev);
	u_ctx->dev = NULL;
	atomic_dec(&dev_count);
	return 0;
}

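/*
 * Completion path: the firmware reports a finished work request in a
 * CPL_FW6_PLD message. data[1] carries the original
 * crypto_async_request pointer and data[0] carries the ack/error
 * status bits.
 */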
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;
	struct adapter *adap = padap(dev);

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
							fw6_pld->data[1]);
	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EBADMSG;
		atomic_inc(&adap->chcr_stats.error);
	}
	/* call the completion callback with the computed status */
	if (req)
		error_status = chcr_handle_resp(req, input, error_status);
	else
		pr_err("Incorrect request address from the firmware\n");
	return 0;
}

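/* Hand a fully built work-request skb to cxgb4 for transmission. */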
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

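/*
 * .add callback: invoked by cxgb4 once per adapter. Allocate the
 * per-adapter uld_ctx and cache the lower-layer driver info; adapters
 * without crypto-lookaside support are rejected.
 */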
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
		/* no crypto-lookaside support; drop the context we allocated */
		kfree(u_ctx);
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
out:
	return u_ctx;
}

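/*
 * Receive path: firmware responses arrive either inline right after the
 * response descriptor (@pgl is NULL) or in the packet gather list, and
 * are dispatched through work_handlers[].
 */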
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = u_ctx->dev;
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (rpl->opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode\n");
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](dev, pgl->va);
	return 0;
}

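/*
 * Adapter state transitions from cxgb4: create the chcr device when the
 * adapter comes up and tear it down on detach. start_crypto() and
 * stop_crypto() bracket the window in which at least one device exists.
 */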
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (!u_ctx->dev) {
			ret = chcr_dev_add(u_ctx);
			if (ret != 0)
				return ret;
		}
		if (atomic_read(&dev_count) == 1)
			ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		if (u_ctx->dev) {
			mutex_lock(&dev_mutex);
			chcr_dev_remove(u_ctx);
			mutex_unlock(&dev_mutex);
		}
		if (!atomic_read(&dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

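/*
 * Module init/exit: register with cxgb4 as the crypto ULD. Devices are
 * created later, as adapters report CXGB4_STATE_UP.
 */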
static int __init chcr_crypto_init(void)
{
	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");

	return 0;
}

static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	if (atomic_read(&dev_count))
		stop_crypto();

	/* Remove all devices from the list */
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		if (u_ctx->dev)
			chcr_dev_remove(u_ctx);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);