/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

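/*
 * Per-opcode dispatch table for inbound CPL messages; CPL_FW6_PLD,
 * which carries crypto completions from firmware, is the only opcode
 * this driver handles.
 */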
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

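/*
 * Upper-layer driver (ULD) hooks handed to cxgb4 at registration time:
 * device add, adapter state changes and response-queue processing.
 */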
static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
};

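/*
 * Delayed work run while a detach is pending: if requests are still in
 * flight, re-arm the work until the retry budget (wqretry) is
 * exhausted, then warn and release the waiter; otherwise complete
 * immediately.
 */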
static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		if (dev->wqretry--) {
			pr_debug("Request Inflight Count %d\n",
				 atomic_read(&dev->inflight));
			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR: %d requests still pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A given
	 * session must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
							     struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

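/*
 * Mark a device as usable and move it onto the active list; called on
 * the CXGB4_STATE_UP transition.
 */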
static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

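/*
 * One-time setup when a device is first seen: initialise locks, the
 * detach machinery and counters, then park the context on the inactive
 * list until the adapter reports UP.
 */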
static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

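/*
 * Retire a device to the inactive list, advancing last_dev first so
 * round-robin selection never points at a departing device.
 */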
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
							     struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

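/*
 * Completion handler for CPL_FW6_PLD. The firmware echoes back the
 * crypto_async_request pointer in data[1] and reports MAC/pad error
 * bits in the low 32 bits of data[0]; decode both and complete the
 * request, passing -EBADMSG on an integrity failure.
 */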
static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
							fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

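/* Hand a fully built work-request skb to the cxgb4 crypto TX path. */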
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

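/*
 * ULD "add" callback, invoked by cxgb4 once per adapter. Adapters
 * without lookaside-crypto capability are rejected up front.
 */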
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it in the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);
out:
	return u_ctx;
}

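/*
 * Response-queue entry point. The CPL payload may arrive inline in rsp
 * or, for larger messages, via the packet gather list (pgl); dispatch
 * it to the matching opcode handler either way.
 */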
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

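/*
 * Quiesce a device on detach: flag it CHCR_DETACH, wait (via the
 * delayed work and completion) for in-flight requests to drain, then
 * move it to the inactive list.
 */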
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inactive_dev list */
	chcr_dev_move(u_ctx);
}

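/*
 * React to adapter state transitions reported by cxgb4: attach and
 * start crypto on UP, drain and stop on DETACH; recovery and DOWN
 * events need no action here.
 */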
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		if (!atomic_read(&drv_data.dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

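/*
 * Module init: set up the driver-wide device lists and bookkeeping,
 * then register with cxgb4 as the crypto ULD.
 */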
static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

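/*
 * Module exit: unregister the ULD, then reclaim every context still on
 * the active or inactive list, clearing per-adapter crypto stats.
 */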
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);