/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
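
/*
 * Per-opcode dispatch table for CPL messages arriving on the crypto
 * response queues. Opcodes without a handler are rejected in
 * chcr_uld_rx_handler().
 */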
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_FW6_PLD] = cpl_fw6_pld_handler,
#ifdef CONFIG_CHELSIO_TLS_DEVICE
        [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
        [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
        /* Max ntxq will be derived from fw config file */
        .rxq_size = 1024,
        .add = chcr_uld_add,
        .state_change = chcr_uld_state_change,
        .rx_handler = chcr_uld_rx_handler,
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
        .tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};
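
/*
 * Delayed work used while detaching a device: as long as requests are
 * still in flight it re-arms itself (up to WQ_RETRY times), and once
 * the device is idle or retries are exhausted it wakes the waiter in
 * chcr_detach_device() through detach_comp.
 */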
static void detach_work_fn(struct work_struct *work)
{
        struct chcr_dev *dev;

        dev = container_of(work, struct chcr_dev, detach_work.work);

        if (atomic_read(&dev->inflight)) {
                dev->wqretry--;
                if (dev->wqretry) {
                        pr_debug("Request Inflight Count %d\n",
                                 atomic_read(&dev->inflight));

                        schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
                } else {
                        WARN(1, "CHCR: %d requests still pending\n",
                             atomic_read(&dev->inflight));
                        complete(&dev->detach_comp);
                }
        } else {
                complete(&dev->detach_comp);
        }
}
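
/*
 * assign_chcr_device - return the next active device in round-robin
 * order, or NULL when no device is active.
 */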
struct uld_ctx *assign_chcr_device(void)
{
        struct uld_ctx *u_ctx = NULL;

        /*
         * When multiple devices are present in the system, select the
         * device for crypto operations in a round-robin fashion.
         * A session, however, must keep using the same device to
         * maintain request-response ordering.
         */
        mutex_lock(&drv_data.drv_mutex);
        if (!list_empty(&drv_data.act_dev)) {
                u_ctx = drv_data.last_dev;
                if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
                        drv_data.last_dev = list_first_entry(&drv_data.act_dev,
                                                             struct uld_ctx, entry);
                else
                        drv_data.last_dev =
                                list_next_entry(drv_data.last_dev, entry);
        }
        mutex_unlock(&drv_data.drv_mutex);
        return u_ctx;
}
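
/*
 * chcr_dev_add - mark a device as attached and move it to the active
 * list; also seeds drv_data.last_dev for round-robin selection.
 */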
static void chcr_dev_add(struct uld_ctx *u_ctx)
{
        struct chcr_dev *dev;

        dev = &u_ctx->dev;
        dev->state = CHCR_ATTACH;
        atomic_set(&dev->inflight, 0);
        mutex_lock(&drv_data.drv_mutex);
        list_move(&u_ctx->entry, &drv_data.act_dev);
        if (!drv_data.last_dev)
                drv_data.last_dev = u_ctx;
        mutex_unlock(&drv_data.drv_mutex);
}
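
/*
 * chcr_dev_init - set up the per-device state (detach machinery,
 * retry budget, inflight counter) and park the device on the
 * inactive list until the adapter reports CXGB4_STATE_UP.
 */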
static void chcr_dev_init(struct uld_ctx *u_ctx)
{
        struct chcr_dev *dev;

        dev = &u_ctx->dev;
        spin_lock_init(&dev->lock_chcr_dev);
        INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
        init_completion(&dev->detach_comp);
        dev->state = CHCR_INIT;
        dev->wqretry = WQ_RETRY;
        atomic_inc(&drv_data.dev_count);
        atomic_set(&dev->inflight, 0);
        mutex_lock(&drv_data.drv_mutex);
        list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
        mutex_unlock(&drv_data.drv_mutex);
}
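
/*
 * chcr_dev_move - retire a device to the inactive list, advancing
 * the round-robin cursor past it first.
 */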
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
        mutex_lock(&drv_data.drv_mutex);
        if (drv_data.last_dev == u_ctx) {
                if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
                        drv_data.last_dev = list_first_entry(&drv_data.act_dev,
                                                             struct uld_ctx, entry);
                else
                        drv_data.last_dev =
                                list_next_entry(drv_data.last_dev, entry);
        }
        list_move(&u_ctx->entry, &drv_data.inact_dev);
        if (list_empty(&drv_data.act_dev))
                drv_data.last_dev = NULL;
        atomic_dec(&drv_data.dev_count);
        mutex_unlock(&drv_data.drv_mutex);

        return 0;
}
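
/*
 * cpl_fw6_pld_handler - completion handler for lookaside crypto
 * requests. The CPL_FW6_PLD message carries the request cookie in
 * its payload; MAC/pad error bits are mapped to -EBADMSG before the
 * completion callback is invoked.
 */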
static int cpl_fw6_pld_handler(struct adapter *adap,
                               unsigned char *input)
{
        struct crypto_async_request *req;
        struct cpl_fw6_pld *fw6_pld;
        u32 ack_err_status = 0;
        int error_status = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
                                                        fw6_pld->data[1]);

        ack_err_status =
                ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
        if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
                error_status = -EBADMSG;
        /* call the completion callback with the failure status */
        if (req)
                error_status = chcr_handle_resp(req, input, error_status);
        else
                pr_err("Incorrect request address from the firmware\n");

        if (error_status)
                atomic_inc(&adap->chcr_stats.error);

        return 0;
}
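
/* Hand a fully built crypto work-request skb to the LLD for transmission. */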
int chcr_send_wr(struct sk_buff *skb)
{
        return cxgb4_crypto_send(skb->dev, skb);
}
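
/*
 * chcr_uld_add - ULD probe callback, invoked by cxgb4 once per
 * adapter. Allocates the uld_ctx, caches the lower-layer info and
 * leaves the device inactive until the STATE_UP event arrives.
 */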
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
        struct uld_ctx *u_ctx;

        pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
        if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
                return ERR_PTR(-EOPNOTSUPP);

        /* Create the device and add it to the device list */
        u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
        if (!u_ctx) {
                u_ctx = ERR_PTR(-ENOMEM);
                goto out;
        }
        u_ctx->lldi = *lld;
        chcr_dev_init(u_ctx);

#ifdef CONFIG_CHELSIO_TLS_DEVICE
        if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
                chcr_enable_ktls(padap(&u_ctx->dev));
#endif
out:
        return u_ctx;
}
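
/*
 * chcr_uld_rx_handler - dispatch a CPL response to its opcode handler.
 * The payload is either inline after the RSS header (&rsp[1]) or in
 * the packet gather list (pgl->va).
 */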
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
                        const struct pkt_gl *pgl)
{
        struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
        struct chcr_dev *dev = &u_ctx->dev;
        struct adapter *adap = padap(dev);
        const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

        if (!work_handlers[rpl->opcode]) {
                pr_err("Unsupported opcode %d received\n", rpl->opcode);
                return 0;
        }

        if (!pgl)
                work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
        else
                work_handlers[rpl->opcode](adap, pgl->va);
        return 0;
}

#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
        /* If the skb's decrypted bit is set, it is a NIC TLS packet;
         * otherwise it is an IPsec packet.
         */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
        if (skb->decrypted)
                return chcr_ktls_xmit(skb, dev);
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
        return chcr_ipsec_xmit(skb, dev);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
        return 0;
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
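
/*
 * chcr_detach_device - quiesce a device on CXGB4_STATE_DETACH: mark
 * it detached and wait until detach_work_fn() reports that no
 * requests remain in flight.
 */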
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
        struct chcr_dev *dev = &u_ctx->dev;

        if (dev->state == CHCR_DETACH) {
                pr_debug("Detach event received for an already detached device\n");
                return;
        }
        dev->state = CHCR_DETACH;
        if (atomic_read(&dev->inflight) != 0) {
                schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
                wait_for_completion(&dev->detach_comp);
        }

        /* Move u_ctx to the inactive_dev list */
        chcr_dev_move(u_ctx);
}
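
/*
 * chcr_uld_state_change - react to adapter state events: start the
 * crypto service when the first device comes up and stop it once the
 * last device detaches.
 */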
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct uld_ctx *u_ctx = handle;
        int ret = 0;

        switch (state) {
        case CXGB4_STATE_UP:
                if (u_ctx->dev.state != CHCR_INIT) {
                        /* already initialised */
                        return 0;
                }
                chcr_dev_add(u_ctx);
                ret = start_crypto();
                break;

        case CXGB4_STATE_DETACH:
                chcr_detach_device(u_ctx);
                if (!atomic_read(&drv_data.dev_count))
                        stop_crypto();
                break;

        case CXGB4_STATE_START_RECOVERY:
        case CXGB4_STATE_DOWN:
        default:
                break;
        }
        return ret;
}
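
/*
 * update_netdev_features - advertise inline-IPsec offload by wiring
 * up xfrm ops on every device, active or inactive, that reports the
 * ULP_CRYPTO_IPSEC_INLINE capability.
 */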
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void)
{
        struct uld_ctx *u_ctx, *tmp;

        mutex_lock(&drv_data.drv_mutex);
        list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
                if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
                        chcr_add_xfrmops(&u_ctx->lldi);
        }
        list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
                if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
                        chcr_add_xfrmops(&u_ctx->lldi);
        }
        mutex_unlock(&drv_data.drv_mutex);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
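
/*
 * Module init: initialise the driver-global device lists and register
 * with cxgb4 as the CXGB4_ULD_CRYPTO upper-layer driver.
 */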
static int __init chcr_crypto_init(void)
{
        INIT_LIST_HEAD(&drv_data.act_dev);
        INIT_LIST_HEAD(&drv_data.inact_dev);
        atomic_set(&drv_data.dev_count, 0);
        mutex_init(&drv_data.drv_mutex);
        drv_data.last_dev = NULL;
        cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
        rtnl_lock();
        update_netdev_features();
        rtnl_unlock();
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

        return 0;
}
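
/*
 * Module exit: unregister from cxgb4 and free every uld_ctx left on
 * the active and inactive lists, disabling KTLS where it was enabled.
 */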
static void __exit chcr_crypto_exit(void)
{
        struct uld_ctx *u_ctx, *tmp;
        struct adapter *adap;

        stop_crypto();
        cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
        /* Remove all devices from the lists */
        mutex_lock(&drv_data.drv_mutex);
        list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
                adap = padap(&u_ctx->dev);
                memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
                if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
                        chcr_disable_ktls(adap);
#endif
                list_del(&u_ctx->entry);
                kfree(u_ctx);
        }
        list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
                adap = padap(&u_ctx->dev);
                memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
                if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
                        chcr_disable_ktls(adap);
#endif
                list_del(&u_ctx->entry);
                kfree(u_ctx);
        }
        mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);