/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/skbuff.h>
22 #include <crypto/aes.h>
23 #include <crypto/hash.h>
26 #include "chcr_core.h"
27 #include "cxgb4_uld.h"
/* All ULD contexts registered with this driver; protected by dev_mutex. */
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
/* Number of chcr devices currently attached to adapters. */
static atomic_t dev_count;
/* Round-robin cursor over uld_ctx_list used by assign_chcr_device(). */
static struct uld_ctx *ctx_rr;
/* Handler invoked for a received CPL message; indexed by CPL opcode. */
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
39 static chcr_handler_func work_handlers
[NUM_CPL_CMDS
] = {
40 [CPL_FW6_PLD
] = cpl_fw6_pld_handler
,
43 static struct cxgb4_uld_info chcr_uld_info
= {
44 .name
= DRV_MODULE_NAME
,
45 .nrxq
= MAX_ULD_QSETS
,
46 .ntxq
= MAX_ULD_QSETS
,
49 .state_change
= chcr_uld_state_change
,
50 .rx_handler
= chcr_uld_rx_handler
,
51 #ifdef CONFIG_CHELSIO_IPSEC_INLINE
52 .tx_handler
= chcr_uld_tx_handler
,
53 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
56 struct uld_ctx
*assign_chcr_device(void)
58 struct uld_ctx
*u_ctx
= NULL
;
61 * When multiple devices are present in system select
62 * device in round-robin fashion for crypto operations
63 * Although One session must use the same device to
64 * maintain request-response ordering.
66 mutex_lock(&dev_mutex
);
67 if (!list_empty(&uld_ctx_list
)) {
69 if (list_is_last(&ctx_rr
->entry
, &uld_ctx_list
))
70 ctx_rr
= list_first_entry(&uld_ctx_list
,
74 ctx_rr
= list_next_entry(ctx_rr
, entry
);
76 mutex_unlock(&dev_mutex
);
80 static int chcr_dev_add(struct uld_ctx
*u_ctx
)
84 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
88 spin_lock_init(&dev
->lock_chcr_dev
);
91 atomic_inc(&dev_count
);
92 mutex_lock(&dev_mutex
);
93 list_add_tail(&u_ctx
->entry
, &uld_ctx_list
);
96 mutex_unlock(&dev_mutex
);
100 static int chcr_dev_remove(struct uld_ctx
*u_ctx
)
102 if (ctx_rr
== u_ctx
) {
103 if (list_is_last(&ctx_rr
->entry
, &uld_ctx_list
))
104 ctx_rr
= list_first_entry(&uld_ctx_list
,
108 ctx_rr
= list_next_entry(ctx_rr
, entry
);
110 list_del(&u_ctx
->entry
);
111 if (list_empty(&uld_ctx_list
))
115 atomic_dec(&dev_count
);
119 static int cpl_fw6_pld_handler(struct chcr_dev
*dev
,
120 unsigned char *input
)
122 struct crypto_async_request
*req
;
123 struct cpl_fw6_pld
*fw6_pld
;
124 u32 ack_err_status
= 0;
125 int error_status
= 0;
126 struct adapter
*adap
= padap(dev
);
128 fw6_pld
= (struct cpl_fw6_pld
*)input
;
129 req
= (struct crypto_async_request
*)(uintptr_t)be64_to_cpu(
133 ntohl(*(__be32
*)((unsigned char *)&fw6_pld
->data
[0] + 4));
134 if (ack_err_status
) {
135 if (CHK_MAC_ERR_BIT(ack_err_status
) ||
136 CHK_PAD_ERR_BIT(ack_err_status
))
137 error_status
= -EBADMSG
;
138 atomic_inc(&adap
->chcr_stats
.error
);
140 /* call completion callback with failure status */
142 error_status
= chcr_handle_resp(req
, input
, error_status
);
144 pr_err("Incorrect request address from the firmware\n");
150 int chcr_send_wr(struct sk_buff
*skb
)
152 return cxgb4_crypto_send(skb
->dev
, skb
);
155 static void *chcr_uld_add(const struct cxgb4_lld_info
*lld
)
157 struct uld_ctx
*u_ctx
;
159 /* Create the device and add it in the device list */
160 if (!(lld
->ulp_crypto
& ULP_CRYPTO_LOOKASIDE
))
161 return ERR_PTR(-EOPNOTSUPP
);
163 /* Create the device and add it in the device list */
164 u_ctx
= kzalloc(sizeof(*u_ctx
), GFP_KERNEL
);
166 u_ctx
= ERR_PTR(-ENOMEM
);
170 #ifdef CONFIG_CHELSIO_IPSEC_INLINE
171 if (lld
->crypto
& ULP_CRYPTO_IPSEC_INLINE
)
172 chcr_add_xfrmops(lld
);
173 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
178 int chcr_uld_rx_handler(void *handle
, const __be64
*rsp
,
179 const struct pkt_gl
*pgl
)
181 struct uld_ctx
*u_ctx
= (struct uld_ctx
*)handle
;
182 struct chcr_dev
*dev
= u_ctx
->dev
;
183 const struct cpl_fw6_pld
*rpl
= (struct cpl_fw6_pld
*)rsp
;
185 if (rpl
->opcode
!= CPL_FW6_PLD
) {
186 pr_err("Unsupported opcode\n");
191 work_handlers
[rpl
->opcode
](dev
, (unsigned char *)&rsp
[1]);
193 work_handlers
[rpl
->opcode
](dev
, pgl
->va
);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
/* Transmit hook for inline IPsec: punt the skb to the chcr IPsec path. */
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
204 static int chcr_uld_state_change(void *handle
, enum cxgb4_state state
)
206 struct uld_ctx
*u_ctx
= handle
;
212 ret
= chcr_dev_add(u_ctx
);
216 if (atomic_read(&dev_count
) == 1)
217 ret
= start_crypto();
220 case CXGB4_STATE_DETACH
:
222 mutex_lock(&dev_mutex
);
223 chcr_dev_remove(u_ctx
);
224 mutex_unlock(&dev_mutex
);
226 if (!atomic_read(&dev_count
))
230 case CXGB4_STATE_START_RECOVERY
:
231 case CXGB4_STATE_DOWN
:
238 static int __init
chcr_crypto_init(void)
240 if (cxgb4_register_uld(CXGB4_ULD_CRYPTO
, &chcr_uld_info
))
241 pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
246 static void __exit
chcr_crypto_exit(void)
248 struct uld_ctx
*u_ctx
, *tmp
;
250 if (atomic_read(&dev_count
))
253 /* Remove all devices from list */
254 mutex_lock(&dev_mutex
);
255 list_for_each_entry_safe(u_ctx
, tmp
, &uld_ctx_list
, entry
) {
257 chcr_dev_remove(u_ctx
);
260 mutex_unlock(&dev_mutex
);
261 cxgb4_unregister_uld(CXGB4_ULD_CRYPTO
);
/* Module registration and metadata. */
module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);