/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __CHCR_CORE_H__
#define __CHCR_CORE_H__

#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "t4_msg.h"
#include "cxgb4_uld.h"

#define DRV_MODULE_NAME "chcr"
#define DRV_VERSION "1.0.0.0-ko"
#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"

#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
#define WQ_DETACH_TM	(msecs_to_jiffies(50))
#define PAD_ERROR_BIT		1
#define CHK_PAD_ERR_BIT(x)	(((x) >> PAD_ERROR_BIT) & 1)

#define MAC_ERROR_BIT		0
#define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
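/*
 * Usage sketch (illustrative; "status" is a hypothetical completion status
 * word, not defined in this header): bit MAC_ERROR_BIT carries the
 * MAC-compare result and bit PAD_ERROR_BIT the pad-check result, so a
 * response handler could flag either failure with
 *
 *	if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
 *		err = -EBADMSG;
 */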
#define MAX_SALT                4
#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
		    sizeof(struct cpl_rx_phys_dsgl) + \
		    sizeof(struct ulptx_sgl) + 16) //IV

#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
			DUMMY_BYTES + \
		    sizeof(struct ulptx_sgl))
struct uld_ctx;

struct _key_ctx {
	__be32 ctx_hdr;
	u8 salt[MAX_SALT];
	__be64 iv_to_auth;
	unsigned char key[];
};
#define KEYCTX_TX_WR_IV_S  55
#define KEYCTX_TX_WR_IV_M  0x1ffULL
#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
#define KEYCTX_TX_WR_IV_G(x) \
	(((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)

#define KEYCTX_TX_WR_AAD_S 47
#define KEYCTX_TX_WR_AAD_M 0xffULL
#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
				KEYCTX_TX_WR_AAD_M)

#define KEYCTX_TX_WR_AADST_S 39
#define KEYCTX_TX_WR_AADST_M 0xffULL
#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
#define KEYCTX_TX_WR_AADST_G(x) \
	(((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)

#define KEYCTX_TX_WR_CIPHER_S  30
#define KEYCTX_TX_WR_CIPHER_M  0x1ffULL
#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
#define KEYCTX_TX_WR_CIPHER_G(x) \
	(((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)

#define KEYCTX_TX_WR_CIPHERST_S 23
#define KEYCTX_TX_WR_CIPHERST_M 0x7f
#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
#define KEYCTX_TX_WR_CIPHERST_G(x) \
	(((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)

#define KEYCTX_TX_WR_AUTH_S 14
#define KEYCTX_TX_WR_AUTH_M 0x1ff
#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
#define KEYCTX_TX_WR_AUTH_G(x) \
	(((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)

#define KEYCTX_TX_WR_AUTHST_S 7
#define KEYCTX_TX_WR_AUTHST_M 0x7f
#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
#define KEYCTX_TX_WR_AUTHST_G(x) \
	(((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)

#define KEYCTX_TX_WR_AUTHIN_S 0
#define KEYCTX_TX_WR_AUTHIN_M 0x7f
#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
#define KEYCTX_TX_WR_AUTHIN_G(x) \
	(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
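/*
 * Usage sketch (illustrative; "authlen" is a hypothetical field value, not
 * defined in this header): the *_S/_M/_V/_G macros above follow the common
 * cxgb4 accessor convention -- _S is the field's bit shift, _M its mask,
 * _V(x) positions a value into the field and _G(x) extracts it again.  For
 * the 9-bit AUTH field of a 64-bit key-context word that looks like:
 *
 *	u64 word = KEYCTX_TX_WR_AUTH_V((u64)authlen & KEYCTX_TX_WR_AUTH_M);
 *	unsigned int authlen_out = KEYCTX_TX_WR_AUTH_G(word);
 */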
#define WQ_RETRY	5
struct chcr_driver_data {
	struct list_head act_dev;
	struct list_head inact_dev;
	atomic_t dev_count;
	struct mutex drv_mutex;
	struct uld_ctx *last_dev;
};

enum chcr_state {
	CHCR_INIT = 0,
	CHCR_ATTACH,
	CHCR_DETACH,
};

struct chcr_wr {
	struct fw_crypto_lookaside_wr wreq;
	struct ulp_txpkt ulptx;
	struct ulptx_idata sc_imm;
	struct cpl_tx_sec_pdu sec_cpl;
	struct _key_ctx key_ctx;
};

struct chcr_dev {
	spinlock_t lock_chcr_dev;
	enum chcr_state state;
	atomic_t inflight;
	int wqretry;
	struct delayed_work detach_work;
	struct completion detach_comp;
};

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct chcr_dev dev;
};

struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

struct chcr_ipsec_req {
	struct ulp_txpkt ulptx;
	struct ulptx_idata sc_imm;
	struct cpl_tx_sec_pdu sec_cpl;
	struct _key_ctx key_ctx;
};

struct chcr_ipsec_wr {
	struct fw_ulptx_wr wreq;
	struct chcr_ipsec_req req;
};

#define ESN_IV_INSERT_OFFSET 12
struct chcr_ipsec_aadiv {
	__be32 spi;
	u8 seq_no[8];
	u8 iv[8];
};

struct ipsec_sa_entry {
	int hmac_ctrl;
	u16 esn;
	u16 resv;
	unsigned int enckey_len;
	unsigned int kctx_len;
	unsigned int authsize;
	__be32 key_ctx_hdr;
	char salt[MAX_SALT];
	char key[2 * AES_MAX_KEY_SIZE];
};
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
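/*
 * Worked example (illustrative, assuming the usual ulptx_sgl layout): a flit
 * is 8 bytes; the leading two flits hold the ULPTX header together with the
 * first address/length pair, each further pair of entries packs into three
 * flits, and an odd leftover entry rounds up to two more.  Hence
 * sgl_len(5) = (3 * 4) / 2 + (4 & 1) + 2 = 8 flits, i.e. 64 bytes.
 */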
static inline void *padap(struct chcr_dev *dev)
{
	struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);

	return pci_get_drvdata(u_ctx->lldi.pdev);
}
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl);
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err);
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
void chcr_enable_ktls(struct adapter *adap);
void chcr_disable_ktls(struct adapter *adap);
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
#endif
#endif /* __CHCR_CORE_H__ */