i2c: mxs: use MXS_DMA_CTRL_WAIT4END instead of DMA_CTRL_ACK
[linux/fpc-iii.git] / include / crypto / algapi.h
blobe5bd302f2c49e3a9012230e668ffee42fe754cff
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Cryptographic API for algorithms (i.e., low-level API).
5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6 */
7 #ifndef _CRYPTO_ALGAPI_H
8 #define _CRYPTO_ALGAPI_H
10 #include <linux/crypto.h>
11 #include <linux/list.h>
12 #include <linux/kernel.h>
13 #include <linux/skbuff.h>
16 * Maximum values for blocksize and alignmask, used to allocate
17 * static buffers that are big enough for any combination of
18 * algs and architectures. Ciphers have a lower maximum size.
20 #define MAX_ALGAPI_BLOCKSIZE 160
21 #define MAX_ALGAPI_ALIGNMASK 63
22 #define MAX_CIPHER_BLOCKSIZE 16
23 #define MAX_CIPHER_ALIGNMASK 15
25 struct crypto_aead;
26 struct crypto_instance;
27 struct module;
28 struct rtattr;
29 struct seq_file;
31 struct crypto_type {
32 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
33 unsigned int (*extsize)(struct crypto_alg *alg);
34 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
35 int (*init_tfm)(struct crypto_tfm *tfm);
36 void (*show)(struct seq_file *m, struct crypto_alg *alg);
37 int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
38 void (*free)(struct crypto_instance *inst);
40 unsigned int type;
41 unsigned int maskclear;
42 unsigned int maskset;
43 unsigned int tfmsize;
46 struct crypto_instance {
47 struct crypto_alg alg;
49 struct crypto_template *tmpl;
50 struct hlist_node list;
52 void *__ctx[] CRYPTO_MINALIGN_ATTR;
55 struct crypto_template {
56 struct list_head list;
57 struct hlist_head instances;
58 struct module *module;
60 struct crypto_instance *(*alloc)(struct rtattr **tb);
61 void (*free)(struct crypto_instance *inst);
62 int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
64 char name[CRYPTO_MAX_ALG_NAME];
67 struct crypto_spawn {
68 struct list_head list;
69 struct crypto_alg *alg;
70 struct crypto_instance *inst;
71 const struct crypto_type *frontend;
72 u32 mask;
75 struct crypto_queue {
76 struct list_head list;
77 struct list_head *backlog;
79 unsigned int qlen;
80 unsigned int max_qlen;
83 struct scatter_walk {
84 struct scatterlist *sg;
85 unsigned int offset;
88 struct blkcipher_walk {
89 union {
90 struct {
91 struct page *page;
92 unsigned long offset;
93 } phys;
95 struct {
96 u8 *page;
97 u8 *addr;
98 } virt;
99 } src, dst;
101 struct scatter_walk in;
102 unsigned int nbytes;
104 struct scatter_walk out;
105 unsigned int total;
107 void *page;
108 u8 *buffer;
109 u8 *iv;
110 unsigned int ivsize;
112 int flags;
113 unsigned int walk_blocksize;
114 unsigned int cipher_blocksize;
115 unsigned int alignmask;
118 struct ablkcipher_walk {
119 struct {
120 struct page *page;
121 unsigned int offset;
122 } src, dst;
124 struct scatter_walk in;
125 unsigned int nbytes;
126 struct scatter_walk out;
127 unsigned int total;
128 struct list_head buffers;
129 u8 *iv_buffer;
130 u8 *iv;
131 int flags;
132 unsigned int blocksize;
135 extern const struct crypto_type crypto_ablkcipher_type;
136 extern const struct crypto_type crypto_blkcipher_type;
138 void crypto_mod_put(struct crypto_alg *alg);
140 int crypto_register_template(struct crypto_template *tmpl);
141 int crypto_register_templates(struct crypto_template *tmpls, int count);
142 void crypto_unregister_template(struct crypto_template *tmpl);
143 void crypto_unregister_templates(struct crypto_template *tmpls, int count);
144 struct crypto_template *crypto_lookup_template(const char *name);
146 int crypto_register_instance(struct crypto_template *tmpl,
147 struct crypto_instance *inst);
148 int crypto_unregister_instance(struct crypto_instance *inst);
150 int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
151 struct crypto_instance *inst, u32 mask);
152 int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
153 struct crypto_instance *inst,
154 const struct crypto_type *frontend);
155 int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
156 u32 type, u32 mask);
158 void crypto_drop_spawn(struct crypto_spawn *spawn);
159 struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
160 u32 mask);
161 void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
163 static inline void crypto_set_spawn(struct crypto_spawn *spawn,
164 struct crypto_instance *inst)
166 spawn->inst = inst;
169 struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
170 int crypto_check_attr_type(struct rtattr **tb, u32 type);
171 const char *crypto_attr_alg_name(struct rtattr *rta);
172 struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
173 const struct crypto_type *frontend,
174 u32 type, u32 mask);
176 static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
177 u32 type, u32 mask)
179 return crypto_attr_alg2(rta, NULL, type, mask);
182 int crypto_attr_u32(struct rtattr *rta, u32 *num);
183 int crypto_inst_setname(struct crypto_instance *inst, const char *name,
184 struct crypto_alg *alg);
185 void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
186 unsigned int head);
188 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
189 int crypto_enqueue_request(struct crypto_queue *queue,
190 struct crypto_async_request *request);
191 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
192 static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
194 return queue->qlen;
197 void crypto_inc(u8 *a, unsigned int size);
198 void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
200 static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
202 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
203 __builtin_constant_p(size) &&
204 (size % sizeof(unsigned long)) == 0) {
205 unsigned long *d = (unsigned long *)dst;
206 unsigned long *s = (unsigned long *)src;
208 while (size > 0) {
209 *d++ ^= *s++;
210 size -= sizeof(unsigned long);
212 } else {
213 __crypto_xor(dst, dst, src, size);
217 static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
218 unsigned int size)
220 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
221 __builtin_constant_p(size) &&
222 (size % sizeof(unsigned long)) == 0) {
223 unsigned long *d = (unsigned long *)dst;
224 unsigned long *s1 = (unsigned long *)src1;
225 unsigned long *s2 = (unsigned long *)src2;
227 while (size > 0) {
228 *d++ = *s1++ ^ *s2++;
229 size -= sizeof(unsigned long);
231 } else {
232 __crypto_xor(dst, src1, src2, size);
236 int blkcipher_walk_done(struct blkcipher_desc *desc,
237 struct blkcipher_walk *walk, int err);
238 int blkcipher_walk_virt(struct blkcipher_desc *desc,
239 struct blkcipher_walk *walk);
240 int blkcipher_walk_phys(struct blkcipher_desc *desc,
241 struct blkcipher_walk *walk);
242 int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
243 struct blkcipher_walk *walk,
244 unsigned int blocksize);
245 int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
246 struct blkcipher_walk *walk,
247 struct crypto_aead *tfm,
248 unsigned int blocksize);
250 int ablkcipher_walk_done(struct ablkcipher_request *req,
251 struct ablkcipher_walk *walk, int err);
252 int ablkcipher_walk_phys(struct ablkcipher_request *req,
253 struct ablkcipher_walk *walk);
254 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
256 static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
258 return PTR_ALIGN(crypto_tfm_ctx(tfm),
259 crypto_tfm_alg_alignmask(tfm) + 1);
262 static inline struct crypto_instance *crypto_tfm_alg_instance(
263 struct crypto_tfm *tfm)
265 return container_of(tfm->__crt_alg, struct crypto_instance, alg);
268 static inline void *crypto_instance_ctx(struct crypto_instance *inst)
270 return inst->__ctx;
273 static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
274 struct crypto_ablkcipher *tfm)
276 return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
279 static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
281 return crypto_tfm_ctx(&tfm->base);
284 static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
286 return crypto_tfm_ctx_aligned(&tfm->base);
289 static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
290 struct crypto_spawn *spawn)
292 u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
293 u32 mask = CRYPTO_ALG_TYPE_MASK;
295 return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
298 static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
300 return crypto_tfm_ctx(&tfm->base);
303 static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
305 return crypto_tfm_ctx_aligned(&tfm->base);
308 static inline struct crypto_cipher *crypto_spawn_cipher(
309 struct crypto_spawn *spawn)
311 u32 type = CRYPTO_ALG_TYPE_CIPHER;
312 u32 mask = CRYPTO_ALG_TYPE_MASK;
314 return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
317 static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
319 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
322 static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
323 struct scatterlist *dst,
324 struct scatterlist *src,
325 unsigned int nbytes)
327 walk->in.sg = src;
328 walk->out.sg = dst;
329 walk->total = nbytes;
332 static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
333 struct scatterlist *dst,
334 struct scatterlist *src,
335 unsigned int nbytes)
337 walk->in.sg = src;
338 walk->out.sg = dst;
339 walk->total = nbytes;
340 INIT_LIST_HEAD(&walk->buffers);
343 static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
345 if (unlikely(!list_empty(&walk->buffers)))
346 __ablkcipher_walk_complete(walk);
349 static inline struct crypto_async_request *crypto_get_backlog(
350 struct crypto_queue *queue)
352 return queue->backlog == &queue->list ? NULL :
353 container_of(queue->backlog, struct crypto_async_request, list);
356 static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
357 struct ablkcipher_request *request)
359 return crypto_enqueue_request(queue, &request->base);
362 static inline struct ablkcipher_request *ablkcipher_dequeue_request(
363 struct crypto_queue *queue)
365 return ablkcipher_request_cast(crypto_dequeue_request(queue));
368 static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
370 return req->__ctx;
373 static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
374 u32 type, u32 mask)
376 return crypto_attr_alg(tb[1], type, mask);
379 static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
381 return (type ^ off) & mask & off;
385 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
386 * Otherwise returns zero.
388 static inline int crypto_requires_sync(u32 type, u32 mask)
390 return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
393 noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
396 * crypto_memneq - Compare two areas of memory without leaking
397 * timing information.
399 * @a: One area of memory
400 * @b: Another area of memory
401 * @size: The size of the area.
403 * Returns 0 when data is equal, 1 otherwise.
405 static inline int crypto_memneq(const void *a, const void *b, size_t size)
407 return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
410 static inline void crypto_yield(u32 flags)
412 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
413 cond_resched();
416 int crypto_register_notifier(struct notifier_block *nb);
417 int crypto_unregister_notifier(struct notifier_block *nb);
419 /* Crypto notification events. */
420 enum {
421 CRYPTO_MSG_ALG_REQUEST,
422 CRYPTO_MSG_ALG_REGISTER,
423 CRYPTO_MSG_ALG_LOADED,
426 #endif /* _CRYPTO_ALGAPI_H */