/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
 * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <asm/kmap_types.h>
#include <crypto/algapi.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

/* Pick an atomic kmap slot: the softirq pair in softirq context, the user
 * pair otherwise; "out" selects the second slot of each pair. */
static inline enum km_type crypto_kmap_type(int out)
{
        enum km_type type;

        if (in_softirq())
                type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
        else
                type = out * (KM_USER1 - KM_USER0) + KM_USER0;

        return type;
}

static inline void *crypto_kmap(struct page *page, int out)
{
        return kmap_atomic(page, crypto_kmap_type(out));
}

static inline void crypto_kunmap(void *vaddr, int out)
{
        kunmap_atomic(vaddr, crypto_kmap_type(out));
}

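/*
 * Illustrative sketch, not part of the original header: a mapping taken with
 * crypto_kmap() is expected to be released with crypto_kunmap() using the
 * same "out" value, so that both calls resolve to the same atomic kmap slot.
 * The page, offset, buf and nbytes below are placeholders.
 *
 *	u8 *vaddr;
 *
 *	vaddr = crypto_kmap(page, 1);
 *	memcpy(vaddr + offset, buf, nbytes);
 *	crypto_kunmap(vaddr, 1);
 */
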
static inline void crypto_yield(u32 flags)
{
        if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                cond_resched();
}

static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
                                        struct scatterlist *sg2)
{
        sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
}

static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
{
        return (++sg)->length ? sg : (void *)sg_page(sg);
}

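/*
 * Illustrative sketch, not part of the original header: scatterwalk_sg_chain()
 * stores a pointer to the second list in the page slot of the last entry of
 * the first list, with length 0, and scatterwalk_sg_next() follows that
 * pointer when the entry it steps onto has a zero length.  The buffers and
 * lengths below are placeholders.
 *
 *	struct scatterlist head[2], tail[1];
 *
 *	sg_init_table(head, 2);
 *	sg_init_table(tail, 1);
 *	sg_set_buf(&head[0], buf1, len1);
 *	sg_set_buf(&tail[0], buf2, len2);
 *	scatterwalk_sg_chain(head, 2, tail);
 *
 * After this, scatterwalk_sg_next(&head[0]) returns &tail[0].
 */
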
static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
                                                struct scatter_walk *walk_out)
{
        return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
                 (int)(walk_in->offset - walk_out->offset));
}

static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
        unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
        unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
        return len_this_page > len ? len : len_this_page;
}

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
                                             unsigned int nbytes)
{
        unsigned int len_this_page = scatterwalk_pagelen(walk);
        return nbytes > len_this_page ? len_this_page : nbytes;
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
                                       unsigned int nbytes)
{
        walk->offset += nbytes;
}

static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
                                               unsigned int alignmask)
{
        return !(walk->offset & alignmask);
}

static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
        return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
}

static inline void scatterwalk_unmap(void *vaddr, int out)
{
        crypto_kunmap(vaddr, out);
}

void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
                            size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk, int out);
void scatterwalk_done(struct scatter_walk *walk, int out, int more);

void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out);

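/*
 * Illustrative sketch, not part of the original header: the usual pattern for
 * the out-of-line walk API is to start a walk over a scatterlist, copy data
 * in or out, and finish the walk.  Here "out" is 0 to copy from the list into
 * buf and 1 to copy from buf into the list; sg, buf and nbytes are
 * placeholders.
 *
 *	struct scatter_walk walk;
 *
 *	scatterwalk_start(&walk, sg);
 *	scatterwalk_copychunks(buf, &walk, nbytes, 0);
 *	scatterwalk_done(&walk, 0, 0);
 *
 * scatterwalk_map_and_copy() performs the same kind of copy at a byte offset
 * into the list without an explicit walk.
 */
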
#endif  /* _CRYPTO_SCATTERWALK_H */