drivers/crypto/caam/caamrng.c

/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship between job descriptors and the shared descriptor:
 *
 * ---------------                     --------------
 * | JobDesc #0  |-------------------->| ShareDesc  |
 * | *(buffer 0) |      |------------->| (generate) |
 * ---------------      |              | (move)     |
 *                      |              | (store)    |
 * ---------------      |              --------------
 * | JobDesc #1  |------|
 * | *(buffer 1) |
 * ---------------
 *
 * A job desc looks like this:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * ---------------------
 *
 * The SharedDesc never changes, and each job descriptor points to one of two
 * buffers for each device, from which the data will be copied into the
 * requested destination.
 */
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
/*
 * Maximum buffer size: maximum number of random, cache-aligned bytes that
 * will be generated and moved to seq out ptr (extlen not allowed)
 */
#define RN_BUF_SIZE		(0xffff / L1_CACHE_BYTES * \
				 L1_CACHE_BYTES)

/* length of descriptors */
#define DESC_JOB_O_LEN		(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
#define DESC_RNG_LEN		(10 * CAAM_CMD_SZ)
/* Buffer, its dma address and the completion that signals when it is filled */
struct buf_data {
	u8 buf[RN_BUF_SIZE];
	dma_addr_t addr;
	struct completion filled;
	u32 hw_desc[DESC_JOB_O_LEN];
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2 /* Empty, but with a job pending -- don't submit another */
	atomic_t empty;
};
/* rng per-device context */
struct caam_rng_ctx {
	struct device *jrdev;
	dma_addr_t sh_desc_dma;
	u32 sh_desc[DESC_RNG_LEN];
	unsigned int cur_buf_idx;
	int current_buf;
	struct buf_data bufs[2];
};

static struct caam_rng_ctx rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
	if (bd->addr)
		dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
				 DMA_FROM_DEVICE);
}
static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;

	if (ctx->sh_desc_dma)
		dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN,
				 DMA_TO_DEVICE);
	rng_unmap_buf(jrdev, &ctx->bufs[0]);
	rng_unmap_buf(jrdev, &ctx->bufs[1]);
}
static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
	struct buf_data *bd;

	bd = (struct buf_data *)((char *)desc -
	      offsetof(struct buf_data, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	atomic_set(&bd->empty, BUF_NOT_EMPTY);
	complete(&bd->filled);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
#endif
}
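
/*
 * Enqueue the job descriptor that refills one of the two buffers.
 * to_current != 0 selects the buffer ctx->current_buf points at;
 * to_current == 0 selects the other one.
 */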
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
{
	struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
	struct device *jrdev = ctx->jrdev;
	u32 *desc = bd->hw_desc;
	int err;

	dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
	init_completion(&bd->filled);
	err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
	if (err)
		complete(&bd->filled); /* don't wait on failed job */
	else
		atomic_inc(&bd->empty); /* note if pending */

	return err;
}
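
/*
 * hw_random read callback: hand out bytes from the current buffer; once it
 * is exhausted, mark it empty, queue a refill job for it, switch to the
 * other buffer and satisfy the remainder of the request from there without
 * waiting, since some data has already been returned.
 */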
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = &rng_ctx;
	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
	int next_buf_idx, copied_idx;
	int err;

	if (atomic_read(&bd->empty)) {
		/* try to submit job if there wasn't one */
		if (atomic_read(&bd->empty) == BUF_EMPTY) {
			err = submit_job(ctx, 1);
			/* if can't submit job, can't even wait */
			if (err)
				return 0;
		}
		/* no immediate data, so exit if not waiting */
		if (!wait)
			return 0;

		/* waiting for pending job */
		if (atomic_read(&bd->empty))
			wait_for_completion(&bd->filled);
	}

	next_buf_idx = ctx->cur_buf_idx + max;
	dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
		__func__, ctx->current_buf, ctx->cur_buf_idx);

	/* if enough data in current buffer */
	if (next_buf_idx < RN_BUF_SIZE) {
		memcpy(data, bd->buf + ctx->cur_buf_idx, max);
		ctx->cur_buf_idx = next_buf_idx;
		return max;
	}

	/* else, copy what's left... */
	copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
	memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
	ctx->cur_buf_idx = 0;
	atomic_set(&bd->empty, BUF_EMPTY);

	/* ...refill... */
	submit_job(ctx, 1);

	/* and use next buffer */
	ctx->current_buf = !ctx->current_buf;
	dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);

	/* since there already is some data read, don't wait */
	return copied_idx + caam_read(rng, data + copied_idx,
				      max - copied_idx, false);
}
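
/*
 * Build the shared descriptor (generate and store RN_BUF_SIZE random bytes).
 * It is constructed once, DMA-mapped, and then referenced by both job
 * descriptors, as shown in the diagram at the top of this file.
 */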
static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc = ctx->sh_desc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Generate random bytes */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);

	/* Store bytes */
	append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);

	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					  DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       desc, desc_bytes(desc), 1);
#endif
}
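
/*
 * Build the job descriptor for one buffer: a header pointing at the shared
 * descriptor, plus a SEQ OUT PTR naming that buffer's DMA address as the
 * output location.
 */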
static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
	struct device *jrdev = ctx->jrdev;
	struct buf_data *bd = &ctx->bufs[buf_id];
	u32 *desc = bd->hw_desc;
	int sh_len = desc_len(ctx->sh_desc);

	init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
			     HDR_REVERSE);

	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);

	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       desc, desc_bytes(desc), 1);
#endif
}
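
/* Wait for any in-flight refill jobs to finish, then undo the DMA mappings */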
static void caam_cleanup(struct hwrng *rng)
{
	int i;
	struct buf_data *bd;

	for (i = 0; i < 2; i++) {
		bd = &rng_ctx.bufs[i];
		if (atomic_read(&bd->empty) == BUF_PENDING)
			wait_for_completion(&bd->filled);
	}

	rng_unmap_ctx(&rng_ctx);
}
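
/*
 * Prepare one buffer: create its job descriptor, mark it empty, submit an
 * initial fill and wait for completion so data is available right away.
 */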
static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
	struct buf_data *bd = &ctx->bufs[buf_id];

	rng_create_job_desc(ctx, buf_id);
	atomic_set(&bd->empty, BUF_EMPTY);
	submit_job(ctx, buf_id == ctx->current_buf);
	wait_for_completion(&bd->filled);
}
static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
	ctx->jrdev = jrdev;
	rng_create_sh_desc(ctx);
	ctx->current_buf = 0;
	ctx->cur_buf_idx = 0;
	caam_init_buf(ctx, 0);
	caam_init_buf(ctx, 1);
}
static struct hwrng caam_rng = {
	.name		= "rng-caam",
	.cleanup	= caam_cleanup,
	.read		= caam_read,
};
static void __exit caam_rng_exit(void)
{
	caam_jr_free(rng_ctx.jrdev);
	hwrng_unregister(&caam_rng);
}
static int __init caam_rng_init(void)
{
	struct device *dev;

	dev = caam_jr_alloc();
	if (IS_ERR(dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(dev);
	}

	caam_init_rng(&rng_ctx, dev);

	dev_info(dev, "registering rng-caam\n");
	return hwrng_register(&caam_rng);
}
module_init(caam_rng_init);
module_exit(caam_rng_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");