1 // SPDX-License-Identifier: GPL-2.0+
3 * caam - Freescale FSL CAAM support for hw_random
5 * Copyright 2011 Freescale Semiconductor, Inc.
6 * Copyright 2018-2019, 2023 NXP
8 * Based on caamalg.c crypto API driver.
12 #include <linux/hw_random.h>
13 #include <linux/completion.h>
14 #include <linux/atomic.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/kernel.h>
17 #include <linux/kfifo.h>
23 #include "desc_constr.h"
27 #define CAAM_RNG_MAX_FIFO_STORE_SIZE 16
30 * Length of used descriptors, see caam_init_desc()
32 #define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \
34 CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
36 /* rng per-device context */
40 struct device
*ctrldev
;
43 struct work_struct worker
;
/*
 * Per-request context handed to the job-ring completion callback so
 * caam_rng_done() can report decoded status and wake the submitter.
 */
struct caam_rng_job_ctx {
	struct completion *done;	/* completed when the job finishes */
	int *err;			/* decoded job status written back here */
};
52 static struct caam_rng_ctx
*to_caam_rng_ctx(struct hwrng
*r
)
54 return (struct caam_rng_ctx
*)r
->priv
;
57 static void caam_rng_done(struct device
*jrdev
, u32
*desc
, u32 err
,
60 struct caam_rng_job_ctx
*jctx
= context
;
63 *jctx
->err
= caam_jr_strstatus(jrdev
, err
);
68 static u32
*caam_init_desc(u32
*desc
, dma_addr_t dst_dma
)
70 init_job_desc(desc
, 0); /* + 1 cmd_sz */
71 /* Generate random bytes: + 1 cmd_sz */
72 append_operation(desc
, OP_ALG_ALGSEL_RNG
| OP_TYPE_CLASS1_ALG
|
74 /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
75 append_fifo_store(desc
, dst_dma
,
76 CAAM_RNG_MAX_FIFO_STORE_SIZE
, FIFOST_TYPE_RNGSTORE
);
78 print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS
,
79 16, 4, desc
, desc_bytes(desc
), 1);
84 static int caam_rng_read_one(struct device
*jrdev
,
87 struct completion
*done
)
91 struct caam_rng_job_ctx jctx
= {
96 len
= CAAM_RNG_MAX_FIFO_STORE_SIZE
;
98 dst_dma
= dma_map_single(jrdev
, dst
, len
, DMA_FROM_DEVICE
);
99 if (dma_mapping_error(jrdev
, dst_dma
)) {
100 dev_err(jrdev
, "unable to map destination memory\n");
104 init_completion(done
);
105 err
= caam_jr_enqueue(jrdev
,
106 caam_init_desc(desc
, dst_dma
),
107 caam_rng_done
, &jctx
);
108 if (err
== -EINPROGRESS
) {
109 wait_for_completion(done
);
113 dma_unmap_single(jrdev
, dst_dma
, len
, DMA_FROM_DEVICE
);
115 return err
?: (ret
?: len
);
118 static void caam_rng_fill_async(struct caam_rng_ctx
*ctx
)
120 struct scatterlist sg
[1];
121 struct completion done
;
124 sg_init_table(sg
, ARRAY_SIZE(sg
));
125 nents
= kfifo_dma_in_prepare(&ctx
->fifo
, sg
, ARRAY_SIZE(sg
),
126 CAAM_RNG_MAX_FIFO_STORE_SIZE
);
130 len
= caam_rng_read_one(ctx
->jrdev
, sg_virt(&sg
[0]),
137 kfifo_dma_in_finish(&ctx
->fifo
, len
);
140 static void caam_rng_worker(struct work_struct
*work
)
142 struct caam_rng_ctx
*ctx
= container_of(work
, struct caam_rng_ctx
,
144 caam_rng_fill_async(ctx
);
147 static int caam_read(struct hwrng
*rng
, void *dst
, size_t max
, bool wait
)
149 struct caam_rng_ctx
*ctx
= to_caam_rng_ctx(rng
);
153 struct completion done
;
155 return caam_rng_read_one(ctx
->jrdev
, dst
, max
,
156 ctx
->desc_sync
, &done
);
159 out
= kfifo_out(&ctx
->fifo
, dst
, max
);
160 if (kfifo_is_empty(&ctx
->fifo
))
161 schedule_work(&ctx
->worker
);
166 static void caam_cleanup(struct hwrng
*rng
)
168 struct caam_rng_ctx
*ctx
= to_caam_rng_ctx(rng
);
170 flush_work(&ctx
->worker
);
171 caam_jr_free(ctx
->jrdev
);
172 kfifo_free(&ctx
->fifo
);
175 #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
176 static inline void test_len(struct hwrng
*rng
, size_t len
, bool wait
)
180 struct caam_rng_ctx
*ctx
= to_caam_rng_ctx(rng
);
181 struct device
*dev
= ctx
->ctrldev
;
183 buf
= kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE
, sizeof(u8
), GFP_KERNEL
);
186 read_len
= rng
->read(rng
, buf
, len
, wait
);
188 if (read_len
< 0 || (read_len
== 0 && wait
)) {
189 dev_err(dev
, "RNG Read FAILED received %d bytes\n",
195 print_hex_dump_debug("random bytes@: ",
196 DUMP_PREFIX_ADDRESS
, 16, 4,
199 len
= len
- read_len
;
205 static inline void test_mode_once(struct hwrng
*rng
, bool wait
)
207 test_len(rng
, 32, wait
);
208 test_len(rng
, 64, wait
);
209 test_len(rng
, 128, wait
);
212 static void self_test(struct hwrng
*rng
)
214 pr_info("Executing RNG SELF-TEST with wait\n");
215 test_mode_once(rng
, true);
219 static int caam_init(struct hwrng
*rng
)
221 struct caam_rng_ctx
*ctx
= to_caam_rng_ctx(rng
);
224 ctx
->desc_sync
= devm_kzalloc(ctx
->ctrldev
, CAAM_RNG_DESC_LEN
,
229 ctx
->desc_async
= devm_kzalloc(ctx
->ctrldev
, CAAM_RNG_DESC_LEN
,
231 if (!ctx
->desc_async
)
234 if (kfifo_alloc(&ctx
->fifo
, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE
,
235 dma_get_cache_alignment()),
239 INIT_WORK(&ctx
->worker
, caam_rng_worker
);
241 ctx
->jrdev
= caam_jr_alloc();
242 err
= PTR_ERR_OR_ZERO(ctx
->jrdev
);
244 kfifo_free(&ctx
->fifo
);
245 pr_err("Job Ring Device allocation for transform failed\n");
250 * Fill async buffer to have early randomness data for
253 caam_rng_fill_async(ctx
);
258 int caam_rng_init(struct device
*ctrldev
);
260 void caam_rng_exit(struct device
*ctrldev
)
262 devres_release_group(ctrldev
, caam_rng_init
);
/*
 * caam_rng_init - controller-level setup: checks for an instantiated
 * RNG block, opens a devres group keyed on this function, allocates
 * the context, wires the hwrng ops (caam_init/caam_cleanup/caam_read,
 * with the context in ->priv) and registers with the hw_random core.
 * NOTE(review): this definition appears truncated at the end of the
 * visible chunk; code left byte-identical, comments only.
 */
265 int caam_rng_init(struct device
*ctrldev
)
267 struct caam_rng_ctx
*ctx
;
269 struct caam_drv_private
*priv
= dev_get_drvdata(ctrldev
);
272 /* Check for an instantiated RNG before registration */
/*
 * Two register layouts for the RNG instantiation count: the perfmon
 * cha_num_ls field vs. the vreg.rng field — presumably selected by
 * CAAM era; the selecting condition is not visible here (TODO confirm).
 */
274 rng_inst
= (rd_reg32(&priv
->jr
[0]->perfmon
.cha_num_ls
) &
275 CHA_ID_LS_RNG_MASK
) >> CHA_ID_LS_RNG_SHIFT
;
277 rng_inst
= rd_reg32(&priv
->jr
[0]->vreg
.rng
) & CHA_VER_NUM_MASK
;
/* Group all allocations below so caam_rng_exit() can release them. */
282 if (!devres_open_group(ctrldev
, caam_rng_init
, GFP_KERNEL
))
285 ctx
= devm_kzalloc(ctrldev
, sizeof(*ctx
), GFP_KERNEL
);
289 ctx
->ctrldev
= ctrldev
;
/* Populate the hw_random interface; ->priv carries the context. */
291 ctx
->rng
.name
= "rng-caam";
292 ctx
->rng
.init
= caam_init
;
293 ctx
->rng
.cleanup
= caam_cleanup
;
294 ctx
->rng
.read
= caam_read
;
295 ctx
->rng
.priv
= (unsigned long)ctx
;
297 dev_info(ctrldev
, "registering rng-caam\n");
299 ret
= devm_hwrng_register(ctrldev
, &ctx
->rng
);
/* Presumably the registration-failure path — verify against upstream. */
301 caam_rng_exit(ctrldev
);
305 #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
306 self_test(&ctx
->rng
);
309 devres_close_group(ctrldev
, caam_rng_init
);