Revert "unicode: Don't special case ignorable code points"
drivers/crypto/caam/caamprng.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver to expose SEC4 PRNG via crypto RNG API
 *
 * Copyright 2022 NXP
 *
 */

#include <linux/completion.h>
#include <crypto/internal/rng.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * Length of the used descriptors, see caam_init_prng_desc() and
 * caam_init_reseed_desc()
 */
#define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ +	\
				CAAM_CMD_SZ +	\
				CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* prng per-device context */
struct caam_prng_ctx {
	int err;
	struct completion done;
};

struct caam_prng_alg {
	struct rng_alg rng;
	bool registered;
};
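
/* Job ring completion callback: record the job status and wake the waiter */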
static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err,
			   void *context)
{
	struct caam_prng_ctx *jctx = context;

	jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0;

	complete(&jctx->done);
}
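
/*
 * Build the job descriptor used by caam_prng_seed() to reseed the RNG;
 * it issues an RNG operation with AS_FINALIZE and produces no output data.
 */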
static u32 *caam_init_reseed_desc(u32 *desc)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Reseed the RNG: + 1 cmd_sz */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 OP_ALG_AS_FINALIZE);

	print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}
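
/*
 * Build a job descriptor that runs the RNG and FIFO-stores len random
 * bytes at dst_dma.
 */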
static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  len, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}
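
/*
 * .generate callback: allocate a cacheline-aligned bounce buffer, map it
 * for DMA, run an RNG job on a job ring and copy the result to dst.
 */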
static int caam_prng_generate(struct crypto_rng *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int dlen)
{
	unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment());
	struct caam_prng_ctx ctx;
	struct device *jrdev;
	dma_addr_t dst_dma;
	u32 *desc;
	u8 *buf;
	int ret;

	if (aligned_dlen < dlen)
		return -EOVERFLOW;

	buf = kzalloc(aligned_dlen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	jrdev = caam_jr_alloc();
	ret = PTR_ERR_OR_ZERO(jrdev);
	if (ret) {
		pr_err("Job Ring Device allocation failed\n");
		kfree(buf);
		return ret;
	}

	desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out1;
	}

	dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "Failed to map destination buffer memory\n");
		ret = -ENOMEM;
		goto out;
	}

	init_completion(&ctx.done);
	ret = caam_jr_enqueue(jrdev,
			      caam_init_prng_desc(desc, dst_dma, dlen),
			      caam_prng_done, &ctx);

	if (ret == -EINPROGRESS) {
		wait_for_completion(&ctx.done);
		ret = ctx.err;
	}

	dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE);

	if (!ret)
		memcpy(dst, buf, dlen);
out:
	kfree(desc);
out1:
	caam_jr_free(jrdev);
	kfree(buf);
	return ret;
}
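
/* No per-transform setup is needed; each request allocates its own job ring */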
static void caam_prng_exit(struct crypto_tfm *tfm) {}

static int caam_prng_init(struct crypto_tfm *tfm)
{
	return 0;
}
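
/*
 * .seed callback: no caller-supplied seed is accepted (seedsize is 0);
 * a reseed job descriptor is run on the hardware RNG instead.
 */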
static int caam_prng_seed(struct crypto_rng *tfm,
			  const u8 *seed, unsigned int slen)
{
	struct caam_prng_ctx ctx;
	struct device *jrdev;
	u32 *desc;
	int ret;

	if (slen) {
		pr_err("Seed length should be zero\n");
		return -EINVAL;
	}

	jrdev = caam_jr_alloc();
	ret = PTR_ERR_OR_ZERO(jrdev);
	if (ret) {
		pr_err("Job Ring Device allocation failed\n");
		return ret;
	}

	desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
	if (!desc) {
		caam_jr_free(jrdev);
		return -ENOMEM;
	}

	init_completion(&ctx.done);
	ret = caam_jr_enqueue(jrdev,
			      caam_init_reseed_desc(desc),
			      caam_prng_done, &ctx);

	if (ret == -EINPROGRESS) {
		wait_for_completion(&ctx.done);
		ret = ctx.err;
	}

	kfree(desc);
	caam_jr_free(jrdev);
	return ret;
}
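
/* "stdrng" algorithm backed by the CAAM RNG, registered at priority 500 */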
static struct caam_prng_alg caam_prng_alg = {
	.rng = {
		.generate = caam_prng_generate,
		.seed = caam_prng_seed,
		.seedsize = 0,
		.base = {
			.cra_name = "stdrng",
			.cra_driver_name = "prng-caam",
			.cra_priority = 500,
			.cra_ctxsize = sizeof(struct caam_prng_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = caam_prng_init,
			.cra_exit = caam_prng_exit,
		},
	}
};

void caam_prng_unregister(void *data)
{
	if (caam_prng_alg.registered)
		crypto_unregister_rng(&caam_prng_alg.rng);
}
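
/*
 * Register the "stdrng" algorithm only if the SEC instance has an RNG
 * block; the register holding the RNG instantiation count depends on the
 * CAAM era (< 10 uses the perfmon CHA fields, newer eras use vreg).
 */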
int caam_prng_register(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 rng_inst;
	int ret = 0;

	/* Check for available RNG blocks before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst) {
		dev_dbg(ctrldev, "RNG block is not available... skipping registering algorithm\n");
		return ret;
	}

	ret = crypto_register_rng(&caam_prng_alg.rng);
	if (ret) {
		dev_err(ctrldev,
			"couldn't register rng crypto alg: %d\n",
			ret);
		return ret;
	}

	caam_prng_alg.registered = true;

	dev_info(ctrldev,
		 "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name);

	return 0;
}