cctrng: ARM CryptoCell true random number generator driver
[linux/fpc-iii.git] / drivers / char / hw_random / cctrng.c
blob 619148fb2dc9442d45eb6f1322698a124e2dbd69
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/clk.h>
7 #include <linux/hw_random.h>
8 #include <linux/io.h>
9 #include <linux/platform_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/interrupt.h>
12 #include <linux/irqreturn.h>
13 #include <linux/workqueue.h>
14 #include <linux/circ_buf.h>
15 #include <linux/completion.h>
16 #include <linux/of.h>
17 #include <linux/bitfield.h>
18 #include <linux/fips.h>
20 #include "cctrng.h"
/* Helpers to build a register-field mask from the per-field
 * <name>_BIT_SHIFT / <name>_BIT_SIZE constants in cctrng.h.
 */
#define CC_REG_LOW(name) (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

/* Extract field <fld_name> of register <reg_name> from raw value reg_val */
#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for HW reset completion */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay, in milliseconds */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 * - of a power-of-2 size (limitation of circ_buf.h macros)
 * - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 * - SAMPLE_CNT is input value from the characterisation process
 * - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
/* Per-device state for one CryptoCell TRNG instance. */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CC register space */
	struct clk *clk;		/* optional device source clock */
	struct hwrng rng;		/* hw_random framework handle */
	u32 active_rosc;		/* index of ring oscillator in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	/* backing storage for the circular entropy buffer below */
	u32 data_buf[CCTRNG_DATA_BUF_WORDS];
	struct circ_buf circ;
	struct work_struct compwork;	/* entropy-harvest deferred work */
	struct work_struct startwork;	/* HW (re)trigger deferred work */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
76 /* functions for write/read CC registers */
77 static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
79 iowrite32(val, (drvdata->cc_base + reg));
81 static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
83 return ioread32(drvdata->cc_base + reg);
/*
 * Take a runtime-PM reference and resume the device.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Note: pm_runtime_get_sync() increments the usage counter even when it
 * fails, so the error path must drop that reference with
 * pm_runtime_put_noidle() to keep the counter balanced (otherwise the
 * device can never autosuspend again).
 */
static int cc_trng_pm_get(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		/* balance the usage counter taken by get_sync() */
		pm_runtime_put_noidle(dev);
		return rc;
	}

	/* pm_runtime_get_sync() can return 1 as a valid return code */
	return 0;
}
/* Drop our runtime-PM reference and let autosuspend power the device down. */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int rc;

	pm_runtime_mark_last_busy(dev);
	rc = pm_runtime_put_autosuspend(dev);
	if (rc)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
}
107 static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
109 struct device *dev = &(drvdata->pdev->dev);
111 /* must be before the enabling to avoid redundant suspending */
112 pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
113 pm_runtime_use_autosuspend(dev);
114 /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
115 return pm_runtime_set_active(dev);
118 static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
120 struct device *dev = &(drvdata->pdev->dev);
122 /* enable the PM module*/
123 pm_runtime_enable(dev);
126 static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
128 struct device *dev = &(drvdata->pdev->dev);
130 pm_runtime_disable(dev);
134 static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
136 struct device *dev = &(drvdata->pdev->dev);
137 struct device_node *np = drvdata->pdev->dev.of_node;
138 int rc;
139 int i;
140 /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
141 int ret = -EINVAL;
143 rc = of_property_read_u32_array(np, "arm,rosc-ratio",
144 drvdata->smpl_ratio,
145 CC_TRNG_NUM_OF_ROSCS);
146 if (rc) {
147 /* arm,rosc-ratio was not found in device tree */
148 return rc;
151 /* verify that at least one rosc has (sampling ratio > 0) */
152 for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
153 dev_dbg(dev, "rosc %d sampling ratio %u",
154 i, drvdata->smpl_ratio[i]);
156 if (drvdata->smpl_ratio[i] > 0)
157 ret = 0;
160 return ret;
163 static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
165 struct device *dev = &(drvdata->pdev->dev);
167 dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
168 drvdata->active_rosc += 1;
170 while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
171 if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
172 return 0;
174 drvdata->active_rosc += 1;
176 return -EINVAL;
180 static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
182 u32 max_cycles;
184 /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
185 max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
186 cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
188 /* enable the RND source */
189 cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
191 /* unmask RNG interrupts */
192 cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
196 /* increase circular data buffer index (head/tail) */
197 static inline void circ_idx_inc(int *idx, int bytes)
199 *idx += (bytes + 3) >> 2;
200 *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
203 static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
205 return CIRC_SPACE(drvdata->circ.head,
206 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
210 static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
212 /* current implementation ignores "wait" */
214 struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
215 struct device *dev = &(drvdata->pdev->dev);
216 u32 *buf = (u32 *)drvdata->circ.buf;
217 size_t copied = 0;
218 size_t cnt_w;
219 size_t size;
220 size_t left;
222 if (!spin_trylock(&drvdata->read_lock)) {
223 /* concurrent consumers from data_buf cannot be served */
224 dev_dbg_ratelimited(dev, "unable to hold lock\n");
225 return 0;
228 /* copy till end of data buffer (without wrap back) */
229 cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
230 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
231 size = min((cnt_w<<2), max);
232 memcpy(data, &(buf[drvdata->circ.tail]), size);
233 copied = size;
234 circ_idx_inc(&drvdata->circ.tail, size);
235 /* copy rest of data in data buffer */
236 left = max - copied;
237 if (left > 0) {
238 cnt_w = CIRC_CNT(drvdata->circ.head,
239 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
240 size = min((cnt_w<<2), left);
241 memcpy(data, &(buf[drvdata->circ.tail]), size);
242 copied += size;
243 circ_idx_inc(&drvdata->circ.tail, size);
246 spin_unlock(&drvdata->read_lock);
248 if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
249 if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
250 /* re-check space in buffer to avoid potential race */
251 if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
252 /* increment device's usage counter */
253 int rc = cc_trng_pm_get(dev);
255 if (rc) {
256 dev_err(dev,
257 "cc_trng_pm_get returned %x\n",
258 rc);
259 return rc;
262 /* schedule execution of deferred work handler
263 * for filling of data buffer
265 schedule_work(&drvdata->startwork);
266 } else {
267 atomic_set(&drvdata->pending_hw, 0);
272 return copied;
/*
 * Program the TRNG HW for the currently active ring oscillator and start
 * entropy generation. The register write order below is HW-mandated:
 * clock on -> SW reset -> sampling ratio -> source off -> config -> start.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 * (the reset clears SAMPLE_CNT1; once a written value reads back,
	 * the reset is done)
	 */
	do {
		/* enable the HW RND clock */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear any stale RNG interrupt causes */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	cc_trng_enable_rnd_source(drvdata);
}
/*
 * Deferred work scheduled by the ISR on an RNG interrupt: harvest one
 * EHR's worth of entropy into the circular buffer, handle HW error
 * conditions, and either re-arm the source or release the PM reference.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);

	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		/* advance head by one word (4 bytes) */
		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* buffer full (or lost the race) - drop the PM reference */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
/*
 * Shared interrupt handler: acknowledge the host-level RNG interrupt,
 * mask further RNG interrupts, and defer the entropy harvesting to
 * cc_trng_compwork_handler().
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
449 static void cc_trng_startwork_handler(struct work_struct *w)
451 struct cctrng_drvdata *drvdata =
452 container_of(w, struct cctrng_drvdata, startwork);
454 drvdata->active_rosc = 0;
455 cc_trng_hw_trigger(drvdata);
459 static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
461 struct clk *clk;
462 struct device *dev = &(drvdata->pdev->dev);
463 int rc = 0;
465 clk = devm_clk_get_optional(dev, NULL);
466 if (IS_ERR(clk)) {
467 if (PTR_ERR(clk) != -EPROBE_DEFER)
468 dev_err(dev, "Error getting clock: %pe\n", clk);
469 return PTR_ERR(clk);
471 drvdata->clk = clk;
473 rc = clk_prepare_enable(drvdata->clk);
474 if (rc) {
475 dev_err(dev, "Failed to enable clock\n");
476 return rc;
479 return 0;
482 static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
484 clk_disable_unprepare(drvdata->clk);
488 static int cctrng_probe(struct platform_device *pdev)
490 struct resource *req_mem_cc_regs = NULL;
491 struct cctrng_drvdata *drvdata;
492 struct device *dev = &pdev->dev;
493 int rc = 0;
494 u32 val;
495 int irq;
497 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
498 if (!drvdata)
499 return -ENOMEM;
501 drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
502 if (!drvdata->rng.name)
503 return -ENOMEM;
505 drvdata->rng.read = cctrng_read;
506 drvdata->rng.priv = (unsigned long)drvdata;
507 drvdata->rng.quality = CC_TRNG_QUALITY;
509 platform_set_drvdata(pdev, drvdata);
510 drvdata->pdev = pdev;
512 drvdata->circ.buf = (char *)drvdata->data_buf;
514 /* Get device resources */
515 /* First CC registers space */
516 req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
517 /* Map registers space */
518 drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
519 if (IS_ERR(drvdata->cc_base)) {
520 dev_err(dev, "Failed to ioremap registers");
521 return PTR_ERR(drvdata->cc_base);
524 dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
525 req_mem_cc_regs);
526 dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
527 &req_mem_cc_regs->start, drvdata->cc_base);
529 /* Then IRQ */
530 irq = platform_get_irq(pdev, 0);
531 if (irq < 0) {
532 dev_err(dev, "Failed getting IRQ resource\n");
533 return irq;
536 /* parse sampling rate from device tree */
537 rc = cc_trng_parse_sampling_ratio(drvdata);
538 if (rc) {
539 dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
540 return rc;
543 rc = cc_trng_clk_init(drvdata);
544 if (rc) {
545 dev_err(dev, "cc_trng_clk_init failed\n");
546 return rc;
549 INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
550 INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
551 spin_lock_init(&drvdata->read_lock);
553 /* register the driver isr function */
554 rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
555 if (rc) {
556 dev_err(dev, "Could not register to interrupt %d\n", irq);
557 goto post_clk_err;
559 dev_dbg(dev, "Registered to IRQ: %d\n", irq);
561 /* Clear all pending interrupts */
562 val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
563 dev_dbg(dev, "IRR=0x%08X\n", val);
564 cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);
566 /* unmask HOST RNG interrupt */
567 cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
568 cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
569 ~CC_HOST_RNG_IRQ_MASK);
571 /* init PM */
572 rc = cc_trng_pm_init(drvdata);
573 if (rc) {
574 dev_err(dev, "cc_trng_pm_init failed\n");
575 goto post_clk_err;
578 /* increment device's usage counter */
579 rc = cc_trng_pm_get(dev);
580 if (rc) {
581 dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
582 goto post_pm_err;
585 /* set pending_hw to verify that HW won't be triggered from read */
586 atomic_set(&drvdata->pending_hw, 1);
588 /* registration of the hwrng device */
589 rc = hwrng_register(&drvdata->rng);
590 if (rc) {
591 dev_err(dev, "Could not register hwrng device.\n");
592 goto post_pm_err;
595 /* trigger HW to start generate data */
596 drvdata->active_rosc = 0;
597 cc_trng_hw_trigger(drvdata);
599 /* All set, we can allow auto-suspend */
600 cc_trng_pm_go(drvdata);
602 dev_info(dev, "ARM cctrng device initialized\n");
604 return 0;
606 post_pm_err:
607 cc_trng_pm_fini(drvdata);
609 post_clk_err:
610 cc_trng_clk_fini(drvdata);
612 return rc;
615 static int cctrng_remove(struct platform_device *pdev)
617 struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
618 struct device *dev = &pdev->dev;
620 dev_dbg(dev, "Releasing cctrng resources...\n");
622 hwrng_unregister(&drvdata->rng);
624 cc_trng_pm_fini(drvdata);
626 cc_trng_clk_fini(drvdata);
628 dev_info(dev, "ARM cctrng device terminated\n");
630 return 0;
633 static int __maybe_unused cctrng_suspend(struct device *dev)
635 struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
637 dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
638 cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
639 POWER_DOWN_ENABLE);
641 clk_disable_unprepare(drvdata->clk);
643 return 0;
646 static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
648 unsigned int val;
649 unsigned int i;
651 for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
652 /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
653 * completed and device is fully functional
655 val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
656 if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
657 /* hw indicate reset completed */
658 return true;
660 /* allow scheduling other process on the processor */
661 schedule();
663 /* reset not completed */
664 return false;
667 static int __maybe_unused cctrng_resume(struct device *dev)
669 struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
670 int rc;
672 dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
673 /* Enables the device source clk */
674 rc = clk_prepare_enable(drvdata->clk);
675 if (rc) {
676 dev_err(dev, "failed getting clock back on. We're toast.\n");
677 return rc;
680 /* wait for Cryptocell reset completion */
681 if (!cctrng_wait_for_reset_completion(drvdata)) {
682 dev_err(dev, "Cryptocell reset not completed");
683 return -EBUSY;
686 /* unmask HOST RNG interrupt */
687 cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
688 cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
689 ~CC_HOST_RNG_IRQ_MASK);
691 cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
692 POWER_DOWN_DISABLE);
694 return 0;
/* PM callbacks (runtime and system sleep share suspend/resume here) */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* Device-tree match: CryptoCell 713 and 703 TRNG instances */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};
static int __init cctrng_mod_init(void)
{
	/* Compile time assertion checks: circ_buf.h macros require a
	 * power-of-2 buffer, and it must hold at least one EHR (6 words).
	 */
	BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
	BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);

	return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);
static void __exit cctrng_mod_exit(void)
{
	platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);

/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");