// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/crypto.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
#include "cc_ivgen.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"

bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");

bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");

struct cc_hw_data {
	char *name;
	enum cc_hw_rev rev;
	u32 sig;
};

/* Hardware revisions defs. */

static const struct cc_hw_data cc712_hw = {
	.name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
};

static const struct cc_hw_data cc710_hw = {
	.name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
};

static const struct cc_hw_data cc630p_hw = {
	.name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
};

static const struct of_device_id arm_ccree_dev_of_match[] = {
	{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
	{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
	{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
	{}
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
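
/*
 * Exporting the table above with MODULE_DEVICE_TABLE() lets userspace
 * (e.g. udev/modprobe) autoload this module when a device node carrying
 * one of these compatible strings appears in the device tree.
 */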

void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
	char prefix[64];

	if (!buf)
		return;

	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);

	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
		       len, false);
}

static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);
	if (irr == 0) { /* Probably shared interrupt line */
		dev_err(dev, "Got interrupt with empty IRR\n");
		return IRQ_NONE;
	}
	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);

	/* Completion interrupt - most probable */
	if (irr & CC_COMP_IRQ_MASK) {
		/* Mask AXI completion interrupt - will be unmasked in
		 * Deferred service handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
		irr &= ~CC_COMP_IRQ_MASK;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (irr & CC_GPR0_IRQ_MASK) {
		/* Mask interrupt - will be unmasked in Deferred service
		 * handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
		irr &= ~CC_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (irr & CC_AXI_ERR_IRQ_MASK) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
			axi_err);

		irr &= ~CC_AXI_ERR_IRQ_MASK;
	}

	if (irr) {
		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
				    irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
	unsigned int val, cache_params;
	struct device *dev = drvdata_to_dev(drvdata);

	/* Unmask all AXI interrupt sources AXI_CFG1 register */
	val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
	cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
	dev_dbg(dev, "AXIM_CFG=0x%08X\n",
		cc_ioread(drvdata, CC_REG(AXIM_CFG)));

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;

	if (drvdata->hw_rev >= CC_HW_REV_712)
		val |= CC_GPR0_IRQ_MASK;

	cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);

	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);

	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_dbg(dev, "Cache params previous: 0x%08X\n", val);

	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_dbg(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
			val, cache_params);

	return 0;
}

static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 signature_val;
	u64 dma_mask;
	const struct cc_hw_data *hw_rev;
	const struct of_device_id *dev_id;
	struct clk *clk;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	dev_id = of_match_node(arm_ccree_dev_of_match, np);
	if (!dev_id)
		return -ENODEV;

	hw_rev = (struct cc_hw_data *)dev_id->data;
	new_drvdata->hw_rev_name = hw_rev->name;
	new_drvdata->hw_rev = hw_rev->rev;

	if (hw_rev->rev >= CC_HW_REV_712) {
		new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
	} else {
		new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
	}

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		switch (PTR_ERR(clk)) {
		/* Clock is optional so this might be fine */
		case -ENOENT:
			break;

		/* Clock not available, let's try again soon */
		case -EPROBE_DEFER:
			return -EPROBE_DEFER;

		default:
			dev_err(dev, "Error getting clock: %ld\n",
				PTR_ERR(clk));
			return PTR_ERR(clk);
		}
	new_drvdata->clk = clk;

	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(new_drvdata->cc_base);
	}

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	new_drvdata->irq = platform_get_irq(plat_dev, 0);
	if (new_drvdata->irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return new_drvdata->irq;
	}

	rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
			      IRQF_SHARED, "ccree", new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n",
			new_drvdata->irq);
		return rc;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);

	init_completion(&new_drvdata->hw_queue_avail);

	if (!plat_dev->dev.dma_mask)
		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;

	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&plat_dev->dev, dma_mask)) {
			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
		return rc;
	}

	rc = cc_clk_on(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed to enable clock");
		return rc;
	}

	/* Verify correct mapping */
	signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
	if (signature_val != hw_rev->sig) {
		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
			signature_val, hw_rev->sig);
		rc = -EINVAL;
		goto post_clk_err;
	}
	dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
		 hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
		 DRV_MODULE_VERSION);

	rc = init_cc_regs(new_drvdata, true);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_clk_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
		goto post_debugfs_err;
	}
	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
		rc = -ENOMEM;
		goto post_sram_mgr_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_sram_mgr_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	rc = cc_pm_init(new_drvdata);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_init failed\n");
		goto post_buf_mgr_err;
	}

	rc = cc_ivgen_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_ivgen_init failed\n");
		goto post_buf_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_ivgen_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* All set, we can allow autosuspend */
	cc_pm_go(new_drvdata);

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS test passed, so let TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_ivgen_err:
	cc_ivgen_fini(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_sram_mgr_err:
	cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_clk_err:
	cc_clk_off(new_drvdata);
	return rc;
}
void fini_cc_regs(struct cc_drvdata *drvdata)
{
	/* Mask all interrupts */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}

static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct cc_drvdata *drvdata =
		(struct cc_drvdata *)platform_get_drvdata(plat_dev);

	cc_aead_free(drvdata);
	cc_hash_free(drvdata);
	cc_cipher_free(drvdata);
	cc_ivgen_fini(drvdata);
	cc_pm_fini(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
	cc_sram_mgr_fini(drvdata);
	cc_fips_fini(drvdata);
	cc_debugfs_fini(drvdata);
	fini_cc_regs(drvdata);
	cc_clk_off(drvdata);
}

int cc_clk_on(struct cc_drvdata *drvdata)
{
	struct clk *clk = drvdata->clk;
	int rc;

	if (IS_ERR(clk))
		/* Not all devices have a clock associated with CCREE */
		return 0;

	rc = clk_prepare_enable(clk);
	if (rc)
		return rc;

	return 0;
}

void cc_clk_off(struct cc_drvdata *drvdata)
{
	struct clk *clk = drvdata->clk;

	if (IS_ERR(clk))
		/* Not all devices have a clock associated with CCREE */
		return;

	clk_disable_unprepare(clk);
}

static int ccree_probe(struct platform_device *plat_dev)
{
	int rc;
	struct device *dev = &plat_dev->dev;

	/* Map registers space */
	rc = init_cc_resources(plat_dev);
	if (rc)
		return rc;

	dev_info(dev, "ARM ccree device initialized\n");

	return 0;
}

static int ccree_remove(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;

	dev_dbg(dev, "Releasing ccree resources...\n");

	cleanup_cc_resources(plat_dev);

	dev_info(dev, "ARM ccree device terminated\n");

	return 0;
}

static struct platform_driver ccree_driver = {
	.driver = {
		   .name = "ccree",
		   .of_match_table = arm_ccree_dev_of_match,
#ifdef CONFIG_PM
		   .pm = &ccree_pm,
#endif
	},
	.probe = ccree_probe,
	.remove = ccree_remove,
};

static int __init ccree_init(void)
{
	int ret;

	cc_hash_global_init();

	ret = cc_debugfs_global_init();
	if (ret)
		return ret;

	return platform_driver_register(&ccree_driver);
}
module_init(ccree_init);

static void __exit ccree_exit(void)
{
	platform_driver_unregister(&ccree_driver);
	cc_debugfs_global_fini();
}
module_exit(ccree_exit);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");