// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

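/*
 * Dispatch table of the algorithm families supported by the engine. Each
 * entry is compiled in only when the corresponding Kconfig option is set,
 * and provides register/unregister hooks plus an async request handler.
 */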
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

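/*
 * Route an async request to the algorithm family whose type matches the
 * request's transform (e.g. skcipher vs. ahash). Returns -EINVAL when no
 * registered ops claim the type.
 */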
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

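/*
 * Single entry point for feeding the hardware: enqueue the new request
 * (if any) and, when the engine is idle, dequeue the next one and start
 * it. A non-NULL qce->req marks the engine busy, so at most one request
 * is in flight at a time. A backlogged request is notified with
 * -EINPROGRESS before the dequeued request is handed to the hardware.
 */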
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

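/*
 * Completion runs in tasklet context: detach the finished request under
 * the lock, invoke its completion callback with the saved result, then
 * try to start the next queued request.
 */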
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

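/*
 * The two helpers below are installed into the qce_device so algorithm
 * implementations can submit requests and signal completion without
 * knowing about the queue or the tasklet.
 */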
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

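/*
 * Read the hardware version and derive per-device parameters (BAM burst
 * size, pipe pair id). Unsupported revisions are rejected so that probe
 * fails cleanly.
 */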
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * The driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 * BAM dma pipes(rx, tx)		CE pipe pair id
	 *	0,1				0
	 *	2,3				1
	 *	4,5				2
	 *	6,7				3
	 *	...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	/* unwind via err_dma so the DMA channels acquired above are released */
	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

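/*
 * Remove mirrors probe: kill the completion tasklet first so no further
 * completions run, then unregister the algorithms, release the DMA
 * channels, and disable the clocks in reverse order of enabling.
 */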
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

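/*
 * Illustrative device-tree node for the binding matched below. The
 * address, clock specifiers, and BAM pipe numbers are assumptions shown
 * only to sketch the expected shape; consult the platform's actual dtsi.
 *
 *	crypto@fd45a000 {
 *		compatible = "qcom,crypto-v5.1";
 *		reg = <0xfd45a000 0x6000>;
 *		clocks = <&gcc GCC_CE1_AHB_CLK>, <&gcc GCC_CE1_AXI_CLK>,
 *			 <&gcc GCC_CE1_CLK>;
 *		clock-names = "iface", "bus", "core";
 *		dmas = <&cryptobam 2>, <&cryptobam 3>;
 *		dma-names = "rx", "tx";
 *	};
 */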
static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");