// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"
#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1
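/*
 * A queue length of 1 serializes requests: the engine processes a single
 * request at a time and anything beyond that is backlogged.
 */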
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};
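/* Tear down every algorithm set that was registered from qce_ops[]. */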
static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}
static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}
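/* Hand the request to the ops whose type matches the transform's type. */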
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}
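/*
 * Enqueue a new request (if any) and, unless the engine is already busy,
 * dequeue the next request and start processing it. A backlogged request
 * is notified with -EINPROGRESS before the dequeued one is handled.
 */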
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}
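/* Tasklet: complete the finished request, then restart the queue. */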
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}
static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
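/* Read the hardware revision and reject unsupported versions. */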
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}
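/* Undo probe: stop the tasklet, unregister algorithms, release resources. */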
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}
static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");