/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1
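
/*
 * Table of the algorithm implementations provided by this driver. Each
 * entry supplies register/unregister hooks and an async request handler
 * for one crypto_tfm type.
 */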
static const struct qce_algo_ops *qce_ops[] = {
	&ablkcipher_ops,
	&ahash_ops,
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}
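
/*
 * Register every algorithm in qce_ops with the crypto API; stop at the
 * first failure and report it to the caller.
 */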
static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}
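
/*
 * Dispatch an asynchronous request to the qce_ops entry whose type
 * matches the request's crypto_tfm type.
 */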
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}
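
/*
 * Enqueue a new request (if any) and, when the engine is idle, dequeue
 * the next request and hand it to the hardware. The engine processes one
 * request at a time; a non-NULL qce->req marks it busy.
 */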
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}
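
/*
 * Tasklet run after request completion: finish the current request and
 * kick the queue to start the next one.
 */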
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}
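
/* Callbacks hooked into struct qce_device for use by the algorithm code. */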
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
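
/*
 * Read the hardware version and make sure it is a supported v5 revision,
 * then set the BAM burst size and pipe pair used for DMA.
 */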
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	struct resource *res;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	qce->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}
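
/* Tear down in reverse probe order. */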
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
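
/*
 * A minimal, illustrative device tree node that would bind this driver.
 * The unit address, clock specifiers and BAM pipe numbers below are
 * hypothetical examples; consult the platform's actual bindings for the
 * required reg/clock/dma properties:
 *
 *	crypto@fd45a000 {
 *		compatible = "qcom,crypto-v5.1";
 *		reg = <0xfd45a000 0x6000>;
 *		clocks = <&gcc GCC_CE2_AHB_CLK>,
 *			 <&gcc GCC_CE2_AXI_CLK>,
 *			 <&gcc GCC_CE2_CLK>;
 *		clock-names = "iface", "bus", "core";
 *		dmas = <&cryptobam 2>, <&cryptobam 3>;
 *		dma-names = "rx", "tx";
 *	};
 */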
static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");