/**
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/klist.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/semaphore.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/scatterwalk.h>

#include <linux/platform_data/crypto-ux500.h>

#include "cryp_p.h"
#include "cryp.h"

#define CRYP_MAX_KEY_SIZE	32
#define BYTES_PER_WORD		4

static int cryp_mode;
static atomic_t session_id;

static struct stedma40_chan_cfg *mem_to_engine;
static struct stedma40_chan_cfg *engine_to_mem;

/**
 * struct cryp_driver_data - data specific to the driver.
 *
 * @device_list: A list of registered devices to choose from.
 * @device_allocation: A semaphore initialized with number of devices.
 */
struct cryp_driver_data {
	struct klist device_list;
	struct semaphore device_allocation;
};

/**
 * struct cryp_ctx - Crypto context
 * @config: Crypto mode.
 * @key[CRYP_MAX_KEY_SIZE]: Key.
 * @keylen: Length of key.
 * @iv: Pointer to initialization vector.
 * @indata: Pointer to indata.
 * @outdata: Pointer to outdata.
 * @datalen: Length of indata.
 * @outlen: Length of outdata.
 * @blocksize: Size of blocks.
 * @updated: Updated flag.
 * @dev_ctx: Device dependent context.
 * @device: Pointer to the device.
 * @session_id: Session id snapshot, checked against the global session_id
 *              counter to decide whether the device context must be restored.
 */
struct cryp_ctx {
	struct cryp_config config;
	u8 key[CRYP_MAX_KEY_SIZE];
	u32 keylen;
	u8 *iv;
	const u8 *indata;
	u8 *outdata;
	u32 datalen;
	u32 outlen;
	u32 blocksize;
	u8 updated;
	struct cryp_device_context dev_ctx;
	struct cryp_device_data *device;
	u32 session_id;
};

static struct cryp_driver_data driver_data;

/**
 * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
 * @in: Data to convert.
 */
static inline u32 uint8p_to_uint32_be(u8 *in)
{
	u32 *data = (u32 *)in;

	return cpu_to_be32p(data);
}

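/*
 * Example: on a little-endian CPU (such as the ux500's ARM core) the byte
 * sequence {0x12, 0x34, 0x56, 0x78} is returned as the u32 value
 * 0x12345678, i.e. the first byte ends up in the most significant
 * position.
 */
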
/**
 * swap_bits_in_byte - mirror the bits in a byte
 * @b: the byte to be mirrored
 *
 * The bits are swapped the following way:
 * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and
 * nibble 2 (n2) bits 4-7.
 *
 * Nibble 1 (n1):
 * (The "old" (moved) bit is replaced with a zero)
 * 1. Move bit 6 and 7, 4 positions to the left.
 * 2. Move bit 3 and 5, 2 positions to the left.
 * 3. Move bit 1-4, 1 position to the left.
 *
 * Nibble 2 (n2):
 * 1. Move bit 0 and 1, 4 positions to the right.
 * 2. Move bit 2 and 4, 2 positions to the right.
 * 3. Move bit 3-6, 1 position to the right.
 *
 * Combine the two nibbles to a complete and swapped byte.
 */
static inline u8 swap_bits_in_byte(u8 b)
{
#define R_SHIFT_4_MASK  0xc0 /* Bits 6 and 7, right shift 4 */
#define R_SHIFT_2_MASK  0x28 /* (After right shift 4) Bits 3 and 5,
				right shift 2 */
#define R_SHIFT_1_MASK  0x1e /* (After right shift 2) Bits 1-4,
				right shift 1 */
#define L_SHIFT_4_MASK  0x03 /* Bits 0 and 1, left shift 4 */
#define L_SHIFT_2_MASK  0x14 /* (After left shift 4) Bits 2 and 4,
				left shift 2 */
#define L_SHIFT_1_MASK  0x78 /* (After left shift 2) Bits 3-6,
				left shift 1 */

	u8 n1;
	u8 n2;

	/* Swap most significant nibble */
	/* Right shift 4, bits 6 and 7 */
	n1 = ((b  & R_SHIFT_4_MASK) >> 4) | (b  & ~(R_SHIFT_4_MASK >> 4));
	/* Right shift 2, bits 3 and 5 */
	n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
	/* Right shift 1, bits 1-4 */
	n1 = (n1  & R_SHIFT_1_MASK) >> 1;

	/* Swap least significant nibble */
	/* Left shift 4, bits 0 and 1 */
	n2 = ((b  & L_SHIFT_4_MASK) << 4) | (b  & ~(L_SHIFT_4_MASK << 4));
	/* Left shift 2, bits 2 and 4 */
	n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
	/* Left shift 1, bits 3-6 */
	n2 = (n2  & L_SHIFT_1_MASK) << 1;

	return n1 | n2;
}

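/*
 * Example: swap_bits_in_byte(0x01) == 0x80 and swap_bits_in_byte(0xb4) ==
 * 0x2d, i.e. bit N of the input becomes bit (7 - N) of the output. A
 * straightforward loop-based equivalent (hypothetical, shown only as a
 * reference sketch, not used by the driver) would be:
 *
 *	static inline u8 swap_bits_in_byte_ref(u8 b)
 *	{
 *		u8 out = 0;
 *		int i;
 *
 *		for (i = 0; i < 8; i++)
 *			if (b & BIT(i))
 *				out |= BIT(7 - i);
 *
 *		return out;
 *	}
 */
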
static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
						      u8 *out, u32 len)
{
	unsigned int i = 0;
	int j;
	int index = 0;

	j = len - BYTES_PER_WORD;
	while (j >= 0) {
		for (i = 0; i < BYTES_PER_WORD; i++) {
			index = len - j - BYTES_PER_WORD + i;
			out[j + i] =
				swap_bits_in_byte(in[index]);
		}
		j -= BYTES_PER_WORD;
	}
}

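/*
 * Example: for an 8-byte key the loop above produces
 *
 *	out[0..3] = swap_bits_in_byte(in[4..7]);
 *	out[4..7] = swap_bits_in_byte(in[0..3]);
 *
 * i.e. the 32-bit words of the key are written out in reverse order and
 * every byte has its bits mirrored, which is evidently the layout the
 * CRYP AES key registers expect (see cfg_keys() below).
 */
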
static void add_session_id(struct cryp_ctx *ctx)
{
	/*
	 * We never want 0 to be a valid value, since this is the default value
	 * for the software context.
	 */
	if (unlikely(atomic_inc_and_test(&session_id)))
		atomic_inc(&session_id);

	ctx->session_id = atomic_read(&session_id);
}

static irqreturn_t cryp_interrupt_handler(int irq, void *param)
{
	struct cryp_ctx *ctx;
	int count;
	struct cryp_device_data *device_data;

	if (param == NULL) {
		BUG_ON(!param);
		return IRQ_HANDLED;
	}

	/* The device is coming from the one found in hw_crypt_noxts. */
	device_data = (struct cryp_device_data *)param;

	ctx = device_data->current_ctx;

	if (ctx == NULL) {
		BUG_ON(!ctx);
		return IRQ_HANDLED;
	}

	dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
		cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
		"out" : "in");

	if (cryp_pending_irq_src(device_data,
				 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
		if (ctx->outlen / ctx->blocksize > 0) {
			count = ctx->blocksize / 4;

			readsl(&device_data->base->dout, ctx->outdata, count);
			ctx->outdata += count;
			ctx->outlen -= count;

			if (ctx->outlen == 0) {
				cryp_disable_irq_src(device_data,
						     CRYP_IRQ_SRC_OUTPUT_FIFO);
			}
		}
	} else if (cryp_pending_irq_src(device_data,
					CRYP_IRQ_SRC_INPUT_FIFO)) {
		if (ctx->datalen / ctx->blocksize > 0) {
			count = ctx->blocksize / 4;

			writesl(&device_data->base->din, ctx->indata, count);

			ctx->indata += count;
			ctx->datalen -= count;

			if (ctx->datalen == 0)
				cryp_disable_irq_src(device_data,
						     CRYP_IRQ_SRC_INPUT_FIFO);

			if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
				CRYP_PUT_BITS(&device_data->base->cr,
					      CRYP_START_ENABLE,
					      CRYP_CR_START_POS,
					      CRYP_CR_START_MASK);

				cryp_wait_until_done(device_data);
			}
		}
	}

	return IRQ_HANDLED;
}

static int mode_is_aes(enum cryp_algo_mode mode)
{
	return	CRYP_ALGO_AES_ECB == mode ||
		CRYP_ALGO_AES_CBC == mode ||
		CRYP_ALGO_AES_CTR == mode ||
		CRYP_ALGO_AES_XTS == mode;
}

static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
		  enum cryp_init_vector_index index)
{
	struct cryp_init_vector_value vector_value;

	dev_dbg(device_data->dev, "[%s]", __func__);

	vector_value.init_value_left = left;
	vector_value.init_value_right = right;

	return cryp_configure_init_vector(device_data,
					  index,
					  vector_value);
}

static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
{
	int i;
	int status = 0;
	int num_of_regs = ctx->blocksize / 8;
	u32 iv[AES_BLOCK_SIZE / 4];

	dev_dbg(device_data->dev, "[%s]", __func__);

	/*
	 * Since we loop on num_of_regs we need to have a check in case
	 * someone provides an incorrect blocksize which would force calling
	 * cfg_iv with i greater than 2 which is an error.
	 */
	if (num_of_regs > 2) {
		dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
			__func__, ctx->blocksize);
		return -EINVAL;
	}

	for (i = 0; i < ctx->blocksize / 4; i++)
		iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);

	for (i = 0; i < num_of_regs; i++) {
		status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
				(enum cryp_init_vector_index) i);
		if (status != 0)
			return status;
	}
	return status;
}

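/*
 * Example: for AES (blocksize 16) num_of_regs is 2, so the IV is loaded
 * as two 64-bit halves: iv[0]/iv[1] into init vector register 0 and
 * iv[2]/iv[3] into init vector register 1. For DES/3DES (blocksize 8)
 * only register 0 is written.
 */
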
static int set_key(struct cryp_device_data *device_data,
		   u32 left_key,
		   u32 right_key,
		   enum cryp_key_reg_index index)
{
	struct cryp_key_value key_value;
	int cryp_error;

	dev_dbg(device_data->dev, "[%s]", __func__);

	key_value.key_value_left = left_key;
	key_value.key_value_right = right_key;

	cryp_error = cryp_configure_key_values(device_data,
					       index,
					       key_value);
	if (cryp_error != 0)
		dev_err(device_data->dev, "[%s]: "
			"cryp_configure_key_values() failed!", __func__);

	return cryp_error;
}

static int cfg_keys(struct cryp_ctx *ctx)
{
	int i;
	int num_of_regs = ctx->keylen / 8;
	u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
	int cryp_error = 0;

	dev_dbg(ctx->device->dev, "[%s]", __func__);

	if (mode_is_aes(ctx->config.algomode)) {
		swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
						   (u8 *)swapped_key,
						   ctx->keylen);
	} else {
		for (i = 0; i < ctx->keylen / 4; i++)
			swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
	}

	for (i = 0; i < num_of_regs; i++) {
		cryp_error = set_key(ctx->device,
				     *(((u32 *)swapped_key)+i*2),
				     *(((u32 *)swapped_key)+i*2+1),
				     (enum cryp_key_reg_index) i);

		if (cryp_error != 0) {
			dev_err(ctx->device->dev, "[%s]: set_key() failed!",
				__func__);
			return cryp_error;
		}
	}
	return cryp_error;
}

static int cryp_setup_context(struct cryp_ctx *ctx,
			      struct cryp_device_data *device_data)
{
	u32 control_register = CRYP_CR_DEFAULT;

	switch (cryp_mode) {
	case CRYP_MODE_INTERRUPT:
		writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
		break;

	case CRYP_MODE_DMA:
		writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
		break;

	default:
		break;
	}

	if (ctx->updated == 0) {
		cryp_flush_inoutfifo(device_data);
		if (cfg_keys(ctx) != 0) {
			dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
				__func__);
			return -EINVAL;
		}

		if (ctx->iv &&
		    CRYP_ALGO_AES_ECB != ctx->config.algomode &&
		    CRYP_ALGO_DES_ECB != ctx->config.algomode &&
		    CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
			if (cfg_ivs(device_data, ctx) != 0)
				return -EPERM;
		}

		cryp_set_configuration(device_data, &ctx->config,
				       &control_register);
		add_session_id(ctx);
	} else if (ctx->updated == 1 &&
		   ctx->session_id != atomic_read(&session_id)) {
		cryp_flush_inoutfifo(device_data);
		cryp_restore_device_context(device_data, &ctx->dev_ctx);

		add_session_id(ctx);
		control_register = ctx->dev_ctx.cr;
	} else
		control_register = ctx->dev_ctx.cr;

	writel(control_register |
	       (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
	       &device_data->base->cr);

	return 0;
}

static int cryp_get_device_data(struct cryp_ctx *ctx,
				struct cryp_device_data **device_data)
{
	int ret;
	struct klist_iter device_iterator;
	struct klist_node *device_node;
	struct cryp_device_data *local_device_data = NULL;
	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret; /* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);

	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
					struct cryp_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			device_node = klist_next(&device_iterator);
		} else {
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/**
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * Number of available devices, which are contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}

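/*
 * Example: ux500_cryp_probe() does one up() on device_allocation per
 * probed CRYP instance, so with a single instance a second concurrent
 * request simply sleeps in down_interruptible() above until the current
 * owner clears current_ctx and calls up() again (see ablk_crypt() and
 * ablk_dma_crypt() below).
 */
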
static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
				   struct device *dev)
{
	struct dma_slave_config mem2cryp = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.dst_maxburst = 4,
	};
	struct dma_slave_config cryp2mem = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst = 4,
	};

	dma_cap_zero(device_data->dma.mask);
	dma_cap_set(DMA_SLAVE, device_data->dma.mask);

	device_data->dma.cfg_mem2cryp = mem_to_engine;
	device_data->dma.chan_mem2cryp =
		dma_request_channel(device_data->dma.mask,
				    stedma40_filter,
				    device_data->dma.cfg_mem2cryp);

	device_data->dma.cfg_cryp2mem = engine_to_mem;
	device_data->dma.chan_cryp2mem =
		dma_request_channel(device_data->dma.mask,
				    stedma40_filter,
				    device_data->dma.cfg_cryp2mem);

	dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
	dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);

	init_completion(&device_data->dma.cryp_dma_complete);
}

static void cryp_dma_out_callback(void *data)
{
	struct cryp_ctx *ctx = (struct cryp_ctx *) data;
	dev_dbg(ctx->device->dev, "[%s]: ", __func__);

	complete(&ctx->device->dma.cryp_dma_complete);
}

static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
				 struct scatterlist *sg,
				 int len,
				 enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = NULL;
	dma_cookie_t cookie;

	dev_dbg(ctx->device->dev, "[%s]: ", __func__);

	if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
		dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
			"aligned! Addr: 0x%08x", __func__, (u32)sg);
		return -EFAULT;
	}

	switch (direction) {
	case DMA_TO_DEVICE:
		channel = ctx->device->dma.chan_mem2cryp;
		ctx->device->dma.sg_src = sg;
		ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
						 ctx->device->dma.sg_src,
						 ctx->device->dma.nents_src,
						 direction);

		if (!ctx->device->dma.sg_src_len) {
			dev_dbg(ctx->device->dev,
				"[%s]: Could not map the sg list (TO_DEVICE)",
				__func__);
			return -EFAULT;
		}

		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
			"(TO_DEVICE)", __func__);

		desc = dmaengine_prep_slave_sg(channel,
				ctx->device->dma.sg_src,
				ctx->device->dma.sg_src_len,
				direction, DMA_CTRL_ACK);
		break;

	case DMA_FROM_DEVICE:
		channel = ctx->device->dma.chan_cryp2mem;
		ctx->device->dma.sg_dst = sg;
		ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
						 ctx->device->dma.sg_dst,
						 ctx->device->dma.nents_dst,
						 direction);

		if (!ctx->device->dma.sg_dst_len) {
			dev_dbg(ctx->device->dev,
				"[%s]: Could not map the sg list (FROM_DEVICE)",
				__func__);
			return -EFAULT;
		}

		dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
			"(FROM_DEVICE)", __func__);

		desc = dmaengine_prep_slave_sg(channel,
				ctx->device->dma.sg_dst,
				ctx->device->dma.sg_dst_len,
				direction,
				DMA_CTRL_ACK |
				DMA_PREP_INTERRUPT);

		desc->callback = cryp_dma_out_callback;
		desc->callback_param = ctx;
		break;

	default:
		dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
			__func__);
		return -EFAULT;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}

static void cryp_dma_done(struct cryp_ctx *ctx)
{
	struct dma_chan *chan;

	dev_dbg(ctx->device->dev, "[%s]: ", __func__);

	chan = ctx->device->dma.chan_mem2cryp;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);

	chan = ctx->device->dma.chan_cryp2mem;
	dmaengine_terminate_all(chan);
	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
}

static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
			  int len)
{
	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
	dev_dbg(ctx->device->dev, "[%s]: ", __func__);

	if (error) {
		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
			"failed", __func__);
		return error;
	}

	return len;
}

static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
{
	int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
	if (error) {
		dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
			"failed", __func__);
		return error;
	}

	return len;
}

static void cryp_polling_mode(struct cryp_ctx *ctx,
			      struct cryp_device_data *device_data)
{
	int len = ctx->blocksize / BYTES_PER_WORD;
	int remaining_length = ctx->datalen;
	u32 *indata = (u32 *)ctx->indata;
	u32 *outdata = (u32 *)ctx->outdata;

	while (remaining_length > 0) {
		writesl(&device_data->base->din, indata, len);
		indata += len;
		remaining_length -= (len * BYTES_PER_WORD);
		cryp_wait_until_done(device_data);

		readsl(&device_data->base->dout, outdata, len);
		outdata += len;
		cryp_wait_until_done(device_data);
	}
}

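/*
 * Example: for AES (blocksize 16) each iteration of the loop above pushes
 * len = 4 words (one 16-byte block) into the din FIFO, busy-waits until
 * the engine signals completion, and then reads the 4 processed words
 * back from dout.
 */
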
static int cryp_disable_power(struct device *dev,
			      struct cryp_device_data *device_data,
			      bool save_device_context)
{
	int ret = 0;

	dev_dbg(dev, "[%s]", __func__);

	spin_lock(&device_data->power_state_spinlock);
	if (!device_data->power_state)
		goto out;

	spin_lock(&device_data->ctx_lock);
	if (save_device_context && device_data->current_ctx) {
		cryp_save_device_context(device_data,
				&device_data->current_ctx->dev_ctx,
				cryp_mode);
		device_data->restore_dev_ctx = true;
	}
	spin_unlock(&device_data->ctx_lock);

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->pwr_regulator);
	if (ret)
		dev_err(dev, "[%s]: "
			"regulator_disable() failed!",
			__func__);

	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_spinlock);

	return ret;
}

static int cryp_enable_power(
		struct device *dev,
		struct cryp_device_data *device_data,
		bool restore_device_context)
{
	int ret = 0;

	dev_dbg(dev, "[%s]", __func__);

	spin_lock(&device_data->power_state_spinlock);
	if (!device_data->power_state) {
		ret = regulator_enable(device_data->pwr_regulator);
		if (ret) {
			dev_err(dev, "[%s]: regulator_enable() failed!",
				__func__);
			goto out;
		}

		ret = clk_enable(device_data->clk);
		if (ret) {
			dev_err(dev, "[%s]: clk_enable() failed!",
				__func__);
			regulator_disable(device_data->pwr_regulator);
			goto out;
		}
		device_data->power_state = true;
	}

	if (device_data->restore_dev_ctx) {
		spin_lock(&device_data->ctx_lock);
		if (restore_device_context && device_data->current_ctx) {
			device_data->restore_dev_ctx = false;
			cryp_restore_device_context(device_data,
					&device_data->current_ctx->dev_ctx);
		}
		spin_unlock(&device_data->ctx_lock);
	}
out:
	spin_unlock(&device_data->power_state_spinlock);

	return ret;
}

static int hw_crypt_noxts(struct cryp_ctx *ctx,
			  struct cryp_device_data *device_data)
{
	int ret = 0;

	const u8 *indata = ctx->indata;
	u8 *outdata = ctx->outdata;
	u32 datalen = ctx->datalen;
	u32 outlen = datalen;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	ctx->outlen = ctx->datalen;

	if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
		pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
			 "0x%08x", __func__, (u32)indata);
		return -EINVAL;
	}

	ret = cryp_setup_context(ctx, device_data);

	if (ret)
		goto out;

	if (cryp_mode == CRYP_MODE_INTERRUPT) {
		cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
				    CRYP_IRQ_SRC_OUTPUT_FIFO);

		/*
		 * ctx->outlen is decremented in the cryp_interrupt_handler
		 * function. We had to add cpu_relax() (barrier) to make sure
		 * that gcc didn't optimize away this variable.
		 */
		while (ctx->outlen > 0)
			cpu_relax();
	} else if (cryp_mode == CRYP_MODE_POLLING ||
		   cryp_mode == CRYP_MODE_DMA) {
		/*
		 * The reason for having DMA in this if case is that if we are
		 * running cryp_mode = 2, then we separate DMA routines for
		 * handling cipher/plaintext > blocksize, except when
		 * running the normal CRYPTO_ALG_TYPE_CIPHER, then we still use
		 * the polling mode. The overhead of doing the DMA setup eats
		 * up the benefit of using it.
		 */
		cryp_polling_mode(ctx, device_data);
	} else {
		dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
			__func__);
		ret = -EPERM;
		goto out;
	}

	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
	ctx->updated = 1;

out:
	ctx->indata = indata;
	ctx->outdata = outdata;
	ctx->datalen = datalen;
	ctx->outlen = outlen;

	return ret;
}

static int get_nents(struct scatterlist *sg, int nbytes)
{
	int nents = 0;

	while (nbytes > 0) {
		nbytes -= sg->length;
		sg = sg_next(sg);
		nents++;
	}

	return nents;
}

static int ablk_dma_crypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct cryp_device_data *device_data;

	int bytes_written = 0;
	int bytes_read = 0;
	int ret;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	ctx->datalen = areq->nbytes;
	ctx->outlen = areq->nbytes;

	ret = cryp_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	ret = cryp_setup_context(ctx, device_data);
	if (ret)
		goto out;

	/* We have the device now, so store the nents in the dma struct. */
	ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
	ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);

	/* Enable DMA in- and output. */
	cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);

	bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
	bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);

	wait_for_completion(&ctx->device->dma.cryp_dma_complete);
	cryp_dma_done(ctx);

	cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
	ctx->updated = 1;

out:
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx = NULL;
	ctx->device = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * cryp_get_device_data.
	 */
	up(&driver_data.device_allocation);

	if (unlikely(bytes_written != bytes_read))
		return -EPERM;

	return 0;
}

static int ablk_crypt(struct ablkcipher_request *areq)
{
	struct ablkcipher_walk walk;
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct cryp_device_data *device_data;
	unsigned long src_paddr;
	unsigned long dst_paddr;
	int ret;
	int nbytes;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	ret = cryp_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
	ret = ablkcipher_walk_phys(areq, &walk);

	if (ret) {
		pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
			__func__);
		goto out;
	}

	while ((nbytes = walk.nbytes) > 0) {
		ctx->iv = walk.iv;
		src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
		ctx->indata = phys_to_virt(src_paddr);

		dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
		ctx->outdata = phys_to_virt(dst_paddr);

		ctx->datalen = nbytes - (nbytes % ctx->blocksize);

		ret = hw_crypt_noxts(ctx, device_data);
		if (ret)
			goto out;

		nbytes -= ctx->datalen;
		ret = ablkcipher_walk_done(areq, &walk, nbytes);
		if (ret)
			goto out;
	}
	ablkcipher_walk_complete(&walk);

out:
	/* Release the device */
	spin_lock(&device_data->ctx_lock);
	device_data->current_ctx = NULL;
	ctx->device = NULL;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * The down_interruptible part for this semaphore is called in
	 * cryp_get_device_data.
	 */
	up(&driver_data.device_allocation);

	return ret;
}

static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 *flags = &cipher->base.crt_flags;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->config.keysize = CRYP_KEY_SIZE_128;
		break;

	case AES_KEYSIZE_192:
		ctx->config.keysize = CRYP_KEY_SIZE_192;
		break;

	case AES_KEYSIZE_256:
		ctx->config.keysize = CRYP_KEY_SIZE_256;
		break;

	default:
		pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->updated = 0;

	return 0;
}

static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 *flags = &cipher->base.crt_flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);
	if (keylen != DES_KEY_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
			 __func__);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
			 __func__);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->updated = 0;
	return 0;
}

static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int keylen)
{
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 *flags = &cipher->base.crt_flags;
	const u32 *K = (const u32 *)key;
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	int i, ret;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);
	if (keylen != DES3_EDE_KEY_SIZE) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
			 __func__);
		return -EINVAL;
	}

	/* Checking key interdependency for weak key detection. */
	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
	    (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
			 __func__);
		return -EINVAL;
	}
	for (i = 0; i < 3; i++) {
		ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
		if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
			pr_debug(DEV_DBG_NAME " [%s]: "
				 "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
			return -EINVAL;
		}
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->updated = 0;
	return 0;
}

static int cryp_blk_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;

	/* DMA does not work for DES due to a hw bug */
	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
		return ablk_dma_crypt(areq);

	/* For everything except DMA, we run the non DMA version. */
	return ablk_crypt(areq);
}

static int cryp_blk_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;

	/* DMA does not work for DES due to a hw bug */
	if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
		return ablk_dma_crypt(areq);

	/* For everything except DMA, we run the non DMA version. */
	return ablk_crypt(areq);
}

struct cryp_algo_template {
	enum cryp_algo_mode algomode;
	struct crypto_alg crypto;
};

static int cryp_cra_init(struct crypto_tfm *tfm)
{
	struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct cryp_algo_template *cryp_alg = container_of(alg,
			struct cryp_algo_template,
			crypto);

	ctx->config.algomode = cryp_alg->algomode;
	ctx->blocksize = crypto_tfm_alg_blocksize(tfm);

	return 0;
}

static struct cryp_algo_template cryp_algs[] = {
	{
		.algomode = CRYP_ALGO_AES_ECB,
		.crypto = {
			.cra_name = "aes",
			.cra_driver_name = "aes-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = AES_MIN_KEY_SIZE,
					.max_keysize = AES_MAX_KEY_SIZE,
					.setkey = aes_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_AES_ECB,
		.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = AES_MIN_KEY_SIZE,
					.max_keysize = AES_MAX_KEY_SIZE,
					.setkey = aes_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_AES_CBC,
		.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = AES_MIN_KEY_SIZE,
					.max_keysize = AES_MAX_KEY_SIZE,
					.setkey = aes_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
					.ivsize = AES_BLOCK_SIZE,
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_AES_CTR,
		.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = AES_MIN_KEY_SIZE,
					.max_keysize = AES_MAX_KEY_SIZE,
					.setkey = aes_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
					.ivsize = AES_BLOCK_SIZE,
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_DES_ECB,
		.crypto = {
			.cra_name = "des",
			.cra_driver_name = "des-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES_KEY_SIZE,
					.max_keysize = DES_KEY_SIZE,
					.setkey = des_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_TDES_ECB,
		.crypto = {
			.cra_name = "des3_ede",
			.cra_driver_name = "des3_ede-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES3_EDE_KEY_SIZE,
					.max_keysize = DES3_EDE_KEY_SIZE,
					.setkey = des_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_DES_ECB,
		.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES_KEY_SIZE,
					.max_keysize = DES_KEY_SIZE,
					.setkey = des_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_TDES_ECB,
		.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3_ede-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES3_EDE_KEY_SIZE,
					.max_keysize = DES3_EDE_KEY_SIZE,
					.setkey = des3_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_DES_CBC,
		.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES_KEY_SIZE,
					.max_keysize = DES_KEY_SIZE,
					.setkey = des_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
					.ivsize = DES_BLOCK_SIZE,
				}
			}
		}
	},
	{
		.algomode = CRYP_ALGO_TDES_CBC,
		.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3_ede-ux500",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct cryp_ctx),
			.cra_alignmask = 3,
			.cra_type = &crypto_ablkcipher_type,
			.cra_init = cryp_cra_init,
			.cra_module = THIS_MODULE,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = DES3_EDE_KEY_SIZE,
					.max_keysize = DES3_EDE_KEY_SIZE,
					.setkey = des3_ablkcipher_setkey,
					.encrypt = cryp_blk_encrypt,
					.decrypt = cryp_blk_decrypt,
					.ivsize = DES3_EDE_BLOCK_SIZE,
				}
			}
		}
	}
};

/**
 * cryp_algs_register_all - register all CRYP algorithms with the crypto API
 */
static int cryp_algs_register_all(void)
{
	int ret;
	int i;
	int count;

	pr_debug("[%s]", __func__);

	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
		ret = crypto_register_alg(&cryp_algs[i].crypto);
		if (ret) {
			count = i;
			pr_err("[%s] alg registration failed",
				cryp_algs[i].crypto.cra_driver_name);
			goto unreg;
		}
	}
	return 0;
unreg:
	for (i = 0; i < count; i++)
		crypto_unregister_alg(&cryp_algs[i].crypto);
	return ret;
}

/**
 * cryp_algs_unregister_all - unregister all CRYP algorithms again
 */
static void cryp_algs_unregister_all(void)
{
	int i;

	pr_debug(DEV_DBG_NAME " [%s]", __func__);

	for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
		crypto_unregister_alg(&cryp_algs[i].crypto);
}

static int ux500_cryp_probe(struct platform_device *pdev)
{
	int ret;
	int cryp_error = 0;
	struct resource *res = NULL;
	struct resource *res_irq = NULL;
	struct cryp_device_data *device_data;
	struct cryp_protection_config prot = {
		.privilege_access = CRYP_STATE_ENABLE
	};
	struct device *dev = &pdev->dev;

	dev_dbg(dev, "[%s]", __func__);
	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
	if (!device_data) {
		dev_err(dev, "[%s]: kzalloc() failed!", __func__);
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	/* Grab the DMA configuration from platform data. */
	mem_to_engine = &((struct cryp_platform_data *)
		 dev->platform_data)->mem_to_engine;
	engine_to_mem = &((struct cryp_platform_data *)
		 dev->platform_data)->engine_to_mem;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "[%s]: platform_get_resource() failed",
			__func__);
		ret = -ENODEV;
		goto out;
	}

	device_data->phybase = res->start;
	device_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(device_data->base)) {
		dev_err(dev, "[%s]: ioremap failed!", __func__);
		ret = PTR_ERR(device_data->base);
		goto out;
	}

	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_spinlock);

	/* Enable power for CRYP hardware block */
	device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
	if (IS_ERR(device_data->pwr_regulator)) {
		dev_err(dev, "[%s]: could not get cryp regulator", __func__);
		ret = PTR_ERR(device_data->pwr_regulator);
		device_data->pwr_regulator = NULL;
		goto out;
	}

	/* Enable the clk for CRYP hardware block */
	device_data->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "[%s]: clk_get() failed!", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = cryp_enable_power(device_data->dev, device_data, false);
	if (ret) {
		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
		goto out_clk_unprepare;
	}

	cryp_error = cryp_check(device_data);
	if (cryp_error != 0) {
		dev_err(dev, "[%s]: cryp_check() failed!", __func__);
		ret = -EINVAL;
		goto out_power;
	}

	cryp_error = cryp_configure_protection(device_data, &prot);
	if (cryp_error != 0) {
		dev_err(dev, "[%s]: cryp_configure_protection() failed!",
			__func__);
		ret = -EINVAL;
		goto out_power;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq) {
		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
			__func__);
		ret = -ENODEV;
		goto out_power;
	}

	ret = devm_request_irq(&pdev->dev, res_irq->start,
			       cryp_interrupt_handler, 0, "cryp1", device_data);
	if (ret) {
		dev_err(dev, "[%s]: Unable to request IRQ", __func__);
		goto out_power;
	}

	if (cryp_mode == CRYP_MODE_DMA)
		cryp_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);

	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	atomic_set(&session_id, 1);

	ret = cryp_algs_register_all();
	if (ret) {
		dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	cryp_disable_power(device_data->dev, device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_regulator:
	regulator_put(device_data->pwr_regulator);

out:
	return ret;
}

static int ux500_cryp_remove(struct platform_device *pdev)
{
	struct cryp_device_data *device_data;

	dev_dbg(&pdev->dev, "[%s]", __func__);
	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
			__func__);
		return -ENOMEM;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		cryp_algs_unregister_all();

	if (cryp_disable_power(&pdev->dev, device_data, false))
		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
			__func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->pwr_regulator);

	return 0;
}

static void ux500_cryp_shutdown(struct platform_device *pdev)
{
	struct cryp_device_data *device_data;

	dev_dbg(&pdev->dev, "[%s]", __func__);

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
				"Shutting down anyway...", __func__);
		/*
		 * (Allocate the device)
		 * Need to set this to a non-null (dummy) value,
		 * to avoid it being used while context switching.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		cryp_algs_unregister_all();

	if (cryp_disable_power(&pdev->dev, device_data, false))
		dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
			__func__);
}

#ifdef CONFIG_PM_SLEEP
static int ux500_cryp_suspend(struct device *dev)
{
	int ret;
	struct platform_device *pdev = to_platform_device(dev);
	struct cryp_device_data *device_data;
	struct resource *res_irq;
	struct cryp_ctx *temp_ctx = NULL;

	dev_dbg(dev, "[%s]", __func__);

	/* Handle state? */
	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq)
		dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable", __func__);
	else
		disable_irq(res_irq->start);

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "[%s]: down_interruptible() failed",
				__func__);
		ret = cryp_disable_power(dev, device_data, false);

	} else
		ret = cryp_disable_power(dev, device_data, true);

	if (ret)
		dev_err(dev, "[%s]: cryp_disable_power() failed!", __func__);

	return ret;
}

static int ux500_cryp_resume(struct device *dev)
{
	int ret = 0;
	struct platform_device *pdev = to_platform_device(dev);
	struct cryp_device_data *device_data;
	struct resource *res_irq;
	struct cryp_ctx *temp_ctx = NULL;

	dev_dbg(dev, "[%s]", __func__);

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
		return -ENOMEM;
	}

	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = cryp_enable_power(dev, device_data, true);

	if (ret)
		dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
	else {
		res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (res_irq)
			enable_irq(res_irq->start);
	}

	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);

static const struct of_device_id ux500_cryp_match[] = {
	{ .compatible = "stericsson,ux500-cryp" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_cryp_match);

static struct platform_driver cryp_driver = {
	.probe  = ux500_cryp_probe,
	.remove = ux500_cryp_remove,
	.shutdown = ux500_cryp_shutdown,
	.driver = {
		.name  = "cryp1",
		.of_match_table = ux500_cryp_match,
		.pm = &ux500_cryp_pm,
	}
};

static int __init ux500_cryp_mod_init(void)
{
	pr_debug("[%s] is called!", __func__);
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);
	return platform_driver_register(&cryp_driver);
}

static void __exit ux500_cryp_mod_fini(void)
{
	pr_debug("[%s] is called!", __func__);
	platform_driver_unregister(&cryp_driver);
}

module_init(ux500_cryp_mod_init);
module_exit(ux500_cryp_mod_fini);

module_param(cryp_mode, int, 0);
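/*
 * cryp_mode selects how data is moved through the engine: polling,
 * interrupt or DMA. The comment in hw_crypt_noxts() indicates that 2
 * selects DMA; the exact numbering of the other modes follows
 * enum cryp_mode in cryp.h, so the description below is a best-effort
 * summary rather than a spec.
 */
MODULE_PARM_DESC(cryp_mode, "CRYP data transfer mode: polling, interrupt or DMA");
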
MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
MODULE_ALIAS_CRYPTO("aes-all");
MODULE_ALIAS_CRYPTO("des-all");

MODULE_LICENSE("GPL");