// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP Crypto driver common support routines.
 *
 * Copyright (c) 2017 Texas Instruments Incorporated
 *	Tero Kristo <t-kristo@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

#include "omap-crypto.h"
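
/*
 * Rebuild the source scatterlist so that it covers exactly 'total' bytes.
 * Unless OMAP_CRYPTO_FORCE_SINGLE_ENTRY is set, a fresh table is
 * allocated; otherwise the caller-provided single-entry 'new_sg' is used.
 * On success, *sg is updated to point at the rebuilt list.
 */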
static int omap_crypto_copy_sg_lists(int total, int bs,
				     struct scatterlist **sg,
				     struct scatterlist *new_sg, u16 flags)
{
	int n = sg_nents(*sg);
	struct scatterlist *tmp;

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
		new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
		if (!new_sg)
			return -ENOMEM;

		sg_init_table(new_sg, n);
	}

	tmp = new_sg;

	while (*sg && total) {
		int len = (*sg)->length;

		if (total < len)
			len = total;

		if (len > 0) {
			total -= len;
			sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
			if (total <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
		}

		*sg = sg_next(*sg);
	}

	*sg = new_sg;

	return 0;
}
static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg,
				struct scatterlist *new_sg, u16 flags)
{
	void *buf;
	int pages;
	int new_len;

	new_len = ALIGN(total, bs);
	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("%s: Couldn't allocate pages for unaligned cases.\n",
		       __func__);
		return -ENOMEM;
	}

	if (flags & OMAP_CRYPTO_COPY_DATA) {
		scatterwalk_map_and_copy(buf, *sg, 0, total, 0);
		if (flags & OMAP_CRYPTO_ZERO_BUF)
			memset(buf + total, 0, new_len - total);
	}

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
		sg_init_table(new_sg, 1);

	sg_set_buf(new_sg, buf, new_len);

	*sg = new_sg;

	return 0;
}
static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
				u16 flags)
{
	int len = 0;
	int num_sg = 0;

	if (!IS_ALIGNED(total, bs))
		return OMAP_CRYPTO_NOT_ALIGNED;

	while (sg) {
		num_sg++;

		if (!IS_ALIGNED(sg->offset, 4))
			return OMAP_CRYPTO_NOT_ALIGNED;
		if (!IS_ALIGNED(sg->length, bs))
			return OMAP_CRYPTO_NOT_ALIGNED;
#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg)) != ZONE_DMA)
			return OMAP_CRYPTO_NOT_ALIGNED;
#endif

		len += sg->length;
		sg = sg_next(sg);

		if (len >= total)
			break;
	}

	if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1)
		return OMAP_CRYPTO_NOT_ALIGNED;

	if (len != total)
		return OMAP_CRYPTO_BAD_DATA_LENGTH;

	return 0;
}
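
/*
 * Prepare a scatterlist for the crypto hardware. Misaligned data (or any
 * data when OMAP_CRYPTO_FORCE_COPY is set) is bounced into a contiguous
 * buffer; a list with a bad total length is rebuilt instead. What was
 * copied is recorded in *dd_flags (shifted by 'flags_shift') so that
 * omap_crypto_cleanup() can release it later.
 */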
int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs,
			 struct scatterlist *new_sg, u16 flags,
			 u8 flags_shift, unsigned long *dd_flags)
{
	int ret;

	*dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift);

	if (flags & OMAP_CRYPTO_FORCE_COPY)
		ret = OMAP_CRYPTO_NOT_ALIGNED;
	else
		ret = omap_crypto_check_sg(*sg, total, bs, flags);

	if (ret == OMAP_CRYPTO_NOT_ALIGNED) {
		ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;

		*dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift;
	} else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) {
		ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags);
		if (ret)
			return ret;

		if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY))
			*dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift;
	} else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) {
		sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_crypto_align_sg);
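
/*
 * Copy 'len' bytes from the bounce scatterlist back into the original
 * list, starting at 'offset' within the destination, walking both lists
 * one entry at a time via sg_virt().
 */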
static void omap_crypto_copy_data(struct scatterlist *src,
				  struct scatterlist *dst,
				  int offset, int len)
{
	int amt;
	void *srcb, *dstb;
	int srco = 0, dsto = offset;

	while (src && dst && len) {
		if (srco >= src->length) {
			srco -= src->length;
			src = sg_next(src);
			continue;
		}

		if (dsto >= dst->length) {
			dsto -= dst->length;
			dst = sg_next(dst);
			continue;
		}

		amt = min(src->length - srco, dst->length - dsto);
		amt = min(len, amt);

		srcb = sg_virt(src) + srco;
		dstb = sg_virt(dst) + dsto;

		memcpy(dstb, srcb, amt);

		srco += amt;
		dsto += amt;
		len -= amt;
	}
}
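
/*
 * Reverse omap_crypto_align_sg(): copy bounced data back to the original
 * scatterlist and free the bounce buffer or the rebuilt scatterlist,
 * depending on which copy flags were recorded at 'flags_shift'.
 */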
void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig,
			 int offset, int len, u8 flags_shift,
			 unsigned long flags)
{
	void *buf;
	int pages;

	flags >>= flags_shift;
	flags &= OMAP_CRYPTO_COPY_MASK;

	if (!flags)
		return;

	buf = sg_virt(sg);
	pages = get_order(len);

	if (orig && (flags & OMAP_CRYPTO_COPY_MASK))
		omap_crypto_copy_data(sg, orig, offset, len);

	if (flags & OMAP_CRYPTO_DATA_COPIED)
		free_pages((unsigned long)buf, pages);
	else if (flags & OMAP_CRYPTO_SG_COPIED)
		kfree(sg);
}
EXPORT_SYMBOL_GPL(omap_crypto_cleanup);

MODULE_DESCRIPTION("OMAP crypto support library.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");