// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
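
/*
 * This file implements the "scomp" (synchronous compression) algorithm
 * type and the glue that exposes scomp drivers through the acomp
 * (asynchronous compression) API: scatterlist data is linearized through
 * per-CPU scratch buffers and handed to the driver's buffer-based
 * compress/decompress callbacks.
 */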

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"
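
/*
 * Each possible CPU owns one pair of scratch buffers (source and
 * destination, SCOMP_SCRATCH_SIZE bytes each) used to linearize
 * scatterlist data before calling into the synchronous algorithm.
 * The spinlock serializes users of a given CPU's pair.
 */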
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}
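
/*
 * Allocate the per-CPU scratch pairs.  If any allocation fails, whatever
 * was already allocated is torn down again and -ENOMEM is returned.
 */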
static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	void *mem;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
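
/*
 * Common worker for the compress and decompress entry points.
 * Single-entry, lowmem scatterlists are used in place; anything else is
 * copied through this CPU's scratch buffers while holding the scratch
 * lock.  A non-zero dir means compress, zero means decompress.
 */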
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	void *src, *dst;
	unsigned int dlen;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	dlen = req->dlen;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
		src = page_to_virt(sg_page(req->src)) + req->src->offset;
	} else {
		scatterwalk_map_and_copy(scratch->src, req->src, 0,
					 req->slen, 0);
		src = scratch->src;
	}

	if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
		dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
	else
		dst = scratch->dst;

	if (dir)
		ret = crypto_scomp_compress(scomp, src, req->slen,
					    dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, src, req->slen,
					      dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		} else if (req->dlen > dlen) {
			ret = -ENOSPC;
			goto out;
		}
		if (dst == scratch->dst) {
			scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
						 req->dlen, 1);
		} else {
			int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
			int i;
			struct page *dst_page = sg_page(req->dst);

			for (i = 0; i < nr_pages; i++)
				flush_dcache_page(dst_page + i);
		}
	}
out:
	spin_unlock(&scratch->lock);

	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}
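
/*
 * Undo crypto_init_scomp_ops_async(): free the wrapped scomp transform
 * and drop this user's reference on the per-CPU scratch buffers, freeing
 * them once the last user goes away.
 */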
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}
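
/*
 * Set up an acomp transform that is backed by a synchronous scomp
 * algorithm: instantiate the scomp transform and wire the acomp
 * compress/decompress entry points to the scatterlist-copying wrappers
 * above.
 */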
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}
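
/*
 * Allocate the per-request scomp context for an acomp request that is
 * serviced by a synchronous algorithm and stash it in the request's
 * context area.  Freed again by crypto_acomp_scomp_free_ctx().
 */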
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};
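
/*
 * Register a synchronous compression algorithm with the crypto API.
 * As an illustrative sketch only (the "foo" names below are hypothetical
 * and not part of this file), a driver typically fills in a struct
 * scomp_alg and registers it:
 *
 *	static struct scomp_alg foo_alg = {
 *		.alloc_ctx	= foo_alloc_ctx,
 *		.free_ctx	= foo_free_ctx,
 *		.compress	= foo_compress,
 *		.decompress	= foo_decompress,
 *		.base		= {
 *			.cra_name	 = "foo",
 *			.cra_driver_name = "foo-scomp",
 *			.cra_module	 = THIS_MODULE,
 *		}
 *	};
 *
 *	err = crypto_register_scomp(&foo_alg);
 */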
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");