crypto/scompress.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

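/*
 * Per-CPU scratch buffers used to linearize scatterlist data for the
 * synchronous compression implementations. Each CPU gets one source
 * and one destination buffer of SCOMP_SCRATCH_SIZE bytes, serialized
 * by the per-CPU spinlock.
 */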
struct scomp_scratch {
        spinlock_t lock;
        void *src;
        void *dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
        .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_comp rscomp;

        memset(&rscomp, 0, sizeof(rscomp));

        strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

        return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
                       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_puts(m, "type         : scomp\n");
}

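/*
 * The scratch buffers are allocated when the first scomp user appears
 * and freed when the last one goes away; scomp_scratch_users is the
 * reference count, protected by scomp_lock.
 */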
static void crypto_scomp_free_scratches(void)
{
        struct scomp_scratch *scratch;
        int i;

        for_each_possible_cpu(i) {
                scratch = per_cpu_ptr(&scomp_scratch, i);

                vfree(scratch->src);
                vfree(scratch->dst);
                scratch->src = NULL;
                scratch->dst = NULL;
        }
}

static int crypto_scomp_alloc_scratches(void)
{
        struct scomp_scratch *scratch;
        int i;

        for_each_possible_cpu(i) {
                void *mem;

                scratch = per_cpu_ptr(&scomp_scratch, i);

                mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!mem)
                        goto error;
                scratch->src = mem;
                mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!mem)
                        goto error;
                scratch->dst = mem;
        }
        return 0;
error:
        crypto_scomp_free_scratches();
        return -ENOMEM;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
        int ret = 0;

        mutex_lock(&scomp_lock);
        if (!scomp_scratch_users++)
                ret = crypto_scomp_alloc_scratches();
        mutex_unlock(&scomp_lock);

        return ret;
}

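/*
 * Bridge an asynchronous acomp request to a synchronous scomp
 * implementation, which works on linear buffers. Single-entry lowmem
 * scatterlists are used in place; anything else is bounced through
 * the per-CPU scratch buffers. dir != 0 compresses, dir == 0
 * decompresses.
 */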
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
        void **tfm_ctx = acomp_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void **ctx = acomp_request_ctx(req);
        struct scomp_scratch *scratch;
        void *src, *dst;
        unsigned int dlen;
        int ret;

        if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
                return -EINVAL;

        if (req->dst && !req->dlen)
                return -EINVAL;

        if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
                req->dlen = SCOMP_SCRATCH_SIZE;

        dlen = req->dlen;

        scratch = raw_cpu_ptr(&scomp_scratch);
        spin_lock(&scratch->lock);

        /* Use the source in place when it is a single lowmem sg entry. */
        if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
                src = page_to_virt(sg_page(req->src)) + req->src->offset;
        } else {
                scatterwalk_map_and_copy(scratch->src, req->src, 0,
                                         req->slen, 0);
                src = scratch->src;
        }

        if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
                dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
        else
                dst = scratch->dst;

        if (dir)
                ret = crypto_scomp_compress(scomp, src, req->slen,
                                            dst, &req->dlen, *ctx);
        else
                ret = crypto_scomp_decompress(scomp, src, req->slen,
                                              dst, &req->dlen, *ctx);
        if (!ret) {
                if (!req->dst) {
                        /* No destination given: allocate one to fit the output. */
                        req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
                        if (!req->dst) {
                                ret = -ENOMEM;
                                goto out;
                        }
                } else if (req->dlen > dlen) {
                        /* The output overran the caller-supplied buffer. */
                        ret = -ENOSPC;
                        goto out;
                }
                if (dst == scratch->dst) {
                        scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
                                                 req->dlen, 1);
                } else {
                        /*
                         * The output was written to the destination pages
                         * directly; flush the data cache for each of them.
                         */
                        int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
                        int i;
                        struct page *dst_page = sg_page(req->dst);

                        for (i = 0; i < nr_pages; i++)
                                flush_dcache_page(dst_page + i);
                }
        }
out:
        spin_unlock(&scratch->lock);
        return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
        return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
        return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
        struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

        crypto_free_scomp(*ctx);

        mutex_lock(&scomp_lock);
        if (!--scomp_scratch_users)
                crypto_scomp_free_scratches();
        mutex_unlock(&scomp_lock);
}

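/*
 * Set up an acomp transform backed by this scomp algorithm: allocate
 * the underlying scomp tfm and point the acomp entry points at the
 * synchronous wrappers above.
 */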
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
        struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        scomp = crypto_create_tfm(calg, &crypto_scomp_type);
        if (IS_ERR(scomp)) {
                crypto_mod_put(calg);
                return PTR_ERR(scomp);
        }

        *ctx = scomp;
        tfm->exit = crypto_exit_scomp_ops_async;

        crt->compress = scomp_acomp_compress;
        crt->decompress = scomp_acomp_decompress;
        crt->dst_free = sgl_free;
        crt->reqsize = sizeof(void *);

        return 0;
}

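/*
 * Allocate the algorithm-specific context for an acomp request that
 * is serviced by an scomp implementation. On failure the request is
 * freed and NULL is returned.
 */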
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
        struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
        struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void *ctx;

        ctx = crypto_scomp_alloc_ctx(scomp);
        if (IS_ERR(ctx)) {
                kfree(req);
                return NULL;
        }

        *req->__ctx = ctx;

        return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
        struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
        struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void *ctx = *req->__ctx;

        if (ctx)
                crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_scomp_report,
#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_SCOMPRESS,
        .tfmsize = offsetof(struct crypto_scomp, base),
};

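/*
 * Usage (sketch): a driver typically registers its algorithm from
 * module init and unregisters it on exit, e.g.
 *
 *	static struct scomp_alg my_alg = { ... };
 *
 *	ret = crypto_register_scomp(&my_alg);
 *	...
 *	crypto_unregister_scomp(&my_alg);
 *
 * (my_alg is a placeholder name, not part of this file.)
 */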
int crypto_register_scomp(struct scomp_alg *alg)
{
        struct crypto_alg *base = &alg->calg.base;

        comp_prepare_alg(&alg->calg);

        base->cra_type = &crypto_scomp_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_scomp(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_scomp(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");