// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

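/*
 * Per-CPU scratch buffers used to linearize scatterlist input and
 * output for the synchronous (de)compression callbacks.  Each buffer
 * is SCOMP_SCRATCH_SIZE bytes; the lock serializes users of one CPU's
 * pair of buffers.
 */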
struct scomp_scratch {
	spinlock_t lock;
	void *src;
	void *dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

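/* Report the "scomp" type to userspace via the CRYPTO_USER netlink interface. */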
static int __maybe_unused crypto_scomp_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

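/* Free the per-CPU scratch buffers; called when the last scomp user goes away. */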
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

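/*
 * Allocate a source and a destination scratch buffer for each possible
 * CPU, placed on that CPU's NUMA node.  On failure, everything
 * allocated so far is freed again.
 */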
static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

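/* The first scomp user allocates the shared scratch buffers; later users reuse them. */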
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

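/*
 * Back an asynchronous acomp request with a synchronous scomp
 * algorithm: linearize the source scatterlist into the per-CPU scratch
 * buffer, run the synchronous (de)compression there, then copy the
 * result into the destination scatterlist, allocating one if the
 * caller did not supply it.  dir != 0 compresses, dir == 0
 * decompresses.
 */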
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
					    scratch->dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
					      scratch->dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

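/*
 * Tear down the scomp backing an acomp tfm; the last user also frees
 * the shared scratch buffers.
 */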
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

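/*
 * Initialize an acomp tfm whose underlying algorithm is an scomp:
 * create the scomp transform and wire the acomp entry points to the
 * synchronous wrappers above.
 */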
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

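/*
 * Allocate the scomp per-request context for @req.  On failure @req
 * itself is freed and NULL is returned.
 */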
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

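/* Free the request context attached by crypto_acomp_scomp_alloc_ctx(). */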
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_acomp_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

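/*
 * Register a synchronous compression algorithm with the crypto API.
 *
 * An illustrative sketch (not from this file) of how a driver might
 * fill in and register an scomp_alg; "mycomp" and the my_* callbacks
 * are hypothetical:
 *
 *	static struct scomp_alg my_alg = {
 *		.alloc_ctx  = my_alloc_ctx,
 *		.free_ctx   = my_free_ctx,
 *		.compress   = my_compress,
 *		.decompress = my_decompress,
 *		.base = {
 *			.cra_name        = "mycomp",
 *			.cra_driver_name = "mycomp-generic",
 *			.cra_module      = THIS_MODULE,
 *		},
 *	};
 *
 *	return crypto_register_scomp(&my_alg);
 */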
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->calg.base;

	comp_prepare_alg(&alg->calg);

	base->cra_type = &crypto_scomp_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

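/* Register an array of scomp algorithms, unwinding on failure. */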
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");