// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/hwcap.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>

#include "aes-ce-setkey.h"

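/*
 * This file is built twice: once with USE_V8_CRYPTO_EXTENSIONS to produce the
 * "ce" driver backed by the ARMv8 Crypto Extensions, and once without it to
 * produce the plain NEON fallback. The CE variant registers at a higher
 * priority, so it is preferred when both are available. STRIDE is the number
 * of AES blocks the assembly in aes-modes.S processes per interleaved
 * iteration (5 for CE, 4 for NEON); the CTR tail handling below relies on it.
 */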
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE "ce"
#define PRIO 300
#define STRIDE 5
#define aes_expandkey ce_aes_expandkey
#define aes_ecb_encrypt ce_aes_ecb_encrypt
#define aes_ecb_decrypt ce_aes_ecb_decrypt
#define aes_cbc_encrypt ce_aes_cbc_encrypt
#define aes_cbc_decrypt ce_aes_cbc_decrypt
#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt
#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt
#define aes_essiv_cbc_encrypt ce_aes_essiv_cbc_encrypt
#define aes_essiv_cbc_decrypt ce_aes_essiv_cbc_decrypt
#define aes_ctr_encrypt ce_aes_ctr_encrypt
#define aes_xts_encrypt ce_aes_xts_encrypt
#define aes_xts_decrypt ce_aes_xts_decrypt
#define aes_mac_update ce_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#else
#define MODE "neon"
#define PRIO 200
#define STRIDE 4
#define aes_ecb_encrypt neon_aes_ecb_encrypt
#define aes_ecb_decrypt neon_aes_ecb_decrypt
#define aes_cbc_encrypt neon_aes_cbc_encrypt
#define aes_cbc_decrypt neon_aes_cbc_decrypt
#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt
#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt
#define aes_essiv_cbc_encrypt neon_aes_essiv_cbc_encrypt
#define aes_essiv_cbc_decrypt neon_aes_essiv_cbc_decrypt
#define aes_ctr_encrypt neon_aes_ctr_encrypt
#define aes_xts_encrypt neon_aes_xts_encrypt
#define aes_xts_decrypt neon_aes_xts_decrypt
#define aes_mac_update neon_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
#endif
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
#endif
MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
MODULE_ALIAS_CRYPTO("cmac(aes)");
MODULE_ALIAS_CRYPTO("xcbc(aes)");
MODULE_ALIAS_CRYPTO("cbcmac(aes)");

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-modes.S */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks);

asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 iv[]);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 iv[]);

asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
				    int rounds, int bytes, u8 const iv[]);
asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
				    int rounds, int bytes, u8 const iv[]);

asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int bytes, u8 ctr[], u8 finalbuf[]);

asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				int rounds, int bytes, u32 const rk2[], u8 iv[],
				int first);
asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				int rounds, int bytes, u32 const rk2[], u8 iv[],
				int first);

asmlinkage void aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				      int rounds, int blocks, u8 iv[],
				      u32 const rk2[]);
asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				      int rounds, int blocks, u8 iv[],
				      u32 const rk2[]);

asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds,
			      int blocks, u8 dg[], int enc_before,
			      int enc_after);

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

struct crypto_aes_essiv_cbc_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
	struct crypto_shash *hash;
};

struct mac_tfm_ctx {
	struct crypto_aes_ctx key;
	u8 __aligned(8) consts[];
};

struct mac_desc_ctx {
	unsigned int len;
	u8 dg[AES_BLOCK_SIZE];
};

static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	return aes_expandkey(ctx, in_key, key_len);
}

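/*
 * An XTS key is the concatenation of two AES keys of equal size: key1 encrypts
 * the data and key2 generates the tweak. xts_verify_key() validates the
 * combined key before the two halves are expanded separately.
 */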
static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
				      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				    key_len / 2);
	return ret;
}

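/*
 * For ESSIV, the second key is derived by hashing the user key with SHA-256;
 * it is later used to encrypt the sector number into the per-request IV.
 */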
static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
					    const u8 *in_key,
					    unsigned int key_len)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 digest[SHA256_DIGEST_SIZE];
	int ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len);
	if (ret)
		return ret;

	crypto_shash_tfm_digest(ctx->hash, in_key, key_len, digest);

	return aes_expandkey(&ctx->key2, digest, sizeof(digest));
}

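/*
 * The expanded key uses 6 + key_length / 4 rounds: 10 for AES-128, 12 for
 * AES-192 and 14 for AES-256.
 */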
static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key_enc, rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key_dec, rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err = 0, rounds = 6 + ctx->key_length / 4;
	unsigned int blocks;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
				ctx->key_enc, rounds, blocks, walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_encrypt_walk(req, &walk);
}

static int cbc_decrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err = 0, rounds = 6 + ctx->key_length / 4;
	unsigned int blocks;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		kernel_neon_begin();
		aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
				ctx->key_dec, rounds, blocks, walk->iv);
		kernel_neon_end();
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_decrypt_walk(req, &walk);
}

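/*
 * CBC with ciphertext stealing: everything up to the last two blocks is
 * processed as ordinary CBC, and the final two (possibly partial) blocks are
 * handed to the aes_cbc_cts_* assembly, which implements the stealing.
 */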
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_encrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			    ctx->key_enc, rounds, walk.nbytes, walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_decrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			    ctx->key_dec, rounds, walk.nbytes, walk.iv);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int __maybe_unused essiv_cbc_init_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->hash = crypto_alloc_shash("sha256", 0, 0);

	return PTR_ERR_OR_ZERO(ctx->hash);
}

static void __maybe_unused essiv_cbc_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_shash(ctx->hash);
}

static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	blocks = walk.nbytes / AES_BLOCK_SIZE;
	if (blocks) {
		kernel_neon_begin();
		aes_essiv_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				      ctx->key1.key_enc, rounds, blocks,
				      req->iv, ctx->key2.key_enc);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err ?: cbc_encrypt_walk(req, &walk);
}

static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	blocks = walk.nbytes / AES_BLOCK_SIZE;
	if (blocks) {
		kernel_neon_begin();
		aes_essiv_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				      ctx->key1.key_dec, rounds, blocks,
				      req->iv, ctx->key2.key_enc);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err ?: cbc_decrypt_walk(req, &walk);
}

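/*
 * The CTR assembly only writes whole blocks to the destination. Inputs shorter
 * than one block are bounced through buf[] so the assembly can always read a
 * full block, and when the final partial block cannot be produced with an
 * overlapping store it is returned via buf[] and copied out below.
 */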
static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		const u8 *src = walk.src.virt.addr;
		unsigned int nbytes = walk.nbytes;
		u8 *dst = walk.dst.virt.addr;
		u8 buf[AES_BLOCK_SIZE];
		unsigned int tail;

		if (unlikely(nbytes < AES_BLOCK_SIZE))
			src = memcpy(buf, src, nbytes);
		else if (nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
				walk.iv, buf);
		kernel_neon_end();

		tail = nbytes % (STRIDE * AES_BLOCK_SIZE);
		if (tail > 0 && tail < AES_BLOCK_SIZE)
			/*
			 * The final partial block could not be returned using
			 * an overlapping store, so it was passed via buf[]
			 * instead.
			 */
			memcpy(dst + nbytes - tail, buf, tail);

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

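/*
 * XTS ciphertext stealing: when the request length is not a multiple of the
 * block size, all full blocks except the last one are processed first, and
 * the remaining AES_BLOCK_SIZE + tail bytes are handled in a separate final
 * pass that performs the stealing.
 */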
static int __maybe_unused xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key1.key_enc, rounds, nbytes,
				ctx->key2.key_enc, walk.iv, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
			ctx->key1.key_enc, rounds, walk.nbytes,
			ctx->key2.key_enc, walk.iv, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static int __maybe_unused xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
		int nbytes = walk.nbytes;

		if (walk.nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key1.key_dec, rounds, nbytes,
				ctx->key2.key_enc, walk.iv, first);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	if (err || likely(!tail))
		return err;

	dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
	if (req->dst != req->src)
		dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

	skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_neon_begin();
	aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
			ctx->key1.key_dec, rounds, walk.nbytes,
			ctx->key2.key_enc, walk.iv, first);
	kernel_neon_end();

	return skcipher_walk_done(&walk, 0);
}

static struct skcipher_alg aes_algs[] = { {
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-" MODE,
		.cra_priority = PRIO,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
		.cra_module = THIS_MODULE,
	},
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = skcipher_aes_setkey,
	.encrypt = ecb_encrypt,
	.decrypt = ecb_decrypt,
}, {
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-" MODE,
		.cra_priority = PRIO,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
		.cra_module = THIS_MODULE,
	},
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = skcipher_aes_setkey,
	.encrypt = cbc_encrypt,
	.decrypt = cbc_decrypt,
}, {
	.base = {
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-" MODE,
		.cra_priority = PRIO,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
		.cra_module = THIS_MODULE,
	},
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.chunksize = AES_BLOCK_SIZE,
	.setkey = skcipher_aes_setkey,
	.encrypt = ctr_encrypt,
	.decrypt = ctr_encrypt,
}, {
	.base = {
		.cra_name = "xts(aes)",
		.cra_driver_name = "xts-aes-" MODE,
		.cra_priority = PRIO,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
		.cra_module = THIS_MODULE,
	},
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.walksize = 2 * AES_BLOCK_SIZE,
	.setkey = xts_set_key,
	.encrypt = xts_encrypt,
	.decrypt = xts_decrypt,
}, {
#endif
	.base = {
		.cra_name = "cts(cbc(aes))",
		.cra_driver_name = "cts-cbc-aes-" MODE,
		.cra_priority = PRIO,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
		.cra_module = THIS_MODULE,
	},
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.walksize = 2 * AES_BLOCK_SIZE,
	.setkey = skcipher_aes_setkey,
	.encrypt = cts_cbc_encrypt,
	.decrypt = cts_cbc_decrypt,
}, {
	.base = {
		.cra_name = "essiv(cbc(aes),sha256)",
		.cra_driver_name = "essiv-cbc-aes-sha256-" MODE,
		.cra_priority = PRIO + 1,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct crypto_aes_essiv_cbc_ctx),
		.cra_module = THIS_MODULE,
	},
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = essiv_cbc_set_key,
	.encrypt = essiv_cbc_encrypt,
	.decrypt = essiv_cbc_decrypt,
	.init = essiv_cbc_init_tfm,
	.exit = essiv_cbc_exit_tfm,
} };

static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);

	return aes_expandkey(&ctx->key, in_key, key_len);
}

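/*
 * Multiply by x in GF(2^128), i.e. a left shift with conditional reduction by
 * the polynomial represented by 0x87, as used to derive the CMAC subkeys.
 */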
static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
{
	u64 a = be64_to_cpu(x->a);
	u64 b = be64_to_cpu(x->b);

	y->a = cpu_to_be64((a << 1) | (b >> 63));
	y->b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
}

static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *consts = (be128 *)ctx->consts;
	int rounds = 6 + key_len / 4;
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* encrypt the zero vector */
	kernel_neon_begin();
	aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
			rounds, 1);
	kernel_neon_end();

	cmac_gf128_mul_by_x(consts, consts);
	cmac_gf128_mul_by_x(consts + 1, consts);

	return 0;
}

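/*
 * AES-XCBC (RFC 3566) derives three keys by encrypting the constant blocks
 * 0x01..01, 0x02..02 and 0x03..03 under the user key: the first becomes the
 * CBC-MAC key and the other two are kept in consts[] for the final block.
 */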
static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	static u8 const ks[3][AES_BLOCK_SIZE] = {
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x1 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x2 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x3 },
	};

	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	int rounds = 6 + key_len / 4;
	u8 key[AES_BLOCK_SIZE];
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	kernel_neon_begin();
	aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
	aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
	kernel_neon_end();

	return cbcmac_setkey(tfm, key, sizeof(key));
}

static int mac_init(struct shash_desc *desc)
{
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	memset(ctx->dg, 0, AES_BLOCK_SIZE);
	ctx->len = 0;

	return 0;
}

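/*
 * Process blocks with the NEON helper when the SIMD unit is usable; the
 * helper may return before consuming all blocks (e.g. to allow the NEON unit
 * to be yielded), in which case it is simply called again. Otherwise fall
 * back to the scalar AES library code.
 */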
static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
			  u8 dg[], int enc_before, int enc_after)
{
	int rounds = 6 + ctx->key_length / 4;

	if (crypto_simd_usable()) {
		int rem;

		do {
			kernel_neon_begin();
			rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
					     dg, enc_before, enc_after);
			kernel_neon_end();
			in += (blocks - rem) * AES_BLOCK_SIZE;
			blocks = rem;
			enc_before = 0;
		} while (blocks);
	} else {
		if (enc_before)
			aes_encrypt(ctx, dg, dg);

		while (blocks--) {
			crypto_xor(dg, in, AES_BLOCK_SIZE);
			in += AES_BLOCK_SIZE;

			if (blocks || enc_after)
				aes_encrypt(ctx, dg, dg);
		}
	}
}

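/*
 * Full blocks are fed to mac_do_update(); partial input is XORed into dg[]
 * until a block is complete. Encryption of the final block is deferred
 * (ctx->len == AES_BLOCK_SIZE marks a buffered but not yet encrypted block)
 * so that the final() step can first apply padding or a subkey.
 */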
static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	while (len > 0) {
		unsigned int l;

		if ((ctx->len % AES_BLOCK_SIZE) == 0 &&
		    (ctx->len + len) > AES_BLOCK_SIZE) {

			int blocks = len / AES_BLOCK_SIZE;

			len %= AES_BLOCK_SIZE;

			mac_do_update(&tctx->key, p, blocks, ctx->dg,
				      (ctx->len != 0), (len != 0));

			p += blocks * AES_BLOCK_SIZE;

			if (!len) {
				ctx->len = AES_BLOCK_SIZE;
				break;
			}
			ctx->len = 0;
		}

		l = min(len, AES_BLOCK_SIZE - ctx->len);

		if (l <= AES_BLOCK_SIZE) {
			crypto_xor(ctx->dg + ctx->len, p, l);
			ctx->len += l;
			len -= l;
			p += l;
		}
	}

	return 0;
}

static int cbcmac_final(struct shash_desc *desc, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);

	memcpy(out, ctx->dg, AES_BLOCK_SIZE);

	return 0;
}

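/*
 * Fold in the appropriate subkey from consts[] before the last encryption:
 * the first subkey for a complete final block, the second (after padding the
 * block with a single 0x80 byte) for an incomplete one.
 */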
static int cmac_final(struct shash_desc *desc, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
	u8 *consts = tctx->consts;

	if (ctx->len != AES_BLOCK_SIZE) {
		ctx->dg[ctx->len] ^= 0x80;
		consts += AES_BLOCK_SIZE;
	}

	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);

	memcpy(out, ctx->dg, AES_BLOCK_SIZE);

	return 0;
}

static struct shash_alg mac_algs[] = { {
	.base.cra_name = "cmac(aes)",
	.base.cra_driver_name = "cmac-aes-" MODE,
	.base.cra_priority = PRIO,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
			    2 * AES_BLOCK_SIZE,
	.base.cra_module = THIS_MODULE,

	.digestsize = AES_BLOCK_SIZE,
	.init = mac_init,
	.update = mac_update,
	.final = cmac_final,
	.setkey = cmac_setkey,
	.descsize = sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name = "xcbc(aes)",
	.base.cra_driver_name = "xcbc-aes-" MODE,
	.base.cra_priority = PRIO,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct mac_tfm_ctx) +
			    2 * AES_BLOCK_SIZE,
	.base.cra_module = THIS_MODULE,

	.digestsize = AES_BLOCK_SIZE,
	.init = mac_init,
	.update = mac_update,
	.final = cmac_final,
	.setkey = xcbc_setkey,
	.descsize = sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name = "cbcmac(aes)",
	.base.cra_driver_name = "cbcmac-aes-" MODE,
	.base.cra_priority = PRIO,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct mac_tfm_ctx),
	.base.cra_module = THIS_MODULE,

	.digestsize = AES_BLOCK_SIZE,
	.init = mac_init,
	.update = mac_update,
	.final = cbcmac_final,
	.setkey = cbcmac_setkey,
	.descsize = sizeof(struct mac_desc_ctx),
} };

static void aes_exit(void)
{
	crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	int err;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	err = crypto_register_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	if (err)
		goto unregister_ciphers;

	return 0;

unregister_ciphers:
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	return err;
}

#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
#else
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
EXPORT_SYMBOL(neon_aes_cbc_encrypt);
EXPORT_SYMBOL(neon_aes_xts_encrypt);
EXPORT_SYMBOL(neon_aes_xts_decrypt);
#endif
module_exit(aes_exit);