Mirror of https://github.com/tbsdtv/linux_media.git
Merge tag 'v5.20-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Make proc files report fips module name and version

  Algorithms:
   - Move generic SHA1 code into lib/crypto
   - Implement Chinese Remainder Theorem for RSA
   - Remove blake2s
   - Add XCTR with x86/arm64 acceleration
   - Add POLYVAL with x86/arm64 acceleration
   - Add HCTR2
   - Add ARIA

  Drivers:
   - Add support for new CCP/PSP device ID in ccp"

* tag 'v5.20-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (89 commits)
  crypto: tcrypt - Remove the static variable initialisations to NULL
  crypto: arm64/poly1305 - fix a read out-of-bound
  crypto: hisilicon/zip - Use the bitmap API to allocate bitmaps
  crypto: hisilicon/sec - fix auth key size error
  crypto: ccree - Remove a useless dma_supported() call
  crypto: ccp - Add support for new CCP/PSP device ID
  crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of
  crypto: hisilicon/hpre - don't use GFP_KERNEL to alloc mem during softirq
  crypto: testmgr - some more fixes to RSA test vectors
  cyrpto: powerpc/aes - delete the rebundant word "block" in comments
  hwrng: via - Fix comment typo
  crypto: twofish - Fix comment typo
  crypto: rmd160 - fix Kconfig "its" grammar
  crypto: keembay-ocs-ecc - Drop if with an always false condition
  Documentation: qat: rewrite description
  Documentation: qat: Use code block for qat sysfs example
  crypto: lib - add module license to libsha1
  crypto: lib - make the sha1 library optional
  crypto: lib - move lib/sha1.c into lib/crypto/
  crypto: fips - make proc files report fips module name and version
  ...
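For orientation on the first API item above: with CONFIG_CRYPTO_FIPS enabled (plus the new CRYPTO_FIPS_NAME and CRYPTO_FIPS_CUSTOM_VERSION options added in the Kconfig hunk below), the module name and version become readable from procfs. A minimal userspace sketch for reading them follows; the file paths come from the fips.c and Kconfig changes below, everything else is illustrative and error handling is kept short:

#include <stdio.h>

/* Dump the FIPS sysctl files added by this series; they are absent
 * on kernels built without CONFIG_CRYPTO_FIPS. */
static void print_proc_file(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	print_proc_file("/proc/sys/crypto/fips_enabled");
	print_proc_file("/proc/sys/crypto/fips_name");
	print_proc_file("/proc/sys/crypto/fips_version");
	return 0;
}

By default the name reported is "Linux Kernel Cryptographic API" and the version is the KERNELRELEASE value, per the Kconfig help text below.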
crypto/Kconfig
@@ -33,6 +33,27 @@ config CRYPTO_FIPS
|
||||
certification. You should say no unless you know what
|
||||
this is.
|
||||
|
||||
config CRYPTO_FIPS_NAME
|
||||
string "FIPS Module Name"
|
||||
default "Linux Kernel Cryptographic API"
|
||||
depends on CRYPTO_FIPS
|
||||
help
|
||||
This option sets the FIPS Module name reported by the Crypto API via
|
||||
the /proc/sys/crypto/fips_name file.
|
||||
|
||||
config CRYPTO_FIPS_CUSTOM_VERSION
|
||||
bool "Use Custom FIPS Module Version"
|
||||
depends on CRYPTO_FIPS
|
||||
default n
|
||||
|
||||
config CRYPTO_FIPS_VERSION
|
||||
string "FIPS Module Version"
|
||||
default "(none)"
|
||||
depends on CRYPTO_FIPS_CUSTOM_VERSION
|
||||
help
|
||||
This option provides the ability to override the FIPS Module Version.
|
||||
By default the KERNELRELEASE value is used.
|
||||
|
||||
config CRYPTO_ALGAPI
|
||||
tristate
|
||||
select CRYPTO_ALGAPI2
|
||||
@@ -461,6 +482,15 @@ config CRYPTO_PCBC
|
||||
PCBC: Propagating Cipher Block Chaining mode
|
||||
This block cipher algorithm is required for RxRPC.
|
||||
|
||||
config CRYPTO_XCTR
|
||||
tristate
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
XCTR: XOR Counter mode. This blockcipher mode is a variant of CTR mode
|
||||
using XORs and little-endian addition rather than big-endian arithmetic.
|
||||
XCTR mode is used to implement HCTR2.
|
||||
|
||||
config CRYPTO_XTS
|
||||
tristate "XTS support"
|
||||
select CRYPTO_SKCIPHER
|
||||
@@ -524,6 +554,17 @@ config CRYPTO_ADIANTUM
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config CRYPTO_HCTR2
|
||||
tristate "HCTR2 support"
|
||||
select CRYPTO_XCTR
|
||||
select CRYPTO_POLYVAL
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
HCTR2 is a length-preserving encryption mode for storage encryption that
|
||||
is efficient on processors with instructions to accelerate AES and
|
||||
carryless multiplication, e.g. x86 processors with AES-NI and CLMUL, and
|
||||
ARM processors with the ARMv8 crypto extensions.
|
||||
|
||||
config CRYPTO_ESSIV
|
||||
tristate "ESSIV support for block encryption"
|
||||
select CRYPTO_AUTHENC
|
||||
@@ -704,26 +745,8 @@ config CRYPTO_BLAKE2B
|
||||
|
||||
See https://blake2.net for further information.
|
||||
|
||||
config CRYPTO_BLAKE2S
|
||||
tristate "BLAKE2s digest algorithm"
|
||||
select CRYPTO_LIB_BLAKE2S_GENERIC
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
Implementation of cryptographic hash function BLAKE2s
|
||||
optimized for 8-32bit platforms and can produce digests of any size
|
||||
between 1 to 32. The keyed hash is also implemented.
|
||||
|
||||
This module provides the following algorithms:
|
||||
|
||||
- blake2s-128
|
||||
- blake2s-160
|
||||
- blake2s-224
|
||||
- blake2s-256
|
||||
|
||||
See https://blake2.net for further information.
|
||||
|
||||
config CRYPTO_BLAKE2S_X86
|
||||
tristate "BLAKE2s digest algorithm (x86 accelerated version)"
|
||||
bool "BLAKE2s digest algorithm (x86 accelerated version)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_LIB_BLAKE2S_GENERIC
|
||||
select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
|
||||
@@ -777,6 +800,23 @@ config CRYPTO_GHASH
|
||||
GHASH is the hash function used in GCM (Galois/Counter Mode).
|
||||
It is not a general-purpose cryptographic hash function.
|
||||
|
||||
config CRYPTO_POLYVAL
|
||||
tristate
|
||||
select CRYPTO_GF128MUL
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
POLYVAL is the hash function used in HCTR2. It is not a general-purpose
|
||||
cryptographic hash function.
|
||||
|
||||
config CRYPTO_POLYVAL_CLMUL_NI
|
||||
tristate "POLYVAL hash function (CLMUL-NI accelerated)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_POLYVAL
|
||||
help
|
||||
This is the x86_64 CLMUL-NI accelerated implementation of POLYVAL. It is
|
||||
used to efficiently implement HCTR2 on x86-64 processors that support
|
||||
carry-less multiplication instructions.
|
||||
|
||||
config CRYPTO_POLY1305
|
||||
tristate "Poly1305 authenticator algorithm"
|
||||
select CRYPTO_HASH
|
||||
@@ -861,7 +901,7 @@ config CRYPTO_RMD160
|
||||
|
||||
RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
|
||||
to be used as a secure replacement for the 128-bit hash functions
|
||||
MD4, MD5 and it's predecessor RIPEMD
|
||||
MD4, MD5 and its predecessor RIPEMD
|
||||
(not to be confused with RIPEMD-128).
|
||||
|
||||
It's speed is comparable to SHA1 and there are no known attacks
|
||||
@@ -873,6 +913,7 @@ config CRYPTO_RMD160
|
||||
config CRYPTO_SHA1
|
||||
tristate "SHA1 digest algorithm"
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_LIB_SHA1
|
||||
help
|
||||
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
|
||||
|
||||
@@ -1214,7 +1255,7 @@ config CRYPTO_AES_NI_INTEL
|
||||
In addition to AES cipher algorithm support, the acceleration
|
||||
for some popular block cipher mode is supported too, including
|
||||
ECB, CBC, LRW, XTS. The 64 bit version has additional
|
||||
acceleration for CTR.
|
||||
acceleration for CTR and XCTR.
|
||||
|
||||
config CRYPTO_AES_SPARC64
|
||||
tristate "AES cipher algorithms (SPARC64)"
|
||||
@@ -1603,6 +1644,21 @@ config CRYPTO_SEED
|
||||
See also:
|
||||
<http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp>
|
||||
|
||||
config CRYPTO_ARIA
|
||||
tristate "ARIA cipher algorithm"
|
||||
select CRYPTO_ALGAPI
|
||||
help
|
||||
ARIA cipher algorithm (RFC5794).
|
||||
|
||||
ARIA is a standard encryption algorithm of the Republic of Korea.
|
||||
The ARIA specifies three key sizes and rounds.
|
||||
128-bit: 12 rounds.
|
||||
192-bit: 14 rounds.
|
||||
256-bit: 16 rounds.
|
||||
|
||||
See also:
|
||||
<https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do>
|
||||
|
||||
config CRYPTO_SERPENT
|
||||
tristate "Serpent cipher algorithm"
|
||||
select CRYPTO_ALGAPI
|
||||
|
crypto/Makefile
@@ -84,7 +84,6 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
|
||||
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
|
||||
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
|
||||
obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o
|
||||
obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o
|
||||
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
|
||||
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
|
||||
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
|
||||
@@ -94,6 +93,8 @@ obj-$(CONFIG_CRYPTO_CTS) += cts.o
|
||||
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
|
||||
obj-$(CONFIG_CRYPTO_XTS) += xts.o
|
||||
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
|
||||
obj-$(CONFIG_CRYPTO_XCTR) += xctr.o
|
||||
obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o
|
||||
obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
|
||||
obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
|
||||
obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
|
||||
@@ -147,6 +148,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
|
||||
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
|
||||
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
|
||||
obj-$(CONFIG_CRYPTO_SEED) += seed.o
|
||||
obj-$(CONFIG_CRYPTO_ARIA) += aria.o
|
||||
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
|
||||
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
|
||||
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
|
||||
@@ -171,6 +173,7 @@ UBSAN_SANITIZE_jitterentropy.o = n
|
||||
jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o
|
||||
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
|
||||
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
|
||||
obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o
|
||||
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
|
||||
obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
|
||||
obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
|
||||
|
crypto/aria.c (new file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* Cryptographic API.
|
||||
*
|
||||
* ARIA Cipher Algorithm.
|
||||
*
|
||||
* Documentation of ARIA can be found in RFC 5794.
|
||||
* Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
|
||||
*
|
||||
* Information for ARIA
|
||||
* http://210.104.33.10/ARIA/index-e.html (English)
|
||||
* http://seed.kisa.or.kr/ (Korean)
|
||||
*
|
||||
* Public domain version is distributed above.
|
||||
*/
|
||||
|
||||
#include <crypto/aria.h>
|
||||
|
||||
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
const __be32 *key = (const __be32 *)in_key;
|
||||
u32 w0[4], w1[4], w2[4], w3[4];
|
||||
u32 reg0, reg1, reg2, reg3;
|
||||
const u32 *ck;
|
||||
int rkidx = 0;
|
||||
|
||||
ck = &key_rc[(key_len - 16) / 8][0];
|
||||
|
||||
w0[0] = be32_to_cpu(key[0]);
|
||||
w0[1] = be32_to_cpu(key[1]);
|
||||
w0[2] = be32_to_cpu(key[2]);
|
||||
w0[3] = be32_to_cpu(key[3]);
|
||||
|
||||
reg0 = w0[0] ^ ck[0];
|
||||
reg1 = w0[1] ^ ck[1];
|
||||
reg2 = w0[2] ^ ck[2];
|
||||
reg3 = w0[3] ^ ck[3];
|
||||
|
||||
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
|
||||
|
||||
if (key_len > 16) {
|
||||
w1[0] = be32_to_cpu(key[4]);
|
||||
w1[1] = be32_to_cpu(key[5]);
|
||||
if (key_len > 24) {
|
||||
w1[2] = be32_to_cpu(key[6]);
|
||||
w1[3] = be32_to_cpu(key[7]);
|
||||
} else {
|
||||
w1[2] = 0;
|
||||
w1[3] = 0;
|
||||
}
|
||||
} else {
|
||||
w1[0] = 0;
|
||||
w1[1] = 0;
|
||||
w1[2] = 0;
|
||||
w1[3] = 0;
|
||||
}
|
||||
|
||||
w1[0] ^= reg0;
|
||||
w1[1] ^= reg1;
|
||||
w1[2] ^= reg2;
|
||||
w1[3] ^= reg3;
|
||||
|
||||
reg0 = w1[0];
|
||||
reg1 = w1[1];
|
||||
reg2 = w1[2];
|
||||
reg3 = w1[3];
|
||||
|
||||
reg0 ^= ck[4];
|
||||
reg1 ^= ck[5];
|
||||
reg2 ^= ck[6];
|
||||
reg3 ^= ck[7];
|
||||
|
||||
aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
|
||||
|
||||
reg0 ^= w0[0];
|
||||
reg1 ^= w0[1];
|
||||
reg2 ^= w0[2];
|
||||
reg3 ^= w0[3];
|
||||
|
||||
w2[0] = reg0;
|
||||
w2[1] = reg1;
|
||||
w2[2] = reg2;
|
||||
w2[3] = reg3;
|
||||
|
||||
reg0 ^= ck[8];
|
||||
reg1 ^= ck[9];
|
||||
reg2 ^= ck[10];
|
||||
reg3 ^= ck[11];
|
||||
|
||||
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
|
||||
|
||||
w3[0] = reg0 ^ w1[0];
|
||||
w3[1] = reg1 ^ w1[1];
|
||||
w3[2] = reg2 ^ w1[2];
|
||||
w3[3] = reg3 ^ w1[3];
|
||||
|
||||
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19);
|
||||
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31);
|
||||
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67);
|
||||
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97);
|
||||
if (key_len > 16) {
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97);
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97);
|
||||
|
||||
if (key_len > 24) {
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97);
|
||||
|
||||
rkidx++;
|
||||
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void aria_set_decrypt_key(struct aria_ctx *ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i];
|
||||
ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i];
|
||||
}
|
||||
|
||||
for (i = 1; i < ctx->rounds; i++) {
|
||||
ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]);
|
||||
ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]);
|
||||
ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]);
|
||||
ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]);
|
||||
|
||||
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
|
||||
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
|
||||
aria_diff_byte(&ctx->dec_key[i][1],
|
||||
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
|
||||
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
|
||||
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
|
||||
}
|
||||
}
|
||||
|
||||
static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (key_len != 16 && key_len != 24 && key_len != 32)
|
||||
return -EINVAL;
|
||||
|
||||
ctx->key_length = key_len;
|
||||
ctx->rounds = (key_len + 32) / 4;
|
||||
|
||||
aria_set_encrypt_key(ctx, in_key, key_len);
|
||||
aria_set_decrypt_key(ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
|
||||
u32 key[][ARIA_RD_KEY_WORDS])
|
||||
{
|
||||
const __be32 *src = (const __be32 *)in;
|
||||
__be32 *dst = (__be32 *)out;
|
||||
u32 reg0, reg1, reg2, reg3;
|
||||
int rounds, rkidx = 0;
|
||||
|
||||
rounds = ctx->rounds;
|
||||
|
||||
reg0 = be32_to_cpu(src[0]);
|
||||
reg1 = be32_to_cpu(src[1]);
|
||||
reg2 = be32_to_cpu(src[2]);
|
||||
reg3 = be32_to_cpu(src[3]);
|
||||
|
||||
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
|
||||
rkidx++;
|
||||
|
||||
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
|
||||
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
|
||||
rkidx++;
|
||||
|
||||
while ((rounds -= 2) > 0) {
|
||||
aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
|
||||
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
|
||||
rkidx++;
|
||||
|
||||
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
|
||||
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
|
||||
rkidx++;
|
||||
}
|
||||
|
||||
reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]),
|
||||
(u8)(x2[get_u8(reg0, 1)] >> 8),
|
||||
(u8)(s1[get_u8(reg0, 2)]),
|
||||
(u8)(s2[get_u8(reg0, 3)]));
|
||||
reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]),
|
||||
(u8)(x2[get_u8(reg1, 1)] >> 8),
|
||||
(u8)(s1[get_u8(reg1, 2)]),
|
||||
(u8)(s2[get_u8(reg1, 3)]));
|
||||
reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]),
|
||||
(u8)(x2[get_u8(reg2, 1)] >> 8),
|
||||
(u8)(s1[get_u8(reg2, 2)]),
|
||||
(u8)(s2[get_u8(reg2, 3)]));
|
||||
reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]),
|
||||
(u8)(x2[get_u8(reg3, 1)] >> 8),
|
||||
(u8)(s1[get_u8(reg3, 2)]),
|
||||
(u8)(s2[get_u8(reg3, 3)]));
|
||||
|
||||
dst[0] = cpu_to_be32(reg0);
|
||||
dst[1] = cpu_to_be32(reg1);
|
||||
dst[2] = cpu_to_be32(reg2);
|
||||
dst[3] = cpu_to_be32(reg3);
|
||||
}
|
||||
|
||||
static void aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
__aria_crypt(ctx, out, in, ctx->enc_key);
|
||||
}
|
||||
|
||||
static void aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
__aria_crypt(ctx, out, in, ctx->dec_key);
|
||||
}
|
||||
|
||||
static struct crypto_alg aria_alg = {
|
||||
.cra_name = "aria",
|
||||
.cra_driver_name = "aria-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = ARIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct aria_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = ARIA_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = ARIA_MAX_KEY_SIZE,
|
||||
.cia_setkey = aria_set_key,
|
||||
.cia_encrypt = aria_encrypt,
|
||||
.cia_decrypt = aria_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init aria_init(void)
|
||||
{
|
||||
return crypto_register_alg(&aria_alg);
|
||||
}
|
||||
|
||||
static void __exit aria_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&aria_alg);
|
||||
}
|
||||
|
||||
subsys_initcall(aria_init);
|
||||
module_exit(aria_fini);
|
||||
|
||||
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
|
||||
MODULE_ALIAS_CRYPTO("aria");
|
crypto/blake2s_generic.c (deleted)
@@ -1,75 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
/*
|
||||
* shash interface to the generic implementation of BLAKE2s
|
||||
*
|
||||
* Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <crypto/internal/blake2s.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
static int crypto_blake2s_update_generic(struct shash_desc *desc,
|
||||
const u8 *in, unsigned int inlen)
|
||||
{
|
||||
return crypto_blake2s_update(desc, in, inlen, true);
|
||||
}
|
||||
|
||||
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
return crypto_blake2s_final(desc, out, true);
|
||||
}
|
||||
|
||||
#define BLAKE2S_ALG(name, driver_name, digest_size) \
|
||||
{ \
|
||||
.base.cra_name = name, \
|
||||
.base.cra_driver_name = driver_name, \
|
||||
.base.cra_priority = 100, \
|
||||
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
|
||||
.base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \
|
||||
.base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \
|
||||
.base.cra_module = THIS_MODULE, \
|
||||
.digestsize = digest_size, \
|
||||
.setkey = crypto_blake2s_setkey, \
|
||||
.init = crypto_blake2s_init, \
|
||||
.update = crypto_blake2s_update_generic, \
|
||||
.final = crypto_blake2s_final_generic, \
|
||||
.descsize = sizeof(struct blake2s_state), \
|
||||
}
|
||||
|
||||
static struct shash_alg blake2s_algs[] = {
|
||||
BLAKE2S_ALG("blake2s-128", "blake2s-128-generic",
|
||||
BLAKE2S_128_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-160", "blake2s-160-generic",
|
||||
BLAKE2S_160_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-224", "blake2s-224-generic",
|
||||
BLAKE2S_224_HASH_SIZE),
|
||||
BLAKE2S_ALG("blake2s-256", "blake2s-256-generic",
|
||||
BLAKE2S_256_HASH_SIZE),
|
||||
};
|
||||
|
||||
static int __init blake2s_mod_init(void)
|
||||
{
|
||||
return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
}
|
||||
|
||||
static void __exit blake2s_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
|
||||
}
|
||||
|
||||
subsys_initcall(blake2s_mod_init);
|
||||
module_exit(blake2s_mod_exit);
|
||||
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-128-generic");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-160-generic");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-224-generic");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256");
|
||||
MODULE_ALIAS_CRYPTO("blake2s-256-generic");
|
||||
MODULE_LICENSE("GPL v2");
|
crypto/fips.c
@@ -12,6 +12,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
int fips_enabled;
|
||||
EXPORT_SYMBOL_GPL(fips_enabled);
|
||||
@@ -30,13 +31,37 @@ static int fips_enable(char *str)
|
||||
|
||||
__setup("fips=", fips_enable);
|
||||
|
||||
#define FIPS_MODULE_NAME CONFIG_CRYPTO_FIPS_NAME
|
||||
#ifdef CONFIG_CRYPTO_FIPS_CUSTOM_VERSION
|
||||
#define FIPS_MODULE_VERSION CONFIG_CRYPTO_FIPS_VERSION
|
||||
#else
|
||||
#define FIPS_MODULE_VERSION UTS_RELEASE
|
||||
#endif
|
||||
|
||||
static char fips_name[] = FIPS_MODULE_NAME;
|
||||
static char fips_version[] = FIPS_MODULE_VERSION;
|
||||
|
||||
static struct ctl_table crypto_sysctl_table[] = {
|
||||
{
|
||||
.procname = "fips_enabled",
|
||||
.data = &fips_enabled,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec
|
||||
.procname = "fips_enabled",
|
||||
.data = &fips_enabled,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
{
|
||||
.procname = "fips_name",
|
||||
.data = &fips_name,
|
||||
.maxlen = 64,
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dostring
|
||||
},
|
||||
{
|
||||
.procname = "fips_version",
|
||||
.data = &fips_version,
|
||||
.maxlen = 64,
|
||||
.mode = 0444,
|
||||
.proc_handler = proc_dostring
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
crypto/hctr2.c (new file, 581 lines)
@@ -0,0 +1,581 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* HCTR2 length-preserving encryption mode
|
||||
*
|
||||
* Copyright 2021 Google LLC
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* HCTR2 is a length-preserving encryption mode that is efficient on
|
||||
* processors with instructions to accelerate AES and carryless
|
||||
* multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM
|
||||
* processors with the ARMv8 crypto extensions.
|
||||
*
|
||||
* For more details, see the paper: "Length-preserving encryption with HCTR2"
|
||||
* (https://eprint.iacr.org/2021/1441.pdf)
|
||||
*/
|
||||
|
||||
#include <crypto/internal/cipher.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/polyval.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#define BLOCKCIPHER_BLOCK_SIZE 16
|
||||
|
||||
/*
|
||||
* The specification allows variable-length tweaks, but Linux's crypto API
|
||||
* currently only allows algorithms to support a single length. The "natural"
|
||||
* tweak length for HCTR2 is 16, since that fits into one POLYVAL block for
|
||||
* the best performance. But longer tweaks are useful for fscrypt, to avoid
|
||||
* needing to derive per-file keys. So instead we use two blocks, or 32 bytes.
|
||||
*/
|
||||
#define TWEAK_SIZE 32
|
||||
|
||||
struct hctr2_instance_ctx {
|
||||
struct crypto_cipher_spawn blockcipher_spawn;
|
||||
struct crypto_skcipher_spawn xctr_spawn;
|
||||
struct crypto_shash_spawn polyval_spawn;
|
||||
};
|
||||
|
||||
struct hctr2_tfm_ctx {
|
||||
struct crypto_cipher *blockcipher;
|
||||
struct crypto_skcipher *xctr;
|
||||
struct crypto_shash *polyval;
|
||||
u8 L[BLOCKCIPHER_BLOCK_SIZE];
|
||||
int hashed_tweak_offset;
|
||||
/*
|
||||
* This struct is allocated with extra space for two exported hash
|
||||
* states. Since the hash state size is not known at compile-time, we
|
||||
* can't add these to the struct directly.
|
||||
*
|
||||
* hashed_tweaklen_divisible;
|
||||
* hashed_tweaklen_remainder;
|
||||
*/
|
||||
};
|
||||
|
||||
struct hctr2_request_ctx {
|
||||
u8 first_block[BLOCKCIPHER_BLOCK_SIZE];
|
||||
u8 xctr_iv[BLOCKCIPHER_BLOCK_SIZE];
|
||||
struct scatterlist *bulk_part_dst;
|
||||
struct scatterlist *bulk_part_src;
|
||||
struct scatterlist sg_src[2];
|
||||
struct scatterlist sg_dst[2];
|
||||
/*
|
||||
* Sub-request sizes are unknown at compile-time, so they need to go
|
||||
* after the members with known sizes.
|
||||
*/
|
||||
union {
|
||||
struct shash_desc hash_desc;
|
||||
struct skcipher_request xctr_req;
|
||||
} u;
|
||||
/*
|
||||
* This struct is allocated with extra space for one exported hash
|
||||
* state. Since the hash state size is not known at compile-time, we
|
||||
* can't add it to the struct directly.
|
||||
*
|
||||
* hashed_tweak;
|
||||
*/
|
||||
};
|
||||
|
||||
static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx,
|
||||
bool has_remainder)
|
||||
{
|
||||
u8 *p = (u8 *)tctx + sizeof(*tctx);
|
||||
|
||||
if (has_remainder) /* For messages not a multiple of block length */
|
||||
p += crypto_shash_statesize(tctx->polyval);
|
||||
return p;
|
||||
}
|
||||
|
||||
static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
|
||||
struct hctr2_request_ctx *rctx)
|
||||
{
|
||||
return (u8 *)rctx + tctx->hashed_tweak_offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* The input data for each HCTR2 hash step begins with a 16-byte block that
|
||||
* contains the tweak length and a flag that indicates whether the input is evenly
|
||||
* divisible into blocks. Since this implementation only supports one tweak
|
||||
* length, we precompute the two hash states resulting from hashing the two
|
||||
* possible values of this initial block. This reduces by one block the amount of
|
||||
* data that needs to be hashed for each encryption/decryption
|
||||
*
|
||||
* These precomputed hashes are stored in hctr2_tfm_ctx.
|
||||
*/
|
||||
static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder)
|
||||
{
|
||||
SHASH_DESC_ON_STACK(shash, tfm->polyval);
|
||||
__le64 tweak_length_block[2];
|
||||
int err;
|
||||
|
||||
shash->tfm = tctx->polyval;
|
||||
memset(tweak_length_block, 0, sizeof(tweak_length_block));
|
||||
|
||||
tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder);
|
||||
err = crypto_shash_init(shash);
|
||||
if (err)
|
||||
return err;
|
||||
err = crypto_shash_update(shash, (u8 *)tweak_length_block,
|
||||
POLYVAL_BLOCK_SIZE);
|
||||
if (err)
|
||||
return err;
|
||||
return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder));
|
||||
}
|
||||
|
||||
static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
|
||||
u8 hbar[BLOCKCIPHER_BLOCK_SIZE];
|
||||
int err;
|
||||
|
||||
crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_cipher_set_flags(tctx->blockcipher,
|
||||
crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_cipher_setkey(tctx->blockcipher, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(tctx->xctr,
|
||||
crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_skcipher_setkey(tctx->xctr, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
memset(hbar, 0, sizeof(hbar));
|
||||
crypto_cipher_encrypt_one(tctx->blockcipher, hbar, hbar);
|
||||
|
||||
memset(tctx->L, 0, sizeof(tctx->L));
|
||||
tctx->L[0] = 0x01;
|
||||
crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L);
|
||||
|
||||
crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE);
|
||||
if (err)
|
||||
return err;
|
||||
memzero_explicit(hbar, sizeof(hbar));
|
||||
|
||||
return hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false);
|
||||
}
|
||||
|
||||
static int hctr2_hash_tweak(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
|
||||
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct shash_desc *hash_desc = &rctx->u.hash_desc;
|
||||
int err;
|
||||
bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE;
|
||||
|
||||
hash_desc->tfm = tctx->polyval;
|
||||
err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder));
|
||||
if (err)
|
||||
return err;
|
||||
err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
// Store the hashed tweak, since we need it when computing both
|
||||
// H(T || N) and H(T || V).
|
||||
return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx));
|
||||
}
|
||||
|
||||
static int hctr2_hash_message(struct skcipher_request *req,
|
||||
struct scatterlist *sgl,
|
||||
u8 digest[POLYVAL_DIGEST_SIZE])
|
||||
{
|
||||
static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 };
|
||||
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
struct shash_desc *hash_desc = &rctx->u.hash_desc;
|
||||
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
|
||||
struct sg_mapping_iter miter;
|
||||
unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE;
|
||||
int i;
|
||||
int err = 0;
|
||||
int n = 0;
|
||||
|
||||
sg_miter_start(&miter, sgl, sg_nents(sgl),
|
||||
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
|
||||
for (i = 0; i < bulk_len; i += n) {
|
||||
sg_miter_next(&miter);
|
||||
n = min_t(unsigned int, miter.length, bulk_len - i);
|
||||
err = crypto_shash_update(hash_desc, miter.addr, n);
|
||||
if (err)
|
||||
break;
|
||||
}
|
||||
sg_miter_stop(&miter);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (remainder) {
|
||||
err = crypto_shash_update(hash_desc, padding,
|
||||
BLOCKCIPHER_BLOCK_SIZE - remainder);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return crypto_shash_final(hash_desc, digest);
|
||||
}
|
||||
|
||||
static int hctr2_finish(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
|
||||
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
u8 digest[POLYVAL_DIGEST_SIZE];
|
||||
struct shash_desc *hash_desc = &rctx->u.hash_desc;
|
||||
int err;
|
||||
|
||||
// U = UU ^ H(T || V)
|
||||
// or M = MM ^ H(T || N)
|
||||
hash_desc->tfm = tctx->polyval;
|
||||
err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx));
|
||||
if (err)
|
||||
return err;
|
||||
err = hctr2_hash_message(req, rctx->bulk_part_dst, digest);
|
||||
if (err)
|
||||
return err;
|
||||
crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE);
|
||||
|
||||
// Copy U (or M) into dst scatterlist
|
||||
scatterwalk_map_and_copy(rctx->first_block, req->dst,
|
||||
0, BLOCKCIPHER_BLOCK_SIZE, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hctr2_xctr_done(struct crypto_async_request *areq,
|
||||
int err)
|
||||
{
|
||||
struct skcipher_request *req = areq->data;
|
||||
|
||||
if (!err)
|
||||
err = hctr2_finish(req);
|
||||
|
||||
skcipher_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int hctr2_crypt(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
|
||||
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
|
||||
u8 digest[POLYVAL_DIGEST_SIZE];
|
||||
int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
|
||||
int err;
|
||||
|
||||
// Requests must be at least one block
|
||||
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
// Copy M (or U) into a temporary buffer
|
||||
scatterwalk_map_and_copy(rctx->first_block, req->src,
|
||||
0, BLOCKCIPHER_BLOCK_SIZE, 0);
|
||||
|
||||
// Create scatterlists for N and V
|
||||
rctx->bulk_part_src = scatterwalk_ffwd(rctx->sg_src, req->src,
|
||||
BLOCKCIPHER_BLOCK_SIZE);
|
||||
rctx->bulk_part_dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
|
||||
BLOCKCIPHER_BLOCK_SIZE);
|
||||
|
||||
// MM = M ^ H(T || N)
|
||||
// or UU = U ^ H(T || V)
|
||||
err = hctr2_hash_tweak(req);
|
||||
if (err)
|
||||
return err;
|
||||
err = hctr2_hash_message(req, rctx->bulk_part_src, digest);
|
||||
if (err)
|
||||
return err;
|
||||
crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
|
||||
|
||||
// UU = E(MM)
|
||||
// or MM = D(UU)
|
||||
if (enc)
|
||||
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->first_block,
|
||||
digest);
|
||||
else
|
||||
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->first_block,
|
||||
digest);
|
||||
|
||||
// S = MM ^ UU ^ L
|
||||
crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
|
||||
crypto_xor_cpy(rctx->xctr_iv, digest, tctx->L, BLOCKCIPHER_BLOCK_SIZE);
|
||||
|
||||
// V = XCTR(S, N)
|
||||
// or N = XCTR(S, V)
|
||||
skcipher_request_set_tfm(&rctx->u.xctr_req, tctx->xctr);
|
||||
skcipher_request_set_crypt(&rctx->u.xctr_req, rctx->bulk_part_src,
|
||||
rctx->bulk_part_dst, bulk_len,
|
||||
rctx->xctr_iv);
|
||||
skcipher_request_set_callback(&rctx->u.xctr_req,
|
||||
req->base.flags,
|
||||
hctr2_xctr_done, req);
|
||||
return crypto_skcipher_encrypt(&rctx->u.xctr_req) ?:
|
||||
hctr2_finish(req);
|
||||
}
|
||||
|
||||
static int hctr2_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return hctr2_crypt(req, true);
|
||||
}
|
||||
|
||||
static int hctr2_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return hctr2_crypt(req, false);
|
||||
}
|
||||
|
||||
static int hctr2_init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
|
||||
struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst);
|
||||
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_skcipher *xctr;
|
||||
struct crypto_cipher *blockcipher;
|
||||
struct crypto_shash *polyval;
|
||||
unsigned int subreq_size;
|
||||
int err;
|
||||
|
||||
xctr = crypto_spawn_skcipher(&ictx->xctr_spawn);
|
||||
if (IS_ERR(xctr))
|
||||
return PTR_ERR(xctr);
|
||||
|
||||
blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
|
||||
if (IS_ERR(blockcipher)) {
|
||||
err = PTR_ERR(blockcipher);
|
||||
goto err_free_xctr;
|
||||
}
|
||||
|
||||
polyval = crypto_spawn_shash(&ictx->polyval_spawn);
|
||||
if (IS_ERR(polyval)) {
|
||||
err = PTR_ERR(polyval);
|
||||
goto err_free_blockcipher;
|
||||
}
|
||||
|
||||
tctx->xctr = xctr;
|
||||
tctx->blockcipher = blockcipher;
|
||||
tctx->polyval = polyval;
|
||||
|
||||
BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) !=
|
||||
sizeof(struct hctr2_request_ctx));
|
||||
subreq_size = max(sizeof_field(struct hctr2_request_ctx, u.hash_desc) +
|
||||
crypto_shash_descsize(polyval),
|
||||
sizeof_field(struct hctr2_request_ctx, u.xctr_req) +
|
||||
crypto_skcipher_reqsize(xctr));
|
||||
|
||||
tctx->hashed_tweak_offset = offsetof(struct hctr2_request_ctx, u) +
|
||||
subreq_size;
|
||||
crypto_skcipher_set_reqsize(tfm, tctx->hashed_tweak_offset +
|
||||
crypto_shash_statesize(polyval));
|
||||
return 0;
|
||||
|
||||
err_free_blockcipher:
|
||||
crypto_free_cipher(blockcipher);
|
||||
err_free_xctr:
|
||||
crypto_free_skcipher(xctr);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void hctr2_exit_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(tctx->blockcipher);
|
||||
crypto_free_skcipher(tctx->xctr);
|
||||
crypto_free_shash(tctx->polyval);
|
||||
}
|
||||
|
||||
static void hctr2_free_instance(struct skcipher_instance *inst)
|
||||
{
|
||||
struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst);
|
||||
|
||||
crypto_drop_cipher(&ictx->blockcipher_spawn);
|
||||
crypto_drop_skcipher(&ictx->xctr_spawn);
|
||||
crypto_drop_shash(&ictx->polyval_spawn);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int hctr2_create_common(struct crypto_template *tmpl,
|
||||
struct rtattr **tb,
|
||||
const char *xctr_name,
|
||||
const char *polyval_name)
|
||||
{
|
||||
u32 mask;
|
||||
struct skcipher_instance *inst;
|
||||
struct hctr2_instance_ctx *ictx;
|
||||
struct skcipher_alg *xctr_alg;
|
||||
struct crypto_alg *blockcipher_alg;
|
||||
struct shash_alg *polyval_alg;
|
||||
char blockcipher_name[CRYPTO_MAX_ALG_NAME];
|
||||
int len;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
ictx = skcipher_instance_ctx(inst);
|
||||
|
||||
/* Stream cipher, xctr(block_cipher) */
|
||||
err = crypto_grab_skcipher(&ictx->xctr_spawn,
|
||||
skcipher_crypto_instance(inst),
|
||||
xctr_name, 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn);
|
||||
|
||||
err = -EINVAL;
|
||||
if (strncmp(xctr_alg->base.cra_name, "xctr(", 5))
|
||||
goto err_free_inst;
|
||||
len = strscpy(blockcipher_name, xctr_alg->base.cra_name + 5,
|
||||
sizeof(blockcipher_name));
|
||||
if (len < 1)
|
||||
goto err_free_inst;
|
||||
if (blockcipher_name[len - 1] != ')')
|
||||
goto err_free_inst;
|
||||
blockcipher_name[len - 1] = 0;
|
||||
|
||||
/* Block cipher, e.g. "aes" */
|
||||
err = crypto_grab_cipher(&ictx->blockcipher_spawn,
|
||||
skcipher_crypto_instance(inst),
|
||||
blockcipher_name, 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
|
||||
|
||||
/* Require blocksize of 16 bytes */
|
||||
err = -EINVAL;
|
||||
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
|
||||
goto err_free_inst;
|
||||
|
||||
/* Polyval ε-∆U hash function */
|
||||
err = crypto_grab_shash(&ictx->polyval_spawn,
|
||||
skcipher_crypto_instance(inst),
|
||||
polyval_name, 0, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
polyval_alg = crypto_spawn_shash_alg(&ictx->polyval_spawn);
|
||||
|
||||
/* Ensure Polyval is being used */
|
||||
err = -EINVAL;
|
||||
if (strcmp(polyval_alg->base.cra_name, "polyval") != 0)
|
||||
goto err_free_inst;
|
||||
|
||||
/* Instance fields */
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "hctr2(%s)",
|
||||
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"hctr2_base(%s,%s)",
|
||||
xctr_alg->base.cra_driver_name,
|
||||
polyval_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) +
|
||||
polyval_alg->statesize * 2;
|
||||
inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask |
|
||||
polyval_alg->base.cra_alignmask;
|
||||
/*
|
||||
* The hash function is called twice, so it is weighted higher than the
|
||||
* xctr and blockcipher.
|
||||
*/
|
||||
inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority +
|
||||
4 * polyval_alg->base.cra_priority +
|
||||
blockcipher_alg->cra_priority) / 7;
|
||||
|
||||
inst->alg.setkey = hctr2_setkey;
|
||||
inst->alg.encrypt = hctr2_encrypt;
|
||||
inst->alg.decrypt = hctr2_decrypt;
|
||||
inst->alg.init = hctr2_init_tfm;
|
||||
inst->alg.exit = hctr2_exit_tfm;
|
||||
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg);
|
||||
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg);
|
||||
inst->alg.ivsize = TWEAK_SIZE;
|
||||
|
||||
inst->free = hctr2_free_instance;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err) {
|
||||
err_free_inst:
|
||||
hctr2_free_instance(inst);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
const char *xctr_name;
|
||||
const char *polyval_name;
|
||||
|
||||
xctr_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(xctr_name))
|
||||
return PTR_ERR(xctr_name);
|
||||
|
||||
polyval_name = crypto_attr_alg_name(tb[2]);
|
||||
if (IS_ERR(polyval_name))
|
||||
return PTR_ERR(polyval_name);
|
||||
|
||||
return hctr2_create_common(tmpl, tb, xctr_name, polyval_name);
|
||||
}
|
||||
|
||||
static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
const char *blockcipher_name;
|
||||
char xctr_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
blockcipher_name = crypto_attr_alg_name(tb[1]);
|
||||
if (IS_ERR(blockcipher_name))
|
||||
return PTR_ERR(blockcipher_name);
|
||||
|
||||
if (snprintf(xctr_name, CRYPTO_MAX_ALG_NAME, "xctr(%s)",
|
||||
blockcipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return -ENAMETOOLONG;
|
||||
|
||||
return hctr2_create_common(tmpl, tb, xctr_name, "polyval");
|
||||
}
|
||||
|
||||
static struct crypto_template hctr2_tmpls[] = {
|
||||
{
|
||||
/* hctr2_base(xctr_name, polyval_name) */
|
||||
.name = "hctr2_base",
|
||||
.create = hctr2_create_base,
|
||||
.module = THIS_MODULE,
|
||||
}, {
|
||||
/* hctr2(blockcipher_name) */
|
||||
.name = "hctr2",
|
||||
.create = hctr2_create,
|
||||
.module = THIS_MODULE,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init hctr2_module_init(void)
|
||||
{
|
||||
return crypto_register_templates(hctr2_tmpls, ARRAY_SIZE(hctr2_tmpls));
|
||||
}
|
||||
|
||||
static void __exit hctr2_module_exit(void)
|
||||
{
|
||||
return crypto_unregister_templates(hctr2_tmpls,
|
||||
ARRAY_SIZE(hctr2_tmpls));
|
||||
}
|
||||
|
||||
subsys_initcall(hctr2_module_init);
|
||||
module_exit(hctr2_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("hctr2");
|
||||
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
|
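With crypto/hctr2.c in place, the template above can be instantiated from userspace through the AF_ALG socket interface (this additionally assumes CONFIG_CRYPTO_USER_API_SKCIPHER). The sketch below is only illustrative: the algorithm name "hctr2(aes)" and the 32-byte IV/tweak size follow from the code above, while the all-zero key, the sample plaintext and the omitted error handling are made up for the example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "hctr2(aes)",	/* template added by this series */
	};
	unsigned char key[16] = { 0 };		/* example AES-128 key (all zero) */
	unsigned char iv[32]  = { 0 };		/* HCTR2 tweak: TWEAK_SIZE == 32 */
	unsigned char pt[32]  = "hctr2 needs >= one AES block...";
	unsigned char ct[32];
	char cbuf[CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = { .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
			      .msg_iov = &iov, .msg_iovlen = 1 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *algiv;
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* First control message: encrypt operation. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* Second control message: the 32-byte tweak, passed as the IV. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
	algiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	algiv->ivlen = sizeof(iv);
	memcpy(algiv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));

	for (i = 0; i < 32; i++)
		printf("%02x", ct[i]);
	printf("\n");
	close(opfd);
	close(tfmfd);
	return 0;
}

Note that requests shorter than one AES block are rejected by hctr2_crypt() above, and decryption is the same flow with ALG_OP_DECRYPT and the same tweak.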
crypto/polyval-generic.c (new file, 245 lines)
@@ -0,0 +1,245 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* POLYVAL: hash function for HCTR2.
|
||||
*
|
||||
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
|
||||
* Copyright (c) 2009 Intel Corp.
|
||||
* Author: Huang Ying <ying.huang@intel.com>
|
||||
* Copyright 2021 Google LLC
|
||||
*/
|
||||
|
||||
/*
|
||||
* Code based on crypto/ghash-generic.c
|
||||
*
|
||||
* POLYVAL is a keyed hash function similar to GHASH. POLYVAL uses a different
|
||||
* modulus for finite field multiplication which makes hardware accelerated
|
||||
* implementations on little-endian machines faster. POLYVAL is used in the
|
||||
* kernel to implement HCTR2, but was originally specified for AES-GCM-SIV
|
||||
* (RFC 8452).
|
||||
*
|
||||
* For more information see:
|
||||
* Length-preserving encryption with HCTR2:
|
||||
* https://eprint.iacr.org/2021/1441.pdf
|
||||
* AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption:
|
||||
* https://datatracker.ietf.org/doc/html/rfc8452
|
||||
*
|
||||
* Like GHASH, POLYVAL is not a cryptographic hash function and should
|
||||
* not be used outside of crypto modes explicitly designed to use POLYVAL.
|
||||
*
|
||||
* This implementation uses a convenient trick involving the GHASH and POLYVAL
|
||||
* fields. This trick allows multiplication in the POLYVAL field to be
|
||||
* implemented by using multiplication in the GHASH field as a subroutine. An
|
||||
* element of the POLYVAL field can be converted to an element of the GHASH
|
||||
* field by computing x*REVERSE(a), where REVERSE reverses the byte-ordering of
|
||||
* a. Similarly, an element of the GHASH field can be converted back to the
|
||||
* POLYVAL field by computing REVERSE(x^{-1}*a). For more information, see:
|
||||
* https://datatracker.ietf.org/doc/html/rfc8452#appendix-A
|
||||
*
|
||||
* By using this trick, we do not need to implement the POLYVAL field for the
|
||||
* generic implementation.
|
||||
*
|
||||
* Warning: this generic implementation is not intended to be used in practice
|
||||
* and is not constant time. For practical use, a hardware accelerated
|
||||
* implementation of POLYVAL should be used instead.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <crypto/polyval.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
struct polyval_tfm_ctx {
|
||||
struct gf128mul_4k *gf128;
|
||||
};
|
||||
|
||||
struct polyval_desc_ctx {
|
||||
union {
|
||||
u8 buffer[POLYVAL_BLOCK_SIZE];
|
||||
be128 buffer128;
|
||||
};
|
||||
u32 bytes;
|
||||
};
|
||||
|
||||
static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
|
||||
const u8 src[POLYVAL_BLOCK_SIZE])
|
||||
{
|
||||
u64 a = get_unaligned((const u64 *)&src[0]);
|
||||
u64 b = get_unaligned((const u64 *)&src[8]);
|
||||
|
||||
put_unaligned(swab64(a), (u64 *)&dst[8]);
|
||||
put_unaligned(swab64(b), (u64 *)&dst[0]);
|
||||
}
|
||||
|
||||
/*
|
||||
* Performs multiplication in the POLYVAL field using the GHASH field as a
|
||||
* subroutine. This function is used as a fallback for hardware accelerated
|
||||
* implementations when simd registers are unavailable.
|
||||
*
|
||||
* Note: This function is not used for polyval-generic, instead we use the 4k
|
||||
* lookup table implementation for finite field multiplication.
|
||||
*/
|
||||
void polyval_mul_non4k(u8 *op1, const u8 *op2)
|
||||
{
|
||||
be128 a, b;
|
||||
|
||||
// Assume one argument is in Montgomery form and one is not.
|
||||
copy_and_reverse((u8 *)&a, op1);
|
||||
copy_and_reverse((u8 *)&b, op2);
|
||||
gf128mul_x_lle(&a, &a);
|
||||
gf128mul_lle(&a, &b);
|
||||
copy_and_reverse(op1, (u8 *)&a);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(polyval_mul_non4k);
|
||||
|
||||
/*
|
||||
* Perform a POLYVAL update using non4k multiplication. This function is used
|
||||
* as a fallback for hardware accelerated implementations when simd registers
|
||||
* are unavailable.
|
||||
*
|
||||
* Note: This function is not used for polyval-generic, instead we use the 4k
|
||||
* lookup table implementation of finite field multiplication.
|
||||
*/
|
||||
void polyval_update_non4k(const u8 *key, const u8 *in,
|
||||
size_t nblocks, u8 *accumulator)
|
||||
{
|
||||
while (nblocks--) {
|
||||
crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE);
|
||||
polyval_mul_non4k(accumulator, key);
|
||||
in += POLYVAL_BLOCK_SIZE;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(polyval_update_non4k);
|
||||
|
||||
static int polyval_setkey(struct crypto_shash *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
|
||||
be128 k;
|
||||
|
||||
if (keylen != POLYVAL_BLOCK_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
gf128mul_free_4k(ctx->gf128);
|
||||
|
||||
BUILD_BUG_ON(sizeof(k) != POLYVAL_BLOCK_SIZE);
|
||||
copy_and_reverse((u8 *)&k, key);
|
||||
gf128mul_x_lle(&k, &k);
|
||||
|
||||
ctx->gf128 = gf128mul_init_4k_lle(&k);
|
||||
memzero_explicit(&k, POLYVAL_BLOCK_SIZE);
|
||||
|
||||
if (!ctx->gf128)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int polyval_init(struct shash_desc *desc)
|
||||
{
|
||||
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
memset(dctx, 0, sizeof(*dctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int polyval_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
||||
u8 *pos;
|
||||
u8 tmp[POLYVAL_BLOCK_SIZE];
|
||||
int n;
|
||||
|
||||
if (dctx->bytes) {
|
||||
n = min(srclen, dctx->bytes);
|
||||
pos = dctx->buffer + dctx->bytes - 1;
|
||||
|
||||
dctx->bytes -= n;
|
||||
srclen -= n;
|
||||
|
||||
while (n--)
|
||||
*pos-- ^= *src++;
|
||||
|
||||
if (!dctx->bytes)
|
||||
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
|
||||
}
|
||||
|
||||
while (srclen >= POLYVAL_BLOCK_SIZE) {
|
||||
copy_and_reverse(tmp, src);
|
||||
crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
|
||||
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
|
||||
src += POLYVAL_BLOCK_SIZE;
|
||||
srclen -= POLYVAL_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
if (srclen) {
|
||||
dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
|
||||
pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1;
|
||||
while (srclen--)
|
||||
*pos-- ^= *src++;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int polyval_final(struct shash_desc *desc, u8 *dst)
|
||||
{
|
||||
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
|
||||
|
||||
if (dctx->bytes)
|
||||
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
|
||||
copy_and_reverse(dst, dctx->buffer);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void polyval_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
gf128mul_free_4k(ctx->gf128);
|
||||
}
|
||||
|
||||
static struct shash_alg polyval_alg = {
|
||||
.digestsize = POLYVAL_DIGEST_SIZE,
|
||||
.init = polyval_init,
|
||||
.update = polyval_update,
|
||||
.final = polyval_final,
|
||||
.setkey = polyval_setkey,
|
||||
.descsize = sizeof(struct polyval_desc_ctx),
|
||||
.base = {
|
||||
.cra_name = "polyval",
|
||||
.cra_driver_name = "polyval-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_blocksize = POLYVAL_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = polyval_exit_tfm,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init polyval_mod_init(void)
|
||||
{
|
||||
return crypto_register_shash(&polyval_alg);
|
||||
}
|
||||
|
||||
static void __exit polyval_mod_exit(void)
|
||||
{
|
||||
crypto_unregister_shash(&polyval_alg);
|
||||
}
|
||||
|
||||
subsys_initcall(polyval_mod_init);
|
||||
module_exit(polyval_mod_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("POLYVAL hash function");
|
||||
MODULE_ALIAS_CRYPTO("polyval");
|
||||
MODULE_ALIAS_CRYPTO("polyval-generic");
|
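The byte-reversal trick described in the header comment above can be stated as a single identity. Paraphrasing RFC 8452, Appendix A (treat the exact operator names here as an assumption, not a quote):

    POLYVAL(H, X_1, ..., X_n) =
        ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)),
                          ByteReverse(X_1), ..., ByteReverse(X_n)))

where mulX_GHASH() multiplies its argument by x in the GHASH field. This is exactly the key transformation performed in polyval_setkey() above (copy_and_reverse() followed by gf128mul_x_lle()), with copy_and_reverse() applied again to each input block in polyval_update() and to the final digest in polyval_final().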
crypto/rsa.c (78 lines changed)
@@ -17,6 +17,11 @@ struct rsa_mpi_key {
|
||||
MPI n;
|
||||
MPI e;
|
||||
MPI d;
|
||||
MPI p;
|
||||
MPI q;
|
||||
MPI dp;
|
||||
MPI dq;
|
||||
MPI qinv;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -35,16 +40,49 @@ static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
|
||||
|
||||
/*
|
||||
* RSADP function [RFC3447 sec 5.1.2]
|
||||
* m = c^d mod n;
|
||||
* m_1 = c^dP mod p;
|
||||
* m_2 = c^dQ mod q;
|
||||
* h = (m_1 - m_2) * qInv mod p;
|
||||
* m = m_2 + q * h;
|
||||
*/
|
||||
static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
|
||||
static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
|
||||
{
|
||||
MPI m2, m12_or_qh;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
/* (1) Validate 0 <= c < n */
|
||||
if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* (2) m = c^d mod n */
|
||||
return mpi_powm(m, c, key->d, key->n);
|
||||
m2 = mpi_alloc(0);
|
||||
m12_or_qh = mpi_alloc(0);
|
||||
if (!m2 || !m12_or_qh)
|
||||
goto err_free_mpi;
|
||||
|
||||
/* (2i) m_1 = c^dP mod p */
|
||||
ret = mpi_powm(m_or_m1_or_h, c, key->dp, key->p);
|
||||
if (ret)
|
||||
goto err_free_mpi;
|
||||
|
||||
/* (2i) m_2 = c^dQ mod q */
|
||||
ret = mpi_powm(m2, c, key->dq, key->q);
|
||||
if (ret)
|
||||
goto err_free_mpi;
|
||||
|
||||
/* (2iii) h = (m_1 - m_2) * qInv mod p */
|
||||
mpi_sub(m12_or_qh, m_or_m1_or_h, m2);
|
||||
mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
|
||||
|
||||
/* (2iv) m = m_2 + q * h */
|
||||
mpi_mul(m12_or_qh, key->q, m_or_m1_or_h);
|
||||
mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
|
||||
|
||||
ret = 0;
|
||||
|
||||
err_free_mpi:
|
||||
mpi_free(m12_or_qh);
|
||||
mpi_free(m2);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
|
||||
@@ -112,7 +150,7 @@ static int rsa_dec(struct akcipher_request *req)
|
||||
if (!c)
|
||||
goto err_free_m;
|
||||
|
||||
ret = _rsa_dec(pkey, m, c);
|
||||
ret = _rsa_dec_crt(pkey, m, c);
|
||||
if (ret)
|
||||
goto err_free_c;
|
||||
|
||||
@@ -134,9 +172,19 @@ static void rsa_free_mpi_key(struct rsa_mpi_key *key)
|
||||
mpi_free(key->d);
|
||||
mpi_free(key->e);
|
||||
mpi_free(key->n);
|
||||
mpi_free(key->p);
|
||||
mpi_free(key->q);
|
||||
mpi_free(key->dp);
|
||||
mpi_free(key->dq);
|
||||
mpi_free(key->qinv);
|
||||
key->d = NULL;
|
||||
key->e = NULL;
|
||||
key->n = NULL;
|
||||
key->p = NULL;
|
||||
key->q = NULL;
|
||||
key->dp = NULL;
|
||||
key->dq = NULL;
|
||||
key->qinv = NULL;
|
||||
}
|
||||
|
||||
static int rsa_check_key_length(unsigned int len)
|
||||
@@ -217,6 +265,26 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
|
||||
if (!mpi_key->n)
|
||||
goto err;
|
||||
|
||||
mpi_key->p = mpi_read_raw_data(raw_key.p, raw_key.p_sz);
|
||||
if (!mpi_key->p)
|
||||
goto err;
|
||||
|
||||
mpi_key->q = mpi_read_raw_data(raw_key.q, raw_key.q_sz);
|
||||
if (!mpi_key->q)
|
||||
goto err;
|
||||
|
||||
mpi_key->dp = mpi_read_raw_data(raw_key.dp, raw_key.dp_sz);
|
||||
if (!mpi_key->dp)
|
||||
goto err;
|
||||
|
||||
mpi_key->dq = mpi_read_raw_data(raw_key.dq, raw_key.dq_sz);
|
||||
if (!mpi_key->dq)
|
||||
goto err;
|
||||
|
||||
mpi_key->qinv = mpi_read_raw_data(raw_key.qinv, raw_key.qinv_sz);
|
||||
if (!mpi_key->qinv)
|
||||
goto err;
|
||||
|
||||
if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
|
||||
rsa_free_mpi_key(mpi_key);
|
||||
return -EINVAL;
|
||||
|
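To make the CRT steps in the new _rsa_dec_crt() concrete, here is a small self-contained sketch with textbook-sized numbers (the classic p = 61, q = 53 example). The kernel of course works on multi-thousand-bit keys through the MPI library; the powm() helper and the constants here are purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Square-and-multiply modular exponentiation on small integers. */
static uint64_t powm(uint64_t b, uint64_t e, uint64_t m)
{
	uint64_t r = 1;

	b %= m;
	while (e) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	/* Textbook key: n = p*q = 3233, e = 17, d = 2753. */
	const uint64_t p = 61, q = 53, n = 3233, d = 2753;
	const uint64_t dp = d % (p - 1);	/* 53 */
	const uint64_t dq = d % (q - 1);	/* 49 */
	const uint64_t qinv = 38;		/* q^-1 mod p */
	const uint64_t c = 2790;		/* ciphertext of m = 65 */

	/* Same steps as _rsa_dec_crt(): RFC 3447, section 5.1.2. */
	uint64_t m1 = powm(c, dp, p);		/* m_1 = c^dP mod p */
	uint64_t m2 = powm(c, dq, q);		/* m_2 = c^dQ mod q */
	uint64_t h  = (m1 + p - m2 % p) % p * qinv % p; /* h = (m_1 - m_2) * qInv mod p */
	uint64_t m  = m2 + q * h;		/* m = m_2 + q * h */

	printf("CRT result:      %llu\n", (unsigned long long)m);
	printf("Plain c^d mod n: %llu\n", (unsigned long long)powm(c, d, n));
	return 0;
}

Both lines should print 65, showing that the CRT path computes the same plaintext as the old single-exponentiation path while working with the smaller exponents dP and dQ.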
crypto/tcrypt.c
@@ -58,7 +58,7 @@
|
||||
*/
|
||||
static unsigned int sec;
|
||||
|
||||
static char *alg = NULL;
|
||||
static char *alg;
|
||||
static u32 type;
|
||||
static u32 mask;
|
||||
static int mode;
|
||||
@@ -71,7 +71,7 @@ static const char *check[] = {
|
||||
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
|
||||
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
|
||||
"khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt",
|
||||
"camellia", "seed", "rmd160",
|
||||
"camellia", "seed", "rmd160", "aria",
|
||||
"lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384",
|
||||
"sha3-512", "streebog256", "streebog512",
|
||||
NULL
|
||||
@@ -1556,6 +1556,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
|
||||
ret += tcrypt_test("rfc3686(ctr(aes))");
|
||||
ret += tcrypt_test("ofb(aes)");
|
||||
ret += tcrypt_test("cfb(aes)");
|
||||
ret += tcrypt_test("xctr(aes)");
|
||||
break;
|
||||
|
||||
case 11:
|
||||
@@ -1669,10 +1670,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
|
||||
ret += tcrypt_test("rmd160");
|
||||
break;
|
||||
|
||||
case 41:
|
||||
ret += tcrypt_test("blake2s-256");
|
||||
break;
|
||||
|
||||
case 42:
|
||||
ret += tcrypt_test("blake2b-512");
|
||||
break;
|
||||
@@ -1729,6 +1726,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
|
||||
ret += tcrypt_test("ccm(sm4)");
|
||||
break;
|
||||
|
||||
case 57:
|
||||
ret += tcrypt_test("polyval");
|
||||
break;
|
||||
|
||||
case 58:
|
||||
ret += tcrypt_test("gcm(aria)");
|
||||
break;
|
||||
|
||||
case 100:
|
||||
ret += tcrypt_test("hmac(md5)");
|
||||
break;
|
||||
@@ -1865,6 +1870,12 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
|
||||
ret += tcrypt_test("cfb(sm4)");
|
||||
ret += tcrypt_test("ctr(sm4)");
|
||||
break;
|
||||
case 192:
|
||||
ret += tcrypt_test("ecb(aria)");
|
||||
ret += tcrypt_test("cbc(aria)");
|
||||
ret += tcrypt_test("cfb(aria)");
|
||||
ret += tcrypt_test("ctr(aria)");
|
||||
break;
|
||||
case 200:
|
||||
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
|
||||
speed_template_16_24_32);
|
||||
@@ -2186,6 +2197,37 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
16, 16, aead_speed_template_19, num_mb);
break;

case 226:
test_cipher_speed("hctr2(aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
break;

case 227:
test_cipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;

case 228:
test_aead_speed("gcm(aria)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
test_aead_speed("gcm(aria)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;

case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -2240,10 +2282,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 316:
test_hash_speed("blake2s-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 317:
test_hash_speed("blake2b-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
@@ -2352,10 +2390,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 416:
test_ahash_speed("blake2s-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 417:
test_ahash_speed("blake2b-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
@@ -4375,30 +4375,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(blake2b_512_tv_template)
}
}, {
.alg = "blake2s-128",
.test = alg_test_hash,
.suite = {
.hash = __VECS(blakes2s_128_tv_template)
}
}, {
.alg = "blake2s-160",
.test = alg_test_hash,
.suite = {
.hash = __VECS(blakes2s_160_tv_template)
}
}, {
.alg = "blake2s-224",
.test = alg_test_hash,
.suite = {
.hash = __VECS(blakes2s_224_tv_template)
}
}, {
.alg = "blake2s-256",
.test = alg_test_hash,
.suite = {
.hash = __VECS(blakes2s_256_tv_template)
}
}, {
.alg = "cbc(aes)",
.test = alg_test_skcipher,
@@ -4412,6 +4388,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(anubis_cbc_tv_template)
},
}, {
.alg = "cbc(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_cbc_tv_template)
},
}, {
.alg = "cbc(blowfish)",
.test = alg_test_skcipher,
@@ -4529,6 +4511,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(aes_cfb_tv_template)
},
}, {
.alg = "cfb(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_cfb_tv_template)
},
}, {
.alg = "cfb(sm4)",
.test = alg_test_skcipher,
@@ -4598,6 +4586,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(aes_ctr_tv_template)
}
}, {
.alg = "ctr(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_ctr_tv_template)
}
}, {
.alg = "ctr(blowfish)",
.test = alg_test_skcipher,
@@ -4858,6 +4852,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(arc4_tv_template)
}
}, {
.alg = "ecb(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_tv_template)
}
}, {
.alg = "ecb(blowfish)",
.test = alg_test_skcipher,
@@ -5074,6 +5074,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.aead = __VECS(aes_gcm_tv_template)
}
}, {
.alg = "gcm(aria)",
.generic_driver = "gcm_base(ctr(aria-generic),ghash-generic)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(aria_gcm_tv_template)
}
}, {
.alg = "gcm(sm4)",
.generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
@@ -5088,6 +5095,14 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(ghash_tv_template)
}
}, {
.alg = "hctr2(aes)",
.generic_driver =
"hctr2_base(xctr(aes-generic),polyval-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_hctr2_tv_template)
}
}, {
.alg = "hmac(md5)",
.test = alg_test_hash,
@@ -5342,6 +5357,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(poly1305_tv_template)
}
}, {
.alg = "polyval",
.test = alg_test_hash,
.suite = {
.hash = __VECS(polyval_tv_template)
}
}, {
.alg = "rfc3686(ctr(aes))",
.test = alg_test_skcipher,
@@ -5548,6 +5569,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.cipher = __VECS(xchacha20_tv_template)
},
}, {
.alg = "xctr(aes)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_xctr_tv_template)
}
}, {
.alg = "xts(aes)",
.generic_driver = "xts(ecb(aes-generic))",
4830 crypto/testmgr.h
File diff suppressed because it is too large
@@ -298,7 +298,7 @@ static const u32 mds[4][256] = {
* multiplication is inefficient without hardware support. To multiply
* faster, I make use of the fact x is a generator for the nonzero elements,
* so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for
* some n in 0..254. Note that that caret is exponentiation in GF(2^8),
* some n in 0..254. Note that caret is exponentiation in GF(2^8),
* *not* polynomial notation. So if I want to compute pq where p and q are
* in GF(2^8), I can just say:
* 1. if p=0 or q=0 then pq=0
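A hedged illustration of the exp/log trick this comment describes: the userspace sketch below builds discrete-log tables for GF(2^8) and multiplies via two table lookups. The reduction polynomial 0x11b and generator 0x03 are borrowed from AES purely so the example is self-contained; Twofish's own field constants differ, but the technique is the same. The names gf_exp, gf_log, gf_init and gf_mul are illustrative, not taken from the kernel source.

#include <stdint.h>
#include <stdio.h>

static uint8_t gf_exp[510];	/* doubled so exp[log p + log q] needs no mod 255 */
static uint8_t gf_log[256];

static void gf_init(void)
{
	uint8_t x = 1;

	for (int i = 0; i < 255; i++) {
		gf_exp[i] = x;
		gf_log[x] = i;
		/* multiply x by the generator 0x03: x*3 = xtime(x) ^ x,
		 * reducing by the (assumed) AES polynomial 0x11b */
		uint8_t x2 = (uint8_t)((x << 1) ^ ((x & 0x80) ? 0x1b : 0));
		x = x2 ^ x;
	}
	for (int i = 255; i < 510; i++)
		gf_exp[i] = gf_exp[i - 255];
}

static uint8_t gf_mul(uint8_t p, uint8_t q)
{
	if (p == 0 || q == 0)
		return 0;				/* rule 1 above */
	return gf_exp[gf_log[p] + gf_log[q]];		/* pq = g^(log p + log q) */
}

int main(void)
{
	gf_init();
	printf("0x57 * 0x83 = 0x%02x\n", gf_mul(0x57, 0x83));	/* prints 0xc1 */
	return 0;
}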
191 crypto/xctr.c (new file)
@@ -0,0 +1,191 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* XCTR: XOR Counter mode - Adapted from ctr.c
*
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
* Copyright 2021 Google LLC
*/

/*
* XCTR mode is a blockcipher mode of operation used to implement HCTR2. XCTR is
* closely related to the CTR mode of operation; the main difference is that CTR
* generates the keystream using E(CTR + IV) whereas XCTR generates the
* keystream using E(CTR ^ IV). This allows implementations to avoid dealing
* with multi-limb integers (as is required in CTR mode). XCTR is also specified
* using little-endian arithmetic which makes it slightly faster on LE machines.
*
* See the HCTR2 paper for more details:
* Length-preserving encryption with HCTR2
* (https://eprint.iacr.org/2021/1441.pdf)
*/

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

/* For now this implementation is limited to 16-byte blocks for simplicity */
#define XCTR_BLOCKSIZE 16

static void crypto_xctr_crypt_final(struct skcipher_walk *walk,
struct crypto_cipher *tfm, u32 byte_ctr)
{
u8 keystream[XCTR_BLOCKSIZE];
const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
crypto_cipher_encrypt_one(tfm, keystream, walk->iv);
crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
}

static int crypto_xctr_crypt_segment(struct skcipher_walk *walk,
struct crypto_cipher *tfm, u32 byte_ctr)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

do {
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
fn(crypto_cipher_tfm(tfm), dst, walk->iv);
crypto_xor(dst, src, XCTR_BLOCKSIZE);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));

le32_add_cpu(&ctr32, 1);

src += XCTR_BLOCKSIZE;
dst += XCTR_BLOCKSIZE;
} while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE);

return nbytes;
}

static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
struct crypto_cipher *tfm, u32 byte_ctr)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
u8 *data = walk->src.virt.addr;
u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

do {
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
fn(crypto_cipher_tfm(tfm), keystream, walk->iv);
crypto_xor(data, keystream, XCTR_BLOCKSIZE);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));

le32_add_cpu(&ctr32, 1);

data += XCTR_BLOCKSIZE;
} while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE);

return nbytes;
}

static int crypto_xctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
u32 byte_ctr = 0;

err = skcipher_walk_virt(&walk, req, false);

while (walk.nbytes >= XCTR_BLOCKSIZE) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_xctr_crypt_inplace(&walk, cipher,
byte_ctr);
else
nbytes = crypto_xctr_crypt_segment(&walk, cipher,
byte_ctr);

byte_ctr += walk.nbytes - nbytes;
err = skcipher_walk_done(&walk, nbytes);
}

if (walk.nbytes) {
crypto_xctr_crypt_final(&walk, cipher, byte_ctr);
err = skcipher_walk_done(&walk, 0);
}

return err;
}

static int crypto_xctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;

inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);

alg = skcipher_ialg_simple(inst);

/* Block size must be 16 bytes. */
err = -EINVAL;
if (alg->cra_blocksize != XCTR_BLOCKSIZE)
goto out_free_inst;

/* XCTR mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;

/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;

inst->alg.encrypt = crypto_xctr_crypt;
inst->alg.decrypt = crypto_xctr_crypt;

err = skcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
}

return err;
}

static struct crypto_template crypto_xctr_tmpl = {
.name = "xctr",
.create = crypto_xctr_create,
.module = THIS_MODULE,
};

static int __init crypto_xctr_module_init(void)
{
return crypto_register_template(&crypto_xctr_tmpl);
}

static void __exit crypto_xctr_module_exit(void)
{
crypto_unregister_template(&crypto_xctr_tmpl);
}

subsys_initcall(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("xctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
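To make the counter handling above easier to see in isolation, here is a hedged, userspace-only sketch of the keystream rule the header comment describes: keystream block i is E(IV ^ le32(i)), with i starting at 1 to mirror the byte_ctr / XCTR_BLOCKSIZE + 1 computation. The helper names (block_encrypt, xctr_keystream, xctr_crypt) are illustrative, block_encrypt() is a toy placeholder rather than a real cipher, and nothing here is kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKSIZE 16

/* Toy stand-in for the underlying 16-byte block cipher (e.g. AES);
 * it exists only so the sketch runs -- it is NOT a real cipher. */
static void block_encrypt(const uint8_t key[BLOCKSIZE],
			  uint8_t dst[BLOCKSIZE],
			  const uint8_t src[BLOCKSIZE])
{
	for (int i = 0; i < BLOCKSIZE; i++)
		dst[i] = src[i] ^ key[i];
}

/* XCTR keystream block i (numbered from 1, as in the code above):
 * keystream_i = E_k(IV ^ le32(i)), versus CTR's E_k(IV + i). */
static void xctr_keystream(const uint8_t key[BLOCKSIZE],
			   const uint8_t iv[BLOCKSIZE],
			   uint32_t i, uint8_t out[BLOCKSIZE])
{
	uint8_t block[BLOCKSIZE];

	memcpy(block, iv, BLOCKSIZE);
	for (int b = 0; b < 4; b++)
		block[b] ^= (uint8_t)(i >> (8 * b));	/* little-endian XOR, no carries */
	block_encrypt(key, out, block);
}

/* XOR-with-keystream is its own inverse, so this both encrypts and decrypts. */
static void xctr_crypt(const uint8_t key[BLOCKSIZE], const uint8_t iv[BLOCKSIZE],
		       uint8_t *data, size_t nbytes)
{
	uint8_t ks[BLOCKSIZE];
	uint32_t blk = 1;

	while (nbytes) {
		size_t n = nbytes < BLOCKSIZE ? nbytes : BLOCKSIZE;

		xctr_keystream(key, iv, blk++, ks);
		for (size_t j = 0; j < n; j++)
			data[j] ^= ks[j];	/* a partial final block is fine */
		data += n;
		nbytes -= n;
	}
}

int main(void)
{
	uint8_t key[BLOCKSIZE] = { 0x01 };
	uint8_t iv[BLOCKSIZE] = { 0x02 };
	uint8_t msg[] = "XCTR keystream demo: two blocks plus a tail";

	xctr_crypt(key, iv, msg, sizeof(msg) - 1);	/* encrypt in place */
	xctr_crypt(key, iv, msg, sizeof(msg) - 1);	/* decrypt back */
	printf("%s\n", msg);
	return 0;
}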