crypto: x86/cast6 - drop CTR mode implementation

CAST6 in CTR mode is never used by the kernel directly, and is highly
unlikely to be relied upon by dm-crypt or algif_skcipher. So let's drop
the accelerated CTR mode implementation, and instead, rely on the CTR
template and the bare cipher.
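
For illustration only (this snippet is not part of the patch): kernel code that
asks for the mode by name still gets a working transform after the change, now
instantiated by the generic CTR template around the bare cast6 cipher. The
sketch below is hypothetical; it assumes CRYPTO_CAST6 and CRYPTO_CTR are
enabled, and the function name cast6_ctr_lookup_example is made up.

/*
 * Hedged sketch: after this change, a request for "ctr(cast6)" is served
 * by the generic CTR template wrapping the bare cast6 cipher, instead of
 * the removed __ctr-cast6-avx skcipher.
 */
#include <linux/err.h>
#include <linux/printk.h>
#include <crypto/skcipher.h>

static int cast6_ctr_lookup_example(void)
{
	struct crypto_skcipher *tfm;

	/* The crypto core instantiates ctr(cast6) from the ctr template. */
	tfm = crypto_alloc_skcipher("ctr(cast6)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Report which driver actually backs the transform. */
	pr_info("ctr(cast6) backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

	crypto_free_skcipher(tfm);
	return 0;
}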

Acked-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Ard Biesheuvel 2021-01-05 17:47:57 +01:00 committed by Herbert Xu
parent e2d60e2f59
commit 7a6623cc68
3 changed files with 1 addition and 76 deletions


@@ -410,31 +410,3 @@ SYM_FUNC_START(cast6_cbc_dec_8way)
 	FRAME_END
 	ret;
 SYM_FUNC_END(cast6_cbc_dec_8way)
-
-SYM_FUNC_START(cast6_ctr_8way)
-	/* input:
-	 *	%rdi: ctx, CTX
-	 *	%rsi: dst
-	 *	%rdx: src
-	 *	%rcx: iv (little endian, 128bit)
-	 */
-	FRAME_BEGIN
-	pushq %r12;
-	pushq %r15;
-
-	movq %rdi, CTX;
-	movq %rsi, %r11;
-	movq %rdx, %r12;
-
-	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
-		      RD2, RX, RKR, RKM);
-
-	call __cast6_enc_blk8;
-
-	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
-
-	popq %r15;
-	popq %r12;
-	FRAME_END
-	ret;
-SYM_FUNC_END(cast6_ctr_8way)


@@ -23,8 +23,6 @@ asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
 asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);

 asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
-asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
-			       le128 *iv);

 static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
 				 const u8 *key, unsigned int keylen)
@@ -32,19 +30,6 @@ static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
 	return cast6_setkey(&tfm->base, key, keylen);
 }

-static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
-{
-	be128 ctrblk;
-	u128 *dst = (u128 *)d;
-	const u128 *src = (const u128 *)s;
-
-	le128_to_be128(&ctrblk, iv);
-	le128_inc(iv);
-
-	__cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
-	u128_xor(dst, src, (u128 *)&ctrblk);
-}
-
 static const struct common_glue_ctx cast6_enc = {
 	.num_funcs = 2,
 	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
@@ -58,19 +43,6 @@ static const struct common_glue_ctx cast6_enc = {
 	} }
 };

-static const struct common_glue_ctx cast6_ctr = {
-	.num_funcs = 2,
-	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
-
-	.funcs = { {
-		.num_blocks = CAST6_PARALLEL_BLOCKS,
-		.fn_u = { .ctr = cast6_ctr_8way }
-	}, {
-		.num_blocks = 1,
-		.fn_u = { .ctr = cast6_crypt_ctr }
-	} }
-};
-
 static const struct common_glue_ctx cast6_dec = {
 	.num_funcs = 2,
 	.fpu_blocks_limit = CAST6_PARALLEL_BLOCKS,
@@ -117,11 +89,6 @@ static int cbc_decrypt(struct skcipher_request *req)
 	return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
 }

-static int ctr_crypt(struct skcipher_request *req)
-{
-	return glue_ctr_req_128bit(&cast6_ctr, req);
-}
-
 static struct skcipher_alg cast6_algs[] = {
 	{
 		.base.cra_name		= "__ecb(cast6)",
@@ -150,21 +117,6 @@ static struct skcipher_alg cast6_algs[] = {
 		.setkey			= cast6_setkey_skcipher,
 		.encrypt		= cbc_encrypt,
 		.decrypt		= cbc_decrypt,
-	}, {
-		.base.cra_name		= "__ctr(cast6)",
-		.base.cra_driver_name	= "__ctr-cast6-avx",
-		.base.cra_priority	= 200,
-		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-		.base.cra_blocksize	= 1,
-		.base.cra_ctxsize	= sizeof(struct cast6_ctx),
-		.base.cra_module	= THIS_MODULE,
-		.min_keysize		= CAST6_MIN_KEY_SIZE,
-		.max_keysize		= CAST6_MAX_KEY_SIZE,
-		.ivsize			= CAST6_BLOCK_SIZE,
-		.chunksize		= CAST6_BLOCK_SIZE,
-		.setkey			= cast6_setkey_skcipher,
-		.encrypt		= ctr_crypt,
-		.decrypt		= ctr_crypt,
 	},
 };



@@ -1397,6 +1397,7 @@ config CRYPTO_CAST6_AVX_X86_64
 	select CRYPTO_GLUE_HELPER_X86
 	select CRYPTO_SIMD
 	imply CRYPTO_XTS
+	imply CRYPTO_CTR
 	help
 	  The CAST6 encryption algorithm (synonymous with CAST-256) is
 	  described in RFC2612.