/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm64/crypto/aes-modes.S - chaining mode wrappers for AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

/* included by aes-ce.S and aes-neon.S */

	.text
	.align		4

#ifndef MAX_STRIDE
#define MAX_STRIDE	4
#endif

#if MAX_STRIDE == 4
#define ST4(x...) x
#define ST5(x...)
#else
#define ST4(x...)
#define ST5(x...) x
#endif
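
/*
 * MAX_STRIDE is the interleave factor of the bulk loops below.  It defaults
 * to 4 and may be predefined to 5 by the including file; the Crypto
 * Extensions implementation is the one intended to use the 5-way interleave.
 * ST4() and ST5() emit their argument only in the 4-way or 5-way
 * configuration respectively, so a line such as
 *
 * ST5(	ld1		{v4.16b}, [x1], #16		)
 *
 * assembles to nothing in a 4-way build.  The aes_{en,de}crypt_block4x/5x
 * helpers defined next process four (or five) blocks held in v0-v3 (and v4)
 * in parallel, using the round keys at x2 and the round count in w3.
 */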

SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
	encrypt_block4x	v0, v1, v2, v3, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_encrypt_block4x)

SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
	decrypt_block4x	v0, v1, v2, v3, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_decrypt_block4x)

#if MAX_STRIDE == 5
SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
	encrypt_block5x	v0, v1, v2, v3, v4, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_encrypt_block5x)

SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
	decrypt_block5x	v0, v1, v2, v3, v4, w3, x2, x8, w7
	ret
SYM_FUNC_END(aes_decrypt_block5x)
#endif

	/*
	 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks)
	 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks)
	 */
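
	/*
	 * Illustrative only: the C glue code is expected to call these with
	 * whole 16-byte blocks, with the NEON unit enabled, roughly as
	 *
	 *	kernel_neon_begin();
	 *	aes_ecb_encrypt(dst, src, ctx->key_enc, rounds, blocks);
	 *	kernel_neon_end();
	 *
	 * The bulk loop consumes MAX_STRIDE blocks per iteration; e.g. with
	 * blocks == 7 and MAX_STRIDE == 5, one 5-way pass runs, the counter
	 * goes negative on the next subtraction, and the remaining two blocks
	 * fall through to the single-block .Lecbencloop tail.
	 */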

AES_FUNC_START(aes_ecb_encrypt)
	frame_push	0

	enc_prepare	w3, x2, x5

.LecbencloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lecbenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
ST4(	bl		aes_encrypt_block4x		)
ST5(	ld1		{v4.16b}, [x1], #16		)
ST5(	bl		aes_encrypt_block5x		)
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	b		.LecbencloopNx
.Lecbenc1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lecbencout
.Lecbencloop:
	ld1		{v0.16b}, [x1], #16		/* get next pt block */
	encrypt_block	v0, w3, x2, x5, w6
	st1		{v0.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lecbencloop
.Lecbencout:
	frame_pop
	ret
AES_FUNC_END(aes_ecb_encrypt)

AES_FUNC_START(aes_ecb_decrypt)
	frame_push	0

	dec_prepare	w3, x2, x5

.LecbdecloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lecbdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
ST4(	bl		aes_decrypt_block4x		)
ST5(	ld1		{v4.16b}, [x1], #16		)
ST5(	bl		aes_decrypt_block5x		)
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	b		.LecbdecloopNx
.Lecbdec1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lecbdecout
.Lecbdecloop:
	ld1		{v0.16b}, [x1], #16		/* get next ct block */
	decrypt_block	v0, w3, x2, x5, w6
	st1		{v0.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lecbdecloop
.Lecbdecout:
	frame_pop
	ret
AES_FUNC_END(aes_ecb_decrypt)

	/*
	 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks, u8 iv[])
	 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int blocks, u8 iv[])
	 * aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
	 *			 int rounds, int blocks, u8 iv[],
	 *			 u32 const rk2[]);
	 * aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
	 *			 int rounds, int blocks, u8 iv[],
	 *			 u32 const rk2[]);
	 */
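
	/*
	 * The CBC recurrence implemented below is C[i] = E(K, P[i] ^ C[i-1]),
	 * with C[-1] = IV.  The ESSIV entry points differ only in how the IV
	 * is derived: the caller-supplied IV is first encrypted with the
	 * second key schedule rk2 (always a 256-bit key here, hence the
	 * hard-coded 14 rounds) before joining the ordinary CBC code.
	 */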

AES_FUNC_START(aes_essiv_cbc_encrypt)
	ld1		{v4.16b}, [x5]			/* get iv */

	mov		w8, #14				/* AES-256: 14 rounds */
	enc_prepare	w8, x6, x7
	encrypt_block	v4, w8, x6, x7, w9
	enc_switch_key	w3, x2, x6
	b		.Lcbcencloop4x

AES_FUNC_START(aes_cbc_encrypt)
	ld1		{v4.16b}, [x5]			/* get iv */
	enc_prepare	w3, x2, x6

.Lcbcencloop4x:
	subs		w4, w4, #4
	bmi		.Lcbcenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
	eor		v0.16b, v0.16b, v4.16b		/* ..and xor with iv */
	encrypt_block	v0, w3, x2, x6, w7
	eor		v1.16b, v1.16b, v0.16b
	encrypt_block	v1, w3, x2, x6, w7
	eor		v2.16b, v2.16b, v1.16b
	encrypt_block	v2, w3, x2, x6, w7
	eor		v3.16b, v3.16b, v2.16b
	encrypt_block	v3, w3, x2, x6, w7
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v3.16b
	b		.Lcbcencloop4x
.Lcbcenc1x:
	adds		w4, w4, #4
	beq		.Lcbcencout
.Lcbcencloop:
	ld1		{v0.16b}, [x1], #16		/* get next pt block */
	eor		v4.16b, v4.16b, v0.16b		/* ..and xor with iv */
	encrypt_block	v4, w3, x2, x6, w7
	st1		{v4.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lcbcencloop
.Lcbcencout:
	st1		{v4.16b}, [x5]			/* return iv */
	ret
AES_FUNC_END(aes_cbc_encrypt)
AES_FUNC_END(aes_essiv_cbc_encrypt)
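
	/*
	 * CBC encryption above is inherently serial, since each block's input
	 * depends on the previous ciphertext block.  Decryption computes
	 * P[i] = D(K, C[i]) ^ C[i-1] from ciphertext that is all available up
	 * front, so the routines below decrypt MAX_STRIDE blocks in parallel
	 * and only afterwards XOR in the preceding ciphertext blocks, which
	 * are kept in spare NEON registers and cbciv.
	 */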

AES_FUNC_START(aes_essiv_cbc_decrypt)
	ld1		{cbciv.16b}, [x5]		/* get iv */

	mov		w8, #14				/* AES-256: 14 rounds */
	enc_prepare	w8, x6, x7
	encrypt_block	cbciv, w8, x6, x7, w9
	b		.Lessivcbcdecstart

AES_FUNC_START(aes_cbc_decrypt)
	ld1		{cbciv.16b}, [x5]		/* get iv */
.Lessivcbcdecstart:
	frame_push	0
	dec_prepare	w3, x2, x6

.LcbcdecloopNx:
	subs		w4, w4, #MAX_STRIDE
	bmi		.Lcbcdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
#if MAX_STRIDE == 5
	ld1		{v4.16b}, [x1], #16		/* get 1 ct block */
	mov		v5.16b, v0.16b
	mov		v6.16b, v1.16b
	mov		v7.16b, v2.16b
	bl		aes_decrypt_block5x
	sub		x1, x1, #32
	eor		v0.16b, v0.16b, cbciv.16b
	eor		v1.16b, v1.16b, v5.16b
	ld1		{v5.16b}, [x1], #16		/* reload 1 ct block */
	ld1		{cbciv.16b}, [x1], #16		/* reload 1 ct block */
	eor		v2.16b, v2.16b, v6.16b
	eor		v3.16b, v3.16b, v7.16b
	eor		v4.16b, v4.16b, v5.16b
#else
	mov		v4.16b, v0.16b
	mov		v5.16b, v1.16b
	mov		v6.16b, v2.16b
	bl		aes_decrypt_block4x
	sub		x1, x1, #16
	eor		v0.16b, v0.16b, cbciv.16b
	eor		v1.16b, v1.16b, v4.16b
	ld1		{cbciv.16b}, [x1], #16		/* reload 1 ct block */
	eor		v2.16b, v2.16b, v5.16b
	eor		v3.16b, v3.16b, v6.16b
#endif
	st1		{v0.16b-v3.16b}, [x0], #64
ST5(	st1		{v4.16b}, [x0], #16		)
	b		.LcbcdecloopNx
.Lcbcdec1x:
	adds		w4, w4, #MAX_STRIDE
	beq		.Lcbcdecout
.Lcbcdecloop:
	ld1		{v1.16b}, [x1], #16		/* get next ct block */
	mov		v0.16b, v1.16b			/* ...and copy to v0 */
	decrypt_block	v0, w3, x2, x6, w7
	eor		v0.16b, v0.16b, cbciv.16b	/* xor with iv => pt */
	mov		cbciv.16b, v1.16b		/* ct is next iv */
	st1		{v0.16b}, [x0], #16
	subs		w4, w4, #1
	bne		.Lcbcdecloop
.Lcbcdecout:
	st1		{cbciv.16b}, [x5]		/* return iv */
	frame_pop
	ret
AES_FUNC_END(aes_cbc_decrypt)
AES_FUNC_END(aes_essiv_cbc_decrypt)

	/*
	 * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
	 *		       int rounds, int bytes, u8 const iv[])
	 * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
	 *		       int rounds, int bytes, u8 const iv[])
	 */
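
	/*
	 * These routines handle the final two blocks of a CBC ciphertext
	 * stealing operation, where the last block may be partial (the glue
	 * code is expected to pass between 17 and 32 bytes here).  The permute
	 * vectors loaded from .Lcts_permute_table let tbl/tbx shift the
	 * partial block into place, and the two final output pieces are
	 * written with overlapping stores so they end up in the order the
	 * ciphertext stealing convention requires.
	 */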

AES_FUNC_START(aes_cbc_cts_encrypt)
	adr_l		x8, .Lcts_permute_table
	sub		x4, x4, #16
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	ld1		{v3.16b}, [x8]
	ld1		{v4.16b}, [x9]

	ld1		{v0.16b}, [x1], x4		/* overlapping loads */
	ld1		{v1.16b}, [x1]

	ld1		{v5.16b}, [x5]			/* get iv */
	enc_prepare	w3, x2, x6

	eor		v0.16b, v0.16b, v5.16b		/* xor with iv */
	tbl		v1.16b, {v1.16b}, v4.16b
	encrypt_block	v0, w3, x2, x6, w7

	eor		v1.16b, v1.16b, v0.16b
	tbl		v0.16b, {v0.16b}, v3.16b
	encrypt_block	v1, w3, x2, x6, w7

	add		x4, x0, x4
	st1		{v0.16b}, [x4]			/* overlapping stores */
	st1		{v1.16b}, [x0]
	ret
AES_FUNC_END(aes_cbc_cts_encrypt)

AES_FUNC_START(aes_cbc_cts_decrypt)
	adr_l		x8, .Lcts_permute_table
	sub		x4, x4, #16
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	ld1		{v3.16b}, [x8]
	ld1		{v4.16b}, [x9]

	ld1		{v0.16b}, [x1], x4		/* overlapping loads */
	ld1		{v1.16b}, [x1]

	ld1		{v5.16b}, [x5]			/* get iv */
	dec_prepare	w3, x2, x6

	decrypt_block	v0, w3, x2, x6, w7
	tbl		v2.16b, {v0.16b}, v3.16b
	eor		v2.16b, v2.16b, v1.16b

	tbx		v0.16b, {v1.16b}, v4.16b
	decrypt_block	v0, w3, x2, x6, w7
	eor		v0.16b, v0.16b, v5.16b		/* xor with iv */

	add		x4, x0, x4
	st1		{v2.16b}, [x4]			/* overlapping stores */
	st1		{v0.16b}, [x0]
	ret
AES_FUNC_END(aes_cbc_cts_decrypt)

	.section	".rodata", "a"
	.align		6
.Lcts_permute_table:
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0xf
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.byte		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	.previous
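
	/*
	 * .Lcts_permute_table supplies the tbl/tbx index vectors used by the
	 * CTS routines above and the CTR/XCTR tail handling below: indexing
	 * into it at an offset derived from the residual byte count yields a
	 * mask that shifts a partial block into position, with the 0xff
	 * entries producing zero bytes (tbl) or leaving bytes untouched (tbx).
	 */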

	/*
	 * This macro generates the code for CTR and XCTR mode.
	 */
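
	/*
	 * CTR keeps a 128-bit big-endian counter in vctr and increments it by
	 * one for every block.  XCTR (used by HCTR2) never increments the IV;
	 * instead, each block's 1-based index within the message (continuing
	 * from byte_ctr / 16) is XORed, little-endian, into the first
	 * quadword of the IV.
	 */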

	.macro		ctr_encrypt xctr
	// Arguments
	OUT		.req x0
	IN		.req x1
	KEY		.req x2
	ROUNDS_W	.req w3
	BYTES_W		.req w4
	IV		.req x5
	BYTE_CTR_W	.req w6		// XCTR only
	// Intermediate values
	CTR_W		.req w11	// XCTR only
	CTR		.req x11	// XCTR only
	IV_PART		.req x12
	BLOCKS		.req x13
	BLOCKS_W	.req w13

	frame_push	0

	enc_prepare	ROUNDS_W, KEY, IV_PART
	ld1		{vctr.16b}, [IV]

	/*
	 * Keep 64 bits of the IV in a register. For CTR mode this lets us
	 * easily increment the IV. For XCTR mode this lets us efficiently XOR
	 * the 64-bit counter with the IV.
	 */
	.if \xctr
		umov		IV_PART, vctr.d[0]
		lsr		CTR_W, BYTE_CTR_W, #4
	.else
		umov		IV_PART, vctr.d[1]
		rev		IV_PART, IV_PART
	.endif

.LctrloopNx\xctr:
	add		BLOCKS_W, BYTES_W, #15
	sub		BYTES_W, BYTES_W, #MAX_STRIDE << 4
	lsr		BLOCKS_W, BLOCKS_W, #4
	mov		w8, #MAX_STRIDE
	cmp		BLOCKS_W, w8
	csel		BLOCKS_W, BLOCKS_W, w8, lt

	/*
	 * Set up the counter values in v0-v{MAX_STRIDE-1}.
	 *
	 * If we are encrypting less than MAX_STRIDE blocks, the tail block
	 * handling code expects the last keystream block to be in
	 * v{MAX_STRIDE-1}. For example: if encrypting two blocks with
	 * MAX_STRIDE=5, then v3 and v4 should have the next two counter blocks.
	 */
	.if \xctr
		add		CTR, CTR, BLOCKS
	.else
		adds		IV_PART, IV_PART, BLOCKS
	.endif

	mov		v0.16b, vctr.16b
	mov		v1.16b, vctr.16b
	mov		v2.16b, vctr.16b
	mov		v3.16b, vctr.16b
ST5(	mov		v4.16b, vctr.16b		)
	.if \xctr
		sub		x6, CTR, #MAX_STRIDE - 1
		sub		x7, CTR, #MAX_STRIDE - 2
		sub		x8, CTR, #MAX_STRIDE - 3
		sub		x9, CTR, #MAX_STRIDE - 4
ST5(		sub		x10, CTR, #MAX_STRIDE - 5	)
		eor		x6, x6, IV_PART
		eor		x7, x7, IV_PART
		eor		x8, x8, IV_PART
		eor		x9, x9, IV_PART
ST5(		eor		x10, x10, IV_PART	)
		mov		v0.d[0], x6
		mov		v1.d[0], x7
		mov		v2.d[0], x8
		mov		v3.d[0], x9
ST5(		mov		v4.d[0], x10	)
	.else
		bcs		0f
		.subsection	1
		/*
		 * This subsection handles carries.
		 *
		 * Conditional branching here is allowed with respect to time
		 * invariance since the branches are dependent on the IV instead
		 * of the plaintext or key. This code is rarely executed in
		 * practice anyway.
		 */

		/* Apply carry to outgoing counter. */
0:		umov		x8, vctr.d[0]
		rev		x8, x8
		add		x8, x8, #1
		rev		x8, x8
		ins		vctr.d[0], x8

		/*
		 * Apply carry to counter blocks if needed.
		 *
		 * Since the carry flag was set, we know 0 <= IV_PART <
		 * MAX_STRIDE. Using the value of IV_PART we can determine how
		 * many counter blocks need to be updated.
		 */
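		/*
		 * The computed branch below jumps IV_PART * 8 bytes back from
		 * label 1, i.e. into a ladder of (bti c ; mov) pairs of 8
		 * bytes each, so that exactly the last IV_PART counter blocks
		 * get their upper counter half updated from the freshly
		 * carried vctr.d[0].
		 */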
		cbz		IV_PART, 2f
		adr		x16, 1f
		sub		x16, x16, IV_PART, lsl #3
		br		x16
		bti		c
		mov		v0.d[0], vctr.d[0]
		bti		c
		mov		v1.d[0], vctr.d[0]
		bti		c
		mov		v2.d[0], vctr.d[0]
		bti		c
		mov		v3.d[0], vctr.d[0]
ST5(		bti		c		)
ST5(		mov		v4.d[0], vctr.d[0]	)
1:		b		2f
		.previous

2:		rev		x7, IV_PART
		ins		vctr.d[1], x7
		sub		x7, IV_PART, #MAX_STRIDE - 1
		sub		x8, IV_PART, #MAX_STRIDE - 2
		sub		x9, IV_PART, #MAX_STRIDE - 3
		rev		x7, x7
		rev		x8, x8
		mov		v1.d[1], x7
		rev		x9, x9
ST5(		sub		x10, IV_PART, #MAX_STRIDE - 4	)
		mov		v2.d[1], x8
ST5(		rev		x10, x10	)
		mov		v3.d[1], x9
ST5(		mov		v4.d[1], x10	)
	.endif

	/*
	 * If there are at least MAX_STRIDE blocks left, XOR the data with
	 * keystream and store. Otherwise jump to tail handling.
	 */
	tbnz		BYTES_W, #31, .Lctrtail\xctr
	ld1		{v5.16b-v7.16b}, [IN], #48
ST4(	bl		aes_encrypt_block4x		)
ST5(	bl		aes_encrypt_block5x		)
	eor		v0.16b, v5.16b, v0.16b
ST4(	ld1		{v5.16b}, [IN], #16		)
	eor		v1.16b, v6.16b, v1.16b
ST5(	ld1		{v5.16b-v6.16b}, [IN], #32	)
	eor		v2.16b, v7.16b, v2.16b
	eor		v3.16b, v5.16b, v3.16b
ST5(	eor		v4.16b, v6.16b, v4.16b		)
	st1		{v0.16b-v3.16b}, [OUT], #64
ST5(	st1		{v4.16b}, [OUT], #16		)
	cbz		BYTES_W, .Lctrout\xctr
	b		.LctrloopNx\xctr

.Lctrout\xctr:
	.if !\xctr
		st1		{vctr.16b}, [IV]	/* return next CTR value */
	.endif
	frame_pop
	ret

.Lctrtail\xctr:
	/*
	 * Handle up to MAX_STRIDE * 16 - 1 bytes of plaintext
	 *
	 * This code expects the last keystream block to be in v{MAX_STRIDE-1}.
	 * For example: if encrypting two blocks with MAX_STRIDE=5, then v3 and
	 * v4 should have the next two counter blocks.
	 *
	 * This allows us to store the ciphertext by writing to overlapping
	 * regions of memory. Any invalid ciphertext blocks get overwritten by
	 * correctly computed blocks. This approach greatly simplifies the
	 * logic for storing the ciphertext.
	 */
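	/*
	 * The x14-x16 increments computed below are each either 16 or 0, so a
	 * fixed sequence of loads and stores covers any residual length;
	 * unneeded slots simply re-access the same address.  x13 holds the
	 * length of the final, possibly partial, block, and the permute
	 * vector loaded via x9 shifts its keystream into place for the
	 * overlapping store.
	 */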
	mov		x16, #16
	ands		w7, BYTES_W, #0xf
	csel		x13, x7, x16, ne

ST5(	cmp		BYTES_W, #64 - (MAX_STRIDE << 4))
ST5(	csel		x14, x16, xzr, gt		)
	cmp		BYTES_W, #48 - (MAX_STRIDE << 4)
	csel		x15, x16, xzr, gt
	cmp		BYTES_W, #32 - (MAX_STRIDE << 4)
	csel		x16, x16, xzr, gt
	cmp		BYTES_W, #16 - (MAX_STRIDE << 4)

	adr_l		x9, .Lcts_permute_table
	add		x9, x9, x13
	ble		.Lctrtail1x\xctr

ST5(	ld1		{v5.16b}, [IN], x14		)
	ld1		{v6.16b}, [IN], x15
	ld1		{v7.16b}, [IN], x16

ST4(	bl		aes_encrypt_block4x		)
ST5(	bl		aes_encrypt_block5x		)

	ld1		{v8.16b}, [IN], x13
	ld1		{v9.16b}, [IN]
	ld1		{v10.16b}, [x9]

ST4(	eor		v6.16b, v6.16b, v0.16b		)
ST4(	eor		v7.16b, v7.16b, v1.16b		)
ST4(	tbl		v3.16b, {v3.16b}, v10.16b	)
ST4(	eor		v8.16b, v8.16b, v2.16b		)
ST4(	eor		v9.16b, v9.16b, v3.16b		)

ST5(	eor		v5.16b, v5.16b, v0.16b		)
ST5(	eor		v6.16b, v6.16b, v1.16b		)
ST5(	tbl		v4.16b, {v4.16b}, v10.16b	)
ST5(	eor		v7.16b, v7.16b, v2.16b		)
ST5(	eor		v8.16b, v8.16b, v3.16b		)
ST5(	eor		v9.16b, v9.16b, v4.16b		)

ST5(	st1		{v5.16b}, [OUT], x14		)
	st1		{v6.16b}, [OUT], x15
	st1		{v7.16b}, [OUT], x16
	add		x13, x13, OUT
	st1		{v9.16b}, [x13]			// overlapping stores
	st1		{v8.16b}, [OUT]
	b		.Lctrout\xctr

.Lctrtail1x\xctr:
	/*
	 * Handle <= 16 bytes of plaintext
	 *
	 * This code always reads and writes 16 bytes.  To avoid out of bounds
	 * accesses, XCTR and CTR modes must use a temporary buffer when
	 * encrypting/decrypting less than 16 bytes.
	 *
	 * This code is unusual in that it loads the input and stores the output
	 * relative to the end of the buffers rather than relative to the start.
	 * This causes unusual behaviour when encrypting/decrypting less than 16
	 * bytes; the end of the data is expected to be at the end of the
	 * temporary buffer rather than the start of the data being at the start
	 * of the temporary buffer.
	 */
	sub		x8, x7, #16
	csel		x7, x7, x8, eq
	add		IN, IN, x7
	add		OUT, OUT, x7
	ld1		{v5.16b}, [IN]
	ld1		{v6.16b}, [OUT]
ST5(	mov		v3.16b, v4.16b			)
	encrypt_block	v3, ROUNDS_W, KEY, x8, w7
	ld1		{v10.16b-v11.16b}, [x9]
	tbl		v3.16b, {v3.16b}, v10.16b
	sshr		v11.16b, v11.16b, #7
	eor		v5.16b, v5.16b, v3.16b
	bif		v5.16b, v6.16b, v11.16b
	st1		{v5.16b}, [OUT]
	b		.Lctrout\xctr

	// Arguments
	.unreq OUT
	.unreq IN
	.unreq KEY
	.unreq ROUNDS_W
	.unreq BYTES_W
	.unreq IV
	.unreq BYTE_CTR_W	// XCTR only
	// Intermediate values
	.unreq CTR_W		// XCTR only
	.unreq CTR		// XCTR only
	.unreq IV_PART
	.unreq BLOCKS
	.unreq BLOCKS_W
	.endm

	/*
	 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		   int bytes, u8 ctr[])
	 *
	 * The input and output buffers must always be at least 16 bytes even if
	 * encrypting/decrypting less than 16 bytes.  Otherwise out of bounds
	 * accesses will occur.  The data to be encrypted/decrypted is expected
	 * to be at the end of this 16-byte temporary buffer rather than the
	 * start.
	 */
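
	/*
	 * Equivalent scalar pseudocode (illustrative only, helper names are
	 * made up):
	 *
	 *	for (i = 0; i < DIV_ROUND_UP(bytes, 16); i++) {
	 *		u8 ks[16];
	 *
	 *		aes_encrypt(rk, ks, ctr);	// ks = E(K, ctr)
	 *		xor_bytes(out + 16 * i, in + 16 * i, ks,
	 *			  min(16, bytes - 16 * i));
	 *		be128_inc(ctr);			// big-endian 128-bit + 1
	 *	}
	 *
	 * On return, ctr[] holds the counter to use for the next call.
	 */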

AES_FUNC_START(aes_ctr_encrypt)
	ctr_encrypt	0
AES_FUNC_END(aes_ctr_encrypt)

	/*
	 * aes_xctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
	 *		    int bytes, u8 const iv[], int byte_ctr)
	 *
	 * The input and output buffers must always be at least 16 bytes even if
	 * encrypting/decrypting less than 16 bytes.  Otherwise out of bounds
	 * accesses will occur.  The data to be encrypted/decrypted is expected
	 * to be at the end of this 16-byte temporary buffer rather than the
	 * start.
	 */
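
	/*
	 * XCTR derives the keystream for the i-th block (1-based within the
	 * message, continuing from byte_ctr / 16) as
	 *
	 *	KS[i] = E(K, iv ^ le(i))
	 *
	 * where le(i) places the block index little-endian in the first
	 * quadword of the block.  Unlike CTR, the IV itself is never
	 * incremented.
	 */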

AES_FUNC_START(aes_xctr_encrypt)
	ctr_encrypt	1
AES_FUNC_END(aes_xctr_encrypt)

	/*
	 * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
	 *		   int bytes, u8 const rk2[], u8 iv[], int first)
	 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
	 *		   int bytes, u8 const rk2[], u8 iv[], int first)
	 */

	.macro		next_tweak, out, in, tmp
	sshr		\tmp\().2d,  \in\().2d,   #63
	and		\tmp\().16b, \tmp\().16b, xtsmask.16b
	add		\out\().2d,  \in\().2d,   \in\().2d
	ext		\tmp\().16b, \tmp\().16b, \tmp\().16b, #8
	eor		\out\().16b, \out\().16b, \tmp\().16b
	.endm

	.macro		xts_load_mask, tmp
	movi		xtsmask.2s, #0x1
	movi		\tmp\().2s, #0x87
	uzp1		xtsmask.4s, xtsmask.4s, \tmp\().4s
	.endm
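/*
 * next_tweak computes the usual XTS tweak update: multiplication by x in
 * GF(2^128) modulo x^128 + x^7 + x^2 + x + 1, i.e. a 128-bit left shift with
 * the carry out of bit 127 folded back in as 0x87.  xts_load_mask sets
 * xtsmask.2d = { 0x1, 0x87 } so the sshr/and/ext/eor sequence applies both
 * the inter-lane carry and the reduction.  An equivalent C sketch, assuming
 * two little-endian 64-bit lanes (illustration only):
 *
 *	void next_tweak(u64 t[2])
 *	{
 *		u64 carry_lo = (s64)t[0] >> 63;		// all-ones if bit 63 set
 *		u64 carry_hi = (s64)t[1] >> 63;		// all-ones if bit 127 set
 *
 *		t[0] = (t[0] << 1) ^ (carry_hi & 0x87);	// GF(2^128) reduction
 *		t[1] = (t[1] << 1) ^ (carry_lo & 0x1);	// carry into high half
 *	}
 */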

AES_FUNC_START(aes_xts_encrypt)
	frame_push	0

	ld1		{v4.16b}, [x6]
	xts_load_mask	v8
	cbz		w7, .Lxtsencnotfirst

	enc_prepare	w3, x5, x8
	xts_cts_skip_tw	w7, .LxtsencNx
	encrypt_block	v4, w3, x5, x8, w7		/* first tweak */
	enc_switch_key	w3, x2, x8
	b		.LxtsencNx

.Lxtsencnotfirst:
	enc_prepare	w3, x2, x8
.LxtsencloopNx:
	next_tweak	v4, v4, v8
.LxtsencNx:
	subs		w4, w4, #64
	bmi		.Lxtsenc1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 pt blocks */
	next_tweak	v5, v4, v8
	eor		v0.16b, v0.16b, v4.16b
	next_tweak	v6, v5, v8
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	next_tweak	v7, v6, v8
	eor		v3.16b, v3.16b, v7.16b
	bl		aes_encrypt_block4x
	eor		v3.16b, v3.16b, v7.16b
	eor		v0.16b, v0.16b, v4.16b
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v7.16b
	cbz		w4, .Lxtsencret
	xts_reload_mask	v8
	b		.LxtsencloopNx
.Lxtsenc1x:
	adds		w4, w4, #64
	beq		.Lxtsencout
	subs		w4, w4, #16
	bmi		.LxtsencctsNx
.Lxtsencloop:
	ld1		{v0.16b}, [x1], #16
.Lxtsencctsout:
	eor		v0.16b, v0.16b, v4.16b
	encrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v4.16b
	cbz		w4, .Lxtsencout
	subs		w4, w4, #16
	next_tweak	v4, v4, v8
	bmi		.Lxtsenccts
	st1		{v0.16b}, [x0], #16
	b		.Lxtsencloop
.Lxtsencout:
	st1		{v0.16b}, [x0]
.Lxtsencret:
	st1		{v4.16b}, [x6]
	frame_pop
	ret

.LxtsencctsNx:
	mov		v0.16b, v3.16b
	sub		x0, x0, #16
.Lxtsenccts:
	adr_l		x8, .Lcts_permute_table

	add		x1, x1, w4, sxtw	/* rewind input pointer */
	add		w4, w4, #16		/* # bytes in final block */
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	add		x4, x0, x4		/* output address of final block */

	ld1		{v1.16b}, [x1]		/* load final block */
	ld1		{v2.16b}, [x8]
	ld1		{v3.16b}, [x9]

	tbl		v2.16b, {v0.16b}, v2.16b
	tbx		v0.16b, {v1.16b}, v3.16b
	st1		{v2.16b}, [x4]		/* overlapping stores */
	mov		w4, wzr
	b		.Lxtsencctsout
AES_FUNC_END(aes_xts_encrypt)
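/*
 * The .Lxtsenccts path implements standard XTS ciphertext stealing for a
 * trailing partial block, using .Lcts_permute_table and overlapping stores
 * instead of explicit copies.  In byte-level terms (a sketch of the scheme
 * only, not of the register allocation; tail = bytes in the final partial
 * block, and aes_xts_encrypt_block()/tweak[]/p_*/c_* are hypothetical names
 * standing in for "encrypt one block with the given tweak"):
 *
 *	u8 cc[16], pp[16];
 *
 *	aes_xts_encrypt_block(cc, p_last_full, tweak[m - 1]);
 *	memcpy(c_final, cc, tail);			// truncated final block
 *	memcpy(pp, p_tail, tail);
 *	memcpy(pp + tail, cc + tail, 16 - tail);	// steal ciphertext to pad
 *	aes_xts_encrypt_block(c_last_full, pp, tweak[m]);
 */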

AES_FUNC_START(aes_xts_decrypt)
	frame_push	0

	/* subtract 16 bytes if we are doing CTS */
	sub		w8, w4, #0x10
	tst		w4, #0xf
	csel		w4, w4, w8, eq

	ld1		{v4.16b}, [x6]
	xts_load_mask	v8
	xts_cts_skip_tw	w7, .Lxtsdecskiptw
	cbz		w7, .Lxtsdecnotfirst

	enc_prepare	w3, x5, x8
	encrypt_block	v4, w3, x5, x8, w7		/* first tweak */
.Lxtsdecskiptw:
	dec_prepare	w3, x2, x8
	b		.LxtsdecNx

.Lxtsdecnotfirst:
	dec_prepare	w3, x2, x8
.LxtsdecloopNx:
	next_tweak	v4, v4, v8
.LxtsdecNx:
	subs		w4, w4, #64
	bmi		.Lxtsdec1x
	ld1		{v0.16b-v3.16b}, [x1], #64	/* get 4 ct blocks */
	next_tweak	v5, v4, v8
	eor		v0.16b, v0.16b, v4.16b
	next_tweak	v6, v5, v8
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	next_tweak	v7, v6, v8
	eor		v3.16b, v3.16b, v7.16b
	bl		aes_decrypt_block4x
	eor		v3.16b, v3.16b, v7.16b
	eor		v0.16b, v0.16b, v4.16b
	eor		v1.16b, v1.16b, v5.16b
	eor		v2.16b, v2.16b, v6.16b
	st1		{v0.16b-v3.16b}, [x0], #64
	mov		v4.16b, v7.16b
	cbz		w4, .Lxtsdecout
	xts_reload_mask	v8
	b		.LxtsdecloopNx
.Lxtsdec1x:
	adds		w4, w4, #64
	beq		.Lxtsdecout
	subs		w4, w4, #16
.Lxtsdecloop:
	ld1		{v0.16b}, [x1], #16
	bmi		.Lxtsdeccts
.Lxtsdecctsout:
	eor		v0.16b, v0.16b, v4.16b
	decrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v4.16b
	st1		{v0.16b}, [x0], #16
	cbz		w4, .Lxtsdecout
	subs		w4, w4, #16
	next_tweak	v4, v4, v8
	b		.Lxtsdecloop
.Lxtsdecout:
	st1		{v4.16b}, [x6]
	frame_pop
	ret

.Lxtsdeccts:
	adr_l		x8, .Lcts_permute_table

	add		x1, x1, w4, sxtw	/* rewind input pointer */
	add		w4, w4, #16		/* # bytes in final block */
	add		x9, x8, #32
	add		x8, x8, x4
	sub		x9, x9, x4
	add		x4, x0, x4		/* output address of final block */

	next_tweak	v5, v4, v8

	ld1		{v1.16b}, [x1]		/* load final block */
	ld1		{v2.16b}, [x8]
	ld1		{v3.16b}, [x9]

	eor		v0.16b, v0.16b, v5.16b
	decrypt_block	v0, w3, x2, x8, w7
	eor		v0.16b, v0.16b, v5.16b

	tbl		v2.16b, {v0.16b}, v2.16b
	tbx		v0.16b, {v1.16b}, v3.16b

	st1		{v2.16b}, [x4]		/* overlapping stores */
	mov		w4, wzr
	b		.Lxtsdecctsout
AES_FUNC_END(aes_xts_decrypt)
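/*
 * Decryption mirrors the encrypt-side stealing, with the usual CTS twist that
 * the final two tweaks are consumed in swapped order: the last full
 * ciphertext block is decrypted with tweak[m] (v5, computed up front in
 * .Lxtsdeccts) to recover the stolen bytes, and the reassembled block is then
 * decrypted with tweak[m - 1] (v4) on the .Lxtsdecctsout path.  Sketch, using
 * the same hypothetical helper names as the encrypt-side comment:
 *
 *	aes_xts_decrypt_block(pp, c_last_full, tweak[m]);  // P_m || stolen tail
 *	memcpy(p_final, pp, tail);
 *	memcpy(cc, c_final, tail);
 *	memcpy(cc + tail, pp + tail, 16 - tail);
 *	aes_xts_decrypt_block(p_last_full, cc, tweak[m - 1]);
 */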

	/*
	 * aes_mac_update(u8 const in[], u32 const rk[], int rounds,
	 *		  int blocks, u8 dg[], int enc_before, int enc_after)
	 */
AES_FUNC_START(aes_mac_update)
	ld1		{v0.16b}, [x4]			/* get dg */
	enc_prepare	w2, x1, x7
	cbz		w5, .Lmacloop4x

	encrypt_block	v0, w2, x1, x7, w8

.Lmacloop4x:
	subs		w3, w3, #4
	bmi		.Lmac1x
	ld1		{v1.16b-v4.16b}, [x0], #64	/* get next pt block */
	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with dg */
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v2.16b
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v3.16b
	encrypt_block	v0, w2, x1, x7, w8
	eor		v0.16b, v0.16b, v4.16b
	cmp		w3, wzr
	csinv		x5, x6, xzr, eq
	cbz		w5, .Lmacout
	encrypt_block	v0, w2, x1, x7, w8
	st1		{v0.16b}, [x4]			/* return dg */
	cond_yield	.Lmacout, x7, x8
	b		.Lmacloop4x
.Lmac1x:
	add		w3, w3, #4
.Lmacloop:
	cbz		w3, .Lmacout
	ld1		{v1.16b}, [x0], #16		/* get next pt block */
	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with dg */

	subs		w3, w3, #1
	csinv		x5, x6, xzr, eq
	cbz		w5, .Lmacout

.Lmacenc:
	encrypt_block	v0, w2, x1, x7, w8
	b		.Lmacloop

.Lmacout:
	st1		{v0.16b}, [x4]			/* return dg */
	mov		w0, w3
	ret
AES_FUNC_END(aes_mac_update)
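/*
 * aes_mac_update() is the shared CBC-MAC style block-processing helper: dg
 * carries the chaining value across calls.  Functionally it behaves roughly
 * like the following C (a sketch only; xor_block()/aes_encrypt() stand in for
 * the eor/encrypt_block sequence above):
 *
 *	if (enc_before)
 *		aes_encrypt(rk, dg, dg);
 *
 *	for (i = 0; i < blocks; i++) {
 *		xor_block(dg, in + 16 * i);		// dg ^= P[i]
 *		if (i != blocks - 1 || enc_after)
 *			aes_encrypt(rk, dg, dg);	// dg = E_K(dg)
 *	}
 *	return blocks_left;	// nonzero only if cond_yield bailed out early
 *
 * The 4-way unrolled loop and cond_yield affect scheduling, not this result.
 */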