[PATCH 0/1][Xenial] Restore reverted commit "crypto: arm64/sha - avoid non-standard inline asm tricks"

[PATCH 0/1][Xenial] Restore reverted commit "crypto: arm64/sha - avoid non-standard inline asm tricks"

dann frazier-4
This reapplies a patch that we reverted due to a regression (LP: #1905336).
A proper fix has since been applied via stable:

commit 9e1c3df42e03 ("arm64: assembler: make adr_l work in modules under KASLR")

So we can now safely restore this commit.

Ard Biesheuvel (1):
  crypto: arm64/sha - avoid non-standard inline asm tricks

 arch/arm64/crypto/sha1-ce-core.S |  6 ++++--
 arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
 arch/arm64/crypto/sha2-ce-core.S |  6 ++++--
 arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
 4 files changed, 16 insertions(+), 20 deletions(-)

--
2.30.0


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

[PATCH 1/1][Xenial] crypto: arm64/sha - avoid non-standard inline asm tricks

dann frazier-4
From: Ard Biesheuvel <[hidden email]>

BugLink: https://bugs.launchpad.net/bugs/1907489

commit f4857f4c2ee9aa4e2aacac1a845352b00197fb57 upstream.

Replace the inline asm which exports struct offsets as ELF symbols
with proper const variables exposing the same values. This works
around an issue with Clang which does not interpret the "i" (or "I")
constraints in the same way as GCC.
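
For context, a minimal standalone C sketch of the difference between the two
approaches is below. It is only a userspace illustration; the struct and symbol
names (demo_state, demo_offsetof_finalize) are hypothetical stand-ins, not the
kernel code itself.

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative stand-in for struct sha1_ce_state. */
  struct demo_state {
          struct { uint64_t count; } sst;
          uint32_t finalize;
  };

  /*
   * Old approach (what this patch removes): define an ELF symbol from
   * inline asm, relying on the "I" constraint to accept offsetof() as
   * an immediate -- GCC accepts this, Clang may not:
   *
   *   #define ASM_EXPORT(sym, val) \
   *       asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
   *
   * New approach: an ordinary const object with external linkage; the
   * assembly loads its value at run time (on arm64, via the ldr_l macro).
   */
  const uint32_t demo_offsetof_finalize =
          offsetof(struct demo_state, finalize);

  int main(void)
  {
          printf("offsetof(finalize) = %u\n",
                 (unsigned)demo_offsetof_finalize);
          return 0;
  }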

Signed-off-by: Ard Biesheuvel <[hidden email]>
Tested-by: Matthias Kaehlcke <[hidden email]>
Signed-off-by: Herbert Xu <[hidden email]>
Signed-off-by: Nathan Chancellor <[hidden email]>
Signed-off-by: Greg Kroah-Hartman <[hidden email]>
Signed-off-by: Juerg Haefliger <[hidden email]>
Signed-off-by: Kleber Sacilotto de Souza <[hidden email]>
Signed-off-by: dann frazier <[hidden email]>
---
 arch/arm64/crypto/sha1-ce-core.S |  6 ++++--
 arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
 arch/arm64/crypto/sha2-ce-core.S |  6 ++++--
 arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
 4 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index c98e7e849f06..8550408735a0 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
  ldr dgb, [x0, #16]
 
  /* load sha1_ce_state::finalize */
- ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
+ ldr_l w4, sha1_ce_offsetof_finalize, x4
+ ldr w4, [x0, x4]
 
  /* load input */
 0: ld1 {v8.4s-v11.4s}, [x1], #64
@@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
  * the padding is handled by the C code in that case.
  */
  cbz x4, 3f
- ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
+ ldr_l w4, sha1_ce_offsetof_count, x4
+ ldr x4, [x0, x4]
  movi v9.2d, #0
  mov x8, #0x80000000
  movi v10.2d, #0
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 01e48b8970b1..1b7b4684c35b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -17,9 +17,6 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
-#define ASM_EXPORT(sym, val) \
- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <[hidden email]>");
 MODULE_LICENSE("GPL v2");
@@ -32,6 +29,9 @@ struct sha1_ce_state {
 asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
   int blocks);
 
+const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
+const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
+
 static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
   unsigned int len)
 {
@@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
  struct sha1_ce_state *sctx = shash_desc_ctx(desc);
  bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
 
- ASM_EXPORT(sha1_ce_offsetof_count,
-   offsetof(struct sha1_ce_state, sst.count));
- ASM_EXPORT(sha1_ce_offsetof_finalize,
-   offsetof(struct sha1_ce_state, finalize));
-
  /*
  * Allow the asm code to perform the finalization if there is no
  * partial data and the input is a round multiple of the block size.
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 01cfee066837..679c6c002f4f 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
  ld1 {dgav.4s, dgbv.4s}, [x0]
 
  /* load sha256_ce_state::finalize */
- ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
+ ldr_l w4, sha256_ce_offsetof_finalize, x4
+ ldr w4, [x0, x4]
 
  /* load input */
 0: ld1 {v16.4s-v19.4s}, [x1], #64
@@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
  * the padding is handled by the C code in that case.
  */
  cbz x4, 3f
- ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
+ ldr_l w4, sha256_ce_offsetof_count, x4
+ ldr x4, [x0, x4]
  movi v17.2d, #0
  mov x8, #0x80000000
  movi v18.2d, #0
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 7a7f95b94869..356ca9397a86 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -17,9 +17,6 @@
 #include <linux/crypto.h>
 #include <linux/module.h>
 
-#define ASM_EXPORT(sym, val) \
- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <[hidden email]>");
 MODULE_LICENSE("GPL v2");
@@ -32,6 +29,11 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
   int blocks);
 
+const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
+      sst.count);
+const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
+ finalize);
+
 static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
     unsigned int len)
 {
@@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
  struct sha256_ce_state *sctx = shash_desc_ctx(desc);
  bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
 
- ASM_EXPORT(sha256_ce_offsetof_count,
-   offsetof(struct sha256_ce_state, sst.count));
- ASM_EXPORT(sha256_ce_offsetof_finalize,
-   offsetof(struct sha256_ce_state, finalize));
-
  /*
  * Allow the asm code to perform the finalization if there is no
  * partial data and the input is a round multiple of the block size.
--
2.30.0


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

ACK: [PATCH 0/1][Xenial] Restore reverted commit "crypto: arm64/sha - avoid non-standard inline asm tricks"

Kamal Mostafa-2
In reply to this post by dann frazier-4
LGTM, thanks for the follow-up on this, Dann.

Acked-by: Kamal Mostafa <[hidden email]>

 -Kamal

On Fri, Jan 15, 2021 at 04:32:30PM -0700, dann frazier wrote:

> This reapplies a patch that we reverted due to a regression (LP: #1905336).
> A proper fix has since been applied via stable:
>
> commit 9e1c3df42e03 ("arm64: assembler: make adr_l work in modules under KASLR")
>
> So we can now safely restore this commit.
>
> Ard Biesheuvel (1):
>   crypto: arm64/sha - avoid non-standard inline asm tricks
>
>  arch/arm64/crypto/sha1-ce-core.S |  6 ++++--
>  arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
>  arch/arm64/crypto/sha2-ce-core.S |  6 ++++--
>  arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
>  4 files changed, 16 insertions(+), 20 deletions(-)
>
> --
> 2.30.0
>
>
> --
> kernel-team mailing list
> [hidden email]
> https://lists.ubuntu.com/mailman/listinfo/kernel-team

--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

ACK: [PATCH 1/1][Xenial] crypto: arm64/sha - avoid non-standard inline asm tricks

Kleber Souza
In reply to this post by dann frazier-4
On 16.01.21 00:32, dann frazier wrote:

> From: Ard Biesheuvel <[hidden email]>
>
> BugLink: https://bugs.launchpad.net/bugs/1907489
>
> commit f4857f4c2ee9aa4e2aacac1a845352b00197fb57 upstream.
>
> Replace the inline asm which exports struct offsets as ELF symbols
> with proper const variables exposing the same values. This works
> around an issue with Clang which does not interpret the "i" (or "I")
> constraints in the same way as GCC.
>
> Signed-off-by: Ard Biesheuvel <[hidden email]>
> Tested-by: Matthias Kaehlcke <[hidden email]>
> Signed-off-by: Herbert Xu <[hidden email]>
> Signed-off-by: Nathan Chancellor <[hidden email]>
> Signed-off-by: Greg Kroah-Hartman <[hidden email]>
> Signed-off-by: Juerg Haefliger <[hidden email]>
> Signed-off-by: Kleber Sacilotto de Souza <[hidden email]>
> Signed-off-by: dann frazier <[hidden email]>

Acked-by: Kleber Sacilotto de Souza <[hidden email]>

> ---
>   arch/arm64/crypto/sha1-ce-core.S |  6 ++++--
>   arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
>   arch/arm64/crypto/sha2-ce-core.S |  6 ++++--
>   arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
>   4 files changed, 16 insertions(+), 20 deletions(-)
>
> diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
> index c98e7e849f06..8550408735a0 100644
> --- a/arch/arm64/crypto/sha1-ce-core.S
> +++ b/arch/arm64/crypto/sha1-ce-core.S
> @@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
>   ldr dgb, [x0, #16]
>  
>   /* load sha1_ce_state::finalize */
> - ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
> + ldr_l w4, sha1_ce_offsetof_finalize, x4
> + ldr w4, [x0, x4]
>  
>   /* load input */
>   0: ld1 {v8.4s-v11.4s}, [x1], #64
> @@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
>   * the padding is handled by the C code in that case.
>   */
>   cbz x4, 3f
> - ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
> + ldr_l w4, sha1_ce_offsetof_count, x4
> + ldr x4, [x0, x4]
>   movi v9.2d, #0
>   mov x8, #0x80000000
>   movi v10.2d, #0
> diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
> index 01e48b8970b1..1b7b4684c35b 100644
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -17,9 +17,6 @@
>   #include <linux/crypto.h>
>   #include <linux/module.h>
>  
> -#define ASM_EXPORT(sym, val) \
> - asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
> -
>   MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
>   MODULE_AUTHOR("Ard Biesheuvel <[hidden email]>");
>   MODULE_LICENSE("GPL v2");
> @@ -32,6 +29,9 @@ struct sha1_ce_state {
>   asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
>    int blocks);
>  
> +const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
> +const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
> +
>   static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
>    unsigned int len)
>   {
> @@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
>   struct sha1_ce_state *sctx = shash_desc_ctx(desc);
>   bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
>  
> - ASM_EXPORT(sha1_ce_offsetof_count,
> -   offsetof(struct sha1_ce_state, sst.count));
> - ASM_EXPORT(sha1_ce_offsetof_finalize,
> -   offsetof(struct sha1_ce_state, finalize));
> -
>   /*
>   * Allow the asm code to perform the finalization if there is no
>   * partial data and the input is a round multiple of the block size.
> diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
> index 01cfee066837..679c6c002f4f 100644
> --- a/arch/arm64/crypto/sha2-ce-core.S
> +++ b/arch/arm64/crypto/sha2-ce-core.S
> @@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
>   ld1 {dgav.4s, dgbv.4s}, [x0]
>  
>   /* load sha256_ce_state::finalize */
> - ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
> + ldr_l w4, sha256_ce_offsetof_finalize, x4
> + ldr w4, [x0, x4]
>  
>   /* load input */
>   0: ld1 {v16.4s-v19.4s}, [x1], #64
> @@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
>   * the padding is handled by the C code in that case.
>   */
>   cbz x4, 3f
> - ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
> + ldr_l w4, sha256_ce_offsetof_count, x4
> + ldr x4, [x0, x4]
>   movi v17.2d, #0
>   mov x8, #0x80000000
>   movi v18.2d, #0
> diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
> index 7a7f95b94869..356ca9397a86 100644
> --- a/arch/arm64/crypto/sha2-ce-glue.c
> +++ b/arch/arm64/crypto/sha2-ce-glue.c
> @@ -17,9 +17,6 @@
>   #include <linux/crypto.h>
>   #include <linux/module.h>
>  
> -#define ASM_EXPORT(sym, val) \
> - asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
> -
>   MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
>   MODULE_AUTHOR("Ard Biesheuvel <[hidden email]>");
>   MODULE_LICENSE("GPL v2");
> @@ -32,6 +29,11 @@ struct sha256_ce_state {
>   asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
>    int blocks);
>  
> +const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
> +      sst.count);
> +const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
> + finalize);
> +
>   static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
>      unsigned int len)
>   {
> @@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
>   struct sha256_ce_state *sctx = shash_desc_ctx(desc);
>   bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
>  
> - ASM_EXPORT(sha256_ce_offsetof_count,
> -   offsetof(struct sha256_ce_state, sst.count));
> - ASM_EXPORT(sha256_ce_offsetof_finalize,
> -   offsetof(struct sha256_ce_state, finalize));
> -
>   /*
>   * Allow the asm code to perform the finalization if there is no
>   * partial data and the input is a round multiple of the block size.
>


--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team

APPLIED: [PATCH 0/1][Xenial] Restore reverted commit "crypto: arm64/sha - avoid non-standard inline asm tricks"

Kelsey Skunberg
In reply to this post by dann frazier-4
Applied to Xenial/master-next. Thank you!

-Kelsey

On 2021-01-15 16:32:30 , dann frazier wrote:

> This reapplies a patch that we reverted due to a regression (LP: #1905336).
> A proper fix has since been applied via stable:
>
> commit 9e1c3df42e03 ("arm64: assembler: make adr_l work in modules under KASLR")
>
> So we can now safely restore this commit.
>
> Ard Biesheuvel (1):
>   crypto: arm64/sha - avoid non-standard inline asm tricks
>
>  arch/arm64/crypto/sha1-ce-core.S |  6 ++++--
>  arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
>  arch/arm64/crypto/sha2-ce-core.S |  6 ++++--
>  arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
>  4 files changed, 16 insertions(+), 20 deletions(-)
>
> --
> 2.30.0
>
>
> --
> kernel-team mailing list
> [hidden email]
> https://lists.ubuntu.com/mailman/listinfo/kernel-team

--
kernel-team mailing list
[hidden email]
https://lists.ubuntu.com/mailman/listinfo/kernel-team