diff --git a/0002-arm-CVE-2020-6096-fix-memcpy-and-memmove-for-negativ.patch b/0002-arm-CVE-2020-6096-fix-memcpy-and-memmove-for-negativ.patch
new file mode 100644
index 0000000..1f92b6c
--- /dev/null
+++ b/0002-arm-CVE-2020-6096-fix-memcpy-and-memmove-for-negativ.patch
@@ -0,0 +1,189 @@
+From 79a4fa341b8a89cb03f84564fd72abaa1a2db394 Mon Sep 17 00:00:00 2001
+From: Evgeny Eremin
+Date: Wed, 8 Jul 2020 14:18:19 +0200
+Subject: [PATCH] arm: CVE-2020-6096: fix memcpy and memmove for negative
+ length [BZ #25620]
+
+Unsigned branch instructions could be used for r2 to fix the wrong
+behavior when a negative length is passed to memcpy and memmove.
+This commit fixes the generic arm implementation of memcpy and memmove.
+---
+ sysdeps/arm/memcpy.S | 24 ++++++++++--------------
+ sysdeps/arm/memmove.S | 24 ++++++++++--------------
+ 2 files changed, 20 insertions(+), 28 deletions(-)
+
+diff --git a/sysdeps/arm/memcpy.S b/sysdeps/arm/memcpy.S
+index 510e8adaf2..bcfbc51d99 100644
+--- a/sysdeps/arm/memcpy.S
++++ b/sysdeps/arm/memcpy.S
+@@ -68,7 +68,7 @@ ENTRY(memcpy)
+ cfi_remember_state
+
+ subs r2, r2, #4
+- blt 8f
++ blo 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #0] )
+ bne 9f
+@@ -82,7 +82,7 @@ ENTRY(memcpy)
+ cfi_rel_offset (r6, 4)
+ cfi_rel_offset (r7, 8)
+ cfi_rel_offset (r8, 12)
+- blt 5f
++ blo 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb r3, ip, #32 )
+@@ -98,9 +98,9 @@ ENTRY(memcpy)
+ #endif
+
+ PLD( pld [r1, #0] )
+-2: PLD( subs r2, r2, #96 )
++2: PLD( cmp r2, #96 )
+ PLD( pld [r1, #28] )
+- PLD( blt 4f )
++ PLD( blo 4f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+@@ -108,9 +108,7 @@ ENTRY(memcpy)
+ 4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+- bge 3b
+- PLD( cmn r2, #96 )
+- PLD( bge 4b )
++ bhs 3b
+
+ 5: ands ip, r2, #28
+ rsb ip, ip, #32
+@@ -222,7 +220,7 @@ ENTRY(memcpy)
+ strbge r4, [r0], #1
+ subs r2, r2, ip
+ strb lr, [r0], #1
+- blt 8b
++ blo 8b
+ ands ip, r1, #3
+ beq 1b
+
+@@ -236,7 +234,7 @@ ENTRY(memcpy)
+ .macro forward_copy_shift pull push
+
+ subs r2, r2, #28
+- blt 14f
++ blo 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+@@ -253,9 +251,9 @@ ENTRY(memcpy)
+ cfi_rel_offset (r10, 16)
+
+ PLD( pld [r1, #0] )
+- PLD( subs r2, r2, #96 )
++ PLD( cmp r2, #96 )
+ PLD( pld [r1, #28] )
+- PLD( blt 13f )
++ PLD( blo 13f )
+ PLD( pld [r1, #60] )
+ PLD( pld [r1, #92] )
+
+@@ -280,9 +278,7 @@ ENTRY(memcpy)
+ mov ip, ip, PULL #\pull
+ orr ip, ip, lr, PUSH #\push
+ stmia r0!, {r3, r4, r5, r6, r7, r8, r10, ip}
+- bge 12b
+- PLD( cmn r2, #96 )
+- PLD( bge 13b )
++ bhs 12b
+
+ pop {r5 - r8, r10}
+ cfi_adjust_cfa_offset (-20)
+diff --git a/sysdeps/arm/memmove.S b/sysdeps/arm/memmove.S
+index 954037ef3a..0d07b76ee6 100644
+--- a/sysdeps/arm/memmove.S
++++ b/sysdeps/arm/memmove.S
+@@ -85,7 +85,7 @@ ENTRY(memmove)
+ add r1, r1, r2
+ add r0, r0, r2
+ subs r2, r2, #4
+- blt 8f
++ blo 8f
+ ands ip, r0, #3
+ PLD( pld [r1, #-4] )
+ bne 9f
+@@ -99,7 +99,7 @@ ENTRY(memmove)
+ cfi_rel_offset (r6, 4)
+ cfi_rel_offset (r7, 8)
+ cfi_rel_offset (r8, 12)
+- blt 5f
++ blo 5f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( sbcsne r4, ip, r2 ) @ C is always set here
+@@ -114,9 +114,9 @@ ENTRY(memmove)
+ #endif
+
+ PLD( pld [r1, #-4] )
+-2: PLD( subs r2, r2, #96 )
++2: PLD( cmp r2, #96 )
+ PLD( pld [r1, #-32] )
+- PLD( blt 4f )
++ PLD( blo 4f )
+ PLD( pld [r1, #-64] )
+ PLD( pld [r1, #-96] )
+
+@@ -124,9 +124,7 @@ ENTRY(memmove)
+ 4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
+ subs r2, r2, #32
+ stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
+- bge 3b
+- PLD( cmn r2, #96 )
+- PLD( bge 4b )
++ bhs 3b
+
+ 5: ands ip, r2, #28
+ rsb ip, ip, #32
+@@ -237,7 +235,7 @@ ENTRY(memmove)
+ strbge r4, [r0, #-1]!
+ subs r2, r2, ip
+ strb lr, [r0, #-1]!
+- blt 8b
++ blo 8b
+ ands ip, r1, #3
+ beq 1b
+
+@@ -251,7 +249,7 @@ ENTRY(memmove)
+ .macro backward_copy_shift push pull
+
+ subs r2, r2, #28
+- blt 14f
++ blo 14f
+
+ CALGN( ands ip, r1, #31 )
+ CALGN( rsb ip, ip, #32 )
+@@ -268,9 +266,9 @@ ENTRY(memmove)
+ cfi_rel_offset (r10, 16)
+
+ PLD( pld [r1, #-4] )
+- PLD( subs r2, r2, #96 )
++ PLD( cmp r2, #96 )
+ PLD( pld [r1, #-32] )
+- PLD( blt 13f )
++ PLD( blo 13f )
+ PLD( pld [r1, #-64] )
+ PLD( pld [r1, #-96] )
+
+@@ -295,9 +293,7 @@ ENTRY(memmove)
+ mov r4, r4, PUSH #\push
+ orr r4, r4, r3, PULL #\pull
+ stmdb r0!, {r4 - r8, r10, ip, lr}
+- bge 12b
+- PLD( cmn r2, #96 )
+- PLD( bge 13b )
++ bhs 12b
+
+ pop {r5 - r8, r10}
+ cfi_adjust_cfa_offset (-20)
+--
+2.17.1
+
diff --git a/0003-arm-CVE-2020-6096-Fix-multiarch-memcpy-for-negative-.patch b/0003-arm-CVE-2020-6096-Fix-multiarch-memcpy-for-negative-.patch
new file mode 100644
index 0000000..5f99964
--- /dev/null
+++ b/0003-arm-CVE-2020-6096-Fix-multiarch-memcpy-for-negative-.patch
@@ -0,0 +1,107 @@
+From beea361050728138b82c57dda0c4810402d342b9 Mon Sep 17 00:00:00 2001
+From: Alexander Anisimov
+Date: Wed, 8 Jul 2020 14:18:31 +0200
+Subject: [PATCH] arm: CVE-2020-6096: Fix multiarch memcpy for negative
+ length [BZ #25620]
+
+Unsigned branch instructions could be used for r2 to fix the wrong
+behavior when a negative length is passed to memcpy.
+This commit fixes the armv7 version.
+---
+ sysdeps/arm/armv7/multiarch/memcpy_impl.S | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+index bf4ac7077f..379bb56fc9 100644
+--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
++++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+@@ -268,7 +268,7 @@ ENTRY(memcpy)
+
+ mov dst, dstin /* Preserve dstin, we need to return it. */
+ cmp count, #64
+- bge .Lcpy_not_short
++ bhs .Lcpy_not_short
+ /* Deal with small copies quickly by dropping straight into the
+ exit block. */
+
+@@ -351,10 +351,10 @@ ENTRY(memcpy)
+
+ 1:
+ subs tmp2, count, #64 /* Use tmp2 for count. */
+- blt .Ltail63aligned
++ blo .Ltail63aligned
+
+ cmp tmp2, #512
+- bge .Lcpy_body_long
++ bhs .Lcpy_body_long
+
+ .Lcpy_body_medium: /* Count in tmp2. */
+ #ifdef USE_VFP
+@@ -378,7 +378,7 @@ ENTRY(memcpy)
+ add src, src, #64
+ vstr d1, [dst, #56]
+ add dst, dst, #64
+- bge 1b
++ bhs 1b
+ tst tmp2, #0x3f
+ beq .Ldone
+
+@@ -412,7 +412,7 @@ ENTRY(memcpy)
+ ldrd A_l, A_h, [src, #64]!
+ strd A_l, A_h, [dst, #64]!
+ subs tmp2, tmp2, #64
+- bge 1b
++ bhs 1b
+ tst tmp2, #0x3f
+ bne 1f
+ ldr tmp2,[sp], #FRAME_SIZE
+@@ -482,7 +482,7 @@ ENTRY(memcpy)
+ add src, src, #32
+
+ subs tmp2, tmp2, #prefetch_lines * 64 * 2
+- blt 2f
++ blo 2f
+ 1:
+ cpy_line_vfp d3, 0
+ cpy_line_vfp d4, 64
+@@ -494,7 +494,7 @@ ENTRY(memcpy)
+ add dst, dst, #2 * 64
+ add src, src, #2 * 64
+ subs tmp2, tmp2, #prefetch_lines * 64
+- bge 1b
++ bhs 1b
+
+ 2:
+ cpy_tail_vfp d3, 0
+@@ -615,8 +615,8 @@ ENTRY(memcpy)
+ 1:
+ pld [src, #(3 * 64)]
+ subs count, count, #64
+- ldrmi tmp2, [sp], #FRAME_SIZE
+- bmi .Ltail63unaligned
++ ldrlo tmp2, [sp], #FRAME_SIZE
++ blo .Ltail63unaligned
+ pld [src, #(4 * 64)]
+
+ #ifdef USE_NEON
+@@ -633,7 +633,7 @@ ENTRY(memcpy)
+ neon_load_multi d0-d3, src
+ neon_load_multi d4-d7, src
+ subs count, count, #64
+- bmi 2f
++ blo 2f
+ 1:
+ pld [src, #(4 * 64)]
+ neon_store_multi d0-d3, dst
+@@ -641,7 +641,7 @@ ENTRY(memcpy)
+ neon_store_multi d4-d7, dst
+ neon_load_multi d4-d7, src
+ subs count, count, #64
+- bpl 1b
++ bhs 1b
+ 2:
+ neon_store_multi d0-d3, dst
+ neon_store_multi d4-d7, dst
+--
+2.17.1
+
diff --git a/glibc.changes b/glibc.changes
index 647daad..b89ac3f 100644
--- a/glibc.changes
+++ b/glibc.changes
@@ -1,3 +1,6 @@
+* Wed Jul 22 2020 Evgeny Eremin - 2.30+git4
+- Fix CVE-2020-6096. JB#49988
+
 * Thu Feb 06 2020 Niels Breet - 2.30+git3
 - Use global RPM_OPT_FLAGS for arm builds. Contributes to JB#48210
 
diff --git a/glibc.spec b/glibc.spec
index a039ba4..4aeda2c 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -5,7 +5,7 @@
 
 Name: glibc
 Summary: GNU C library shared libraries
-Version: 2.30+git3
+Version: 2.30+git4
 Release: 0
 License: LGPLv2+ and LGPLv2+ with exceptions and GPLv2+
 URL: http://www.gnu.org/software/libc/
@@ -24,6 +24,8 @@ Patch10: eglibc-2.15-fix-neon-libdl.patch
 Patch11: eglibc-2.19-shlib-make.patch
 Patch12: glibc-2.27-bits.patch
 Patch13: 0001-Revert-elf-Refuse-to-dlopen-PIE-objects-BZ-24323.patch
+Patch14: 0002-arm-CVE-2020-6096-fix-memcpy-and-memmove-for-negativ.patch
+Patch15: 0003-arm-CVE-2020-6096-Fix-multiarch-memcpy-for-negative-.patch
 
 Provides: ldconfig
 # The dynamic linker supports DT_GNU_HASH
@@ -223,6 +225,8 @@ cd %{glibcsrcdir}
 %patch11 -p1
 %patch12 -p1
 %patch13 -p1
+%patch14 -p1
+%patch15 -p1
 
 %build
 GCC=gcc