summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Mair-Keimberger <m.mairkeimberger@gmail.com>2018-04-28 09:58:59 +0200
committerLars Wendler <polynomial-c@gentoo.org>2018-05-04 09:18:28 +0200
commit78d4a40a859636e46a150be8f53817faabb29744 (patch)
tree86785e5cd559fc2c151f527e08b314089d82b649 /dev-libs/openssl
parentmedia-libs/freetype: Workaround windows mis-detection (diff)
downloadgentoo-78d4a40a859636e46a150be8f53817faabb29744.tar.gz
gentoo-78d4a40a859636e46a150be8f53817faabb29744.tar.bz2
gentoo-78d4a40a859636e46a150be8f53817faabb29744.zip
dev-libs/openssl: remove unused patch
Closes: https://github.com/gentoo/gentoo/pull/8189
Diffstat (limited to 'dev-libs/openssl')
-rw-r--r--dev-libs/openssl/files/openssl-1.1.0g-CVE-2017-3738.patch77
1 file changed, 0 insertions, 77 deletions
diff --git a/dev-libs/openssl/files/openssl-1.1.0g-CVE-2017-3738.patch b/dev-libs/openssl/files/openssl-1.1.0g-CVE-2017-3738.patch
deleted file mode 100644
index 4b01feb8e873..000000000000
--- a/dev-libs/openssl/files/openssl-1.1.0g-CVE-2017-3738.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From e502cc86df9dafded1694fceb3228ee34d11c11a Mon Sep 17 00:00:00 2001
-From: Andy Polyakov <appro@openssl.org>
-Date: Fri, 24 Nov 2017 11:35:50 +0100
-Subject: [PATCH] bn/asm/rsaz-avx2.pl: fix digit correction bug in
- rsaz_1024_mul_avx2.
-
-Credit to OSS-Fuzz for finding this.
-
-CVE-2017-3738
-
-Reviewed-by: Rich Salz <rsalz@openssl.org>
----
- crypto/bn/asm/rsaz-avx2.pl | 15 +++++++--------
- 1 file changed, 7 insertions(+), 8 deletions(-)
-
-diff --git a/crypto/bn/asm/rsaz-avx2.pl b/crypto/bn/asm/rsaz-avx2.pl
-index 0c1b236ef98..46d746b7d0e 100755
---- a/crypto/bn/asm/rsaz-avx2.pl
-+++ b/crypto/bn/asm/rsaz-avx2.pl
-@@ -246,7 +246,7 @@
- vmovdqu 32*8-128($ap), $ACC8
-
- lea 192(%rsp), $tp0 # 64+128=192
-- vpbroadcastq .Land_mask(%rip), $AND_MASK
-+ vmovdqu .Land_mask(%rip), $AND_MASK
- jmp .LOOP_GRANDE_SQR_1024
-
- .align 32
-@@ -1077,10 +1077,10 @@
- vpmuludq 32*6-128($np),$Yi,$TEMP1
- vpaddq $TEMP1,$ACC6,$ACC6
- vpmuludq 32*7-128($np),$Yi,$TEMP2
-- vpblendd \$3, $ZERO, $ACC9, $ACC9 # correct $ACC3
-+ vpblendd \$3, $ZERO, $ACC9, $TEMP1 # correct $ACC3
- vpaddq $TEMP2,$ACC7,$ACC7
- vpmuludq 32*8-128($np),$Yi,$TEMP0
-- vpaddq $ACC9, $ACC3, $ACC3 # correct $ACC3
-+ vpaddq $TEMP1, $ACC3, $ACC3 # correct $ACC3
- vpaddq $TEMP0,$ACC8,$ACC8
-
- mov %rbx, %rax
-@@ -1093,7 +1093,9 @@
- vmovdqu -8+32*2-128($ap),$TEMP2
-
- mov $r1, %rax
-+ vpblendd \$0xfc, $ZERO, $ACC9, $ACC9 # correct $ACC3
- imull $n0, %eax
-+ vpaddq $ACC9,$ACC4,$ACC4 # correct $ACC3
- and \$0x1fffffff, %eax
-
- imulq 16-128($ap),%rbx
-@@ -1329,15 +1331,12 @@
- # But as we underutilize resources, it's possible to correct in
- # each iteration with marginal performance loss. But then, as
- # we do it in each iteration, we can correct less digits, and
--# avoid performance penalties completely. Also note that we
--# correct only three digits out of four. This works because
--# most significant digit is subjected to less additions.
-+# avoid performance penalties completely.
-
- $TEMP0 = $ACC9;
- $TEMP3 = $Bi;
- $TEMP4 = $Yi;
- $code.=<<___;
-- vpermq \$0, $AND_MASK, $AND_MASK
- vpaddq (%rsp), $TEMP1, $ACC0
-
- vpsrlq \$29, $ACC0, $TEMP1
-@@ -1770,7 +1769,7 @@
-
- .align 64
- .Land_mask:
-- .quad 0x1fffffff,0x1fffffff,0x1fffffff,-1
-+ .quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
- .Lscatter_permd:
- .long 0,2,4,6,7,7,7,7
- .Lgather_permd: