From e502cc86df9dafded1694fceb3228ee34d11c11a Mon Sep 17 00:00:00 2001
From: Andy Polyakov <appro@openssl.org>
Date: Fri, 24 Nov 2017 11:35:50 +0100
Subject: [PATCH] bn/asm/rsaz-avx2.pl: fix digit correction bug in
 rsaz_1024_mul_avx2.

Credit to OSS-Fuzz for finding this.

CVE-2017-3738

Reviewed-by: Rich Salz <rsalz@openssl.org>
---
 crypto/bn/asm/rsaz-avx2.pl | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/crypto/bn/asm/rsaz-avx2.pl b/crypto/bn/asm/rsaz-avx2.pl
index 0c1b236ef98..46d746b7d0e 100755
--- a/crypto/bn/asm/rsaz-avx2.pl
+++ b/crypto/bn/asm/rsaz-avx2.pl
@@ -246,7 +246,7 @@
 	vmovdqu		32*8-128($ap), $ACC8
 
 	lea	192(%rsp), $tp0			# 64+128=192
-	vpbroadcastq	.Land_mask(%rip), $AND_MASK
+	vmovdqu		.Land_mask(%rip), $AND_MASK
 	jmp	.LOOP_GRANDE_SQR_1024
 
 .align	32
@@ -1077,10 +1077,10 @@
 	vpmuludq	32*6-128($np),$Yi,$TEMP1
 	vpaddq		$TEMP1,$ACC6,$ACC6
 	vpmuludq	32*7-128($np),$Yi,$TEMP2
-	vpblendd	\$3, $ZERO, $ACC9, $ACC9	# correct $ACC3
+	vpblendd	\$3, $ZERO, $ACC9, $TEMP1	# correct $ACC3
 	vpaddq		$TEMP2,$ACC7,$ACC7
 	vpmuludq	32*8-128($np),$Yi,$TEMP0
-	vpaddq		$ACC9, $ACC3, $ACC3		# correct $ACC3
+	vpaddq		$TEMP1, $ACC3, $ACC3		# correct $ACC3
 	vpaddq		$TEMP0,$ACC8,$ACC8
 
 	mov	%rbx, %rax
@@ -1093,7 +1093,9 @@
 	vmovdqu		-8+32*2-128($ap),$TEMP2
 
 	mov	$r1, %rax
+	vpblendd	\$0xfc, $ZERO, $ACC9, $ACC9	# correct $ACC3
 	imull	$n0, %eax
+	vpaddq		$ACC9,$ACC4,$ACC4		# correct $ACC3
 	and	\$0x1fffffff, %eax
 
 	imulq	16-128($ap),%rbx
@@ -1329,15 +1331,12 @@
 #	But as we underutilize resources, it's possible to correct in
 #	each iteration with marginal performance loss. But then, as
 #	we do it in each iteration, we can correct less digits, and
-#	avoid performance penalties completely. Also note that we
-#	correct only three digits out of four. This works because
-#	most significant digit is subjected to less additions.
+#	avoid performance penalties completely.
 
 $TEMP0 = $ACC9;
 $TEMP3 = $Bi;
 $TEMP4 = $Yi;
 $code.=<<___;
-	vpermq		\$0, $AND_MASK, $AND_MASK
 	vpaddq		(%rsp), $TEMP1, $ACC0
 
 	vpsrlq		\$29, $ACC0, $TEMP1
@@ -1770,7 +1769,7 @@
 
 .align	64
 .Land_mask:
-	.quad	0x1fffffff,0x1fffffff,0x1fffffff,-1
+	.quad	0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
 .Lscatter_permd:
 	.long	0,2,4,6,7,7,7,7
 .Lgather_permd:
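
Background on the digit correction the patch adjusts: rsaz-avx2.pl keeps
1024-bit values in a redundant form of 29-bit digits, one digit per 64-bit
lane (hence the 0x1fffffff mask and the vpsrlq $29 carry shifts above), so
multiply-accumulate results may temporarily exceed 29 bits and are
renormalized by a shift/mask/add carry pass. The deleted comment describes
the old shortcut: only three of the four digits in each YMM group were
masked, on the assumption that the most significant one could not overflow,
and that assumption failed in rsaz_1024_mul_avx2. What follows is a minimal
C sketch of such a carry pass under that description, not OpenSSL code; the
names NDIGITS, MASK29 and normalize are invented for the illustration.

#include <stdint.h>
#include <stdio.h>

#define NDIGITS 36            /* 36 digits x 29 bits covers 1024 bits */
#define MASK29  0x1fffffffULL /* one lane of the fixed .Land_mask     */

/* Fold each digit's overflow into the next digit, analogous to the
 * vpsrlq $29 / vpand $AND_MASK / vpaddq sequences in the assembly. */
static void normalize(uint64_t d[NDIGITS + 1])
{
    for (int i = 0; i < NDIGITS; i++) {
        d[i + 1] += d[i] >> 29; /* carry out of digit i */
        d[i]     &= MASK29;     /* keep the low 29 bits */
    }
}

int main(void)
{
    uint64_t d[NDIGITS + 1] = { 0 };

    d[0] = (3ULL << 29) | 5;    /* digit 0 has overflowed by 3 */
    normalize(d);
    /* prints d[0]=5 d[1]=3: the excess moved up one digit */
    printf("d[0]=%llu d[1]=%llu\n",
           (unsigned long long)d[0], (unsigned long long)d[1]);
    return 0;
}

In these terms, the old -1 entry in .Land_mask skipped the mask on the top
lane, leaving that digit's overflow bits in place; when the digit does
exceed 29 bits, later steps that treat it as a 29-bit quantity produce a
wrong product. The fix therefore masks all four lanes and adds the extra
vpblendd/vpaddq corrections for $ACC9 seen in the hunks above.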