author     Anthony G. Basile <blueness@gentoo.org>  2013-12-17 11:36:05 -0500
committer  Anthony G. Basile <blueness@gentoo.org>  2013-12-17 11:36:05 -0500
commit     7d1698e4223dbd62e75ed988a5b504cbb7ce8581 (patch)
tree       320076200597f19cb32d90f7d8ab9851021e72fd
parent     Grsec/PaX: 3.0-{3.2.53,3.12.5}-201312132204 (diff)
Grsec/PaX: 3.0-{3.2.53,3.12.5}-201312151212 (20131215)
-rw-r--r--  3.12.5/0000_README                                               2
-rw-r--r--  3.12.5/4420_grsecurity-3.0-3.12.5-201312151212.patch (renamed from 3.12.5/4420_grsecurity-3.0-3.12.5-201312132204.patch)    46
-rw-r--r--  3.2.53/0000_README                                               2
-rw-r--r--  3.2.53/4420_grsecurity-3.0-3.2.53-201312151209.patch (renamed from 3.2.53/4420_grsecurity-3.0-3.2.53-201312132200.patch)  1862
4 files changed, 1252 insertions, 660 deletions
diff --git a/3.12.5/0000_README b/3.12.5/0000_README
index 374fa29..74b7ff1 100644
--- a/3.12.5/0000_README
+++ b/3.12.5/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.12.5-201312132204.patch
+Patch: 4420_grsecurity-3.0-3.12.5-201312151212.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.12.5/4420_grsecurity-3.0-3.12.5-201312132204.patch b/3.12.5/4420_grsecurity-3.0-3.12.5-201312151212.patch
index 013a53f..81f0265 100644
--- a/3.12.5/4420_grsecurity-3.0-3.12.5-201312132204.patch
+++ b/3.12.5/4420_grsecurity-3.0-3.12.5-201312151212.patch
@@ -87982,7 +87982,7 @@ index ae4846f..b0acebe 100644
send_sig(SIGXFSZ, current, 0);
return -EFBIG;
diff --git a/mm/fremap.c b/mm/fremap.c
-index 5bff081..00bd91e 100644
+index 5bff081..bfa6e93 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
@@ -87997,15 +87997,36 @@ index 5bff081..00bd91e 100644
/*
* Make sure the vma is shared, that it supports prefaulting,
* and that the remapped range is valid and fully within
-@@ -218,6 +223,8 @@ get_write_lock:
+@@ -208,9 +213,10 @@ get_write_lock:
+ if (mapping_cap_account_dirty(mapping)) {
+ unsigned long addr;
+ struct file *file = get_file(vma->vm_file);
++ /* mmap_region may free vma; grab the info now */
++ vm_flags = ACCESS_ONCE(vma->vm_flags);
+
+- addr = mmap_region(file, start, size,
+- vma->vm_flags, pgoff);
++ addr = mmap_region(file, start, size, vm_flags, pgoff);
+ fput(file);
+ if (IS_ERR_VALUE(addr)) {
+ err = addr;
+@@ -218,7 +224,7 @@ get_write_lock:
BUG_ON(addr != start);
err = 0;
}
-+ vm_flags = vma->vm_flags;
-+ vma = NULL;
- goto out;
+- goto out;
++ goto out_freed;
}
mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+@@ -253,6 +259,7 @@ get_write_lock:
+ out:
+ if (vma)
+ vm_flags = vma->vm_flags;
++out_freed:
+ if (likely(!has_write_lock))
+ up_read(&mm->mmap_sem);
+ else
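The mm/fremap.c hunk above revises the remap_file_pages() use-after-free fix: as its own comment notes, mmap_region() may free the vma, so the updated patch snapshots vma->vm_flags before the call and routes the exit through the new out_freed label so the freed vma is never dereferenced. A minimal C sketch of the pattern, with made-up names (do_remap() stands in for mmap_region()):

/* Sketch: snapshot a field before calling a function that may free the
 * object, then never touch the object again. Illustrative names only. */
struct vma_like { unsigned long vm_flags; };

extern long do_remap(unsigned long vm_flags);	/* may free the caller's vma */

static long remap_fixed(struct vma_like *vma)
{
	/* ACCESS_ONCE()-style read taken while vma is still valid */
	unsigned long vm_flags = *(volatile unsigned long *)&vma->vm_flags;
	long addr = do_remap(vm_flags);		/* vma may be gone after this */

	vma = NULL;				/* guard against stale use */
	return addr;
}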
diff --git a/mm/highmem.c b/mm/highmem.c
index b32b70c..e512eb0 100644
--- a/mm/highmem.c
@@ -110829,10 +110850,10 @@ index 0000000..7dad2cd
+nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..c1967f6
+index 0000000..5515dcb
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,3922 @@
+@@ -0,0 +1,3927 @@
+/*
+ * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -110962,7 +110983,7 @@ index 0000000..c1967f6
+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20131211beta",
++ .version = "20131214beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -112176,11 +112197,16 @@ index 0000000..c1967f6
+
+ cast_rhs_type = TREE_TYPE(cast_rhs);
+ type_max_type = TREE_TYPE(type_max);
-+ type_min_type = TREE_TYPE(type_min);
+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
-+ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+
+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
++
++ // special case: get_size_overflow_type(), 32, u64->s
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
++ return;
++
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
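The size_overflow_plugin.c update above bumps the plugin version to 20131214beta and adds one special case: on targets where long is 32 bits, a u64 value cast to a signed type gets only the MAX_CHECK, and the MIN_CHECK (together with its type-compatibility assert) is skipped. Roughly what the emitted instrumentation amounts to in C, with hypothetical names (the real plugin rewrites GIMPLE and calls its own reporting hook):

/* Hypothetical C rendering of the plugin's MAX/MIN range checks around
 * a narrowing cast; names and the reporting hook are illustrative. */
extern void report_size_overflow(const char *file, unsigned int line,
				 const char *func, const char *kind);

static int checked_narrow(unsigned long long wide)
{
	long long v = (long long)wide;

	if (v > 0x7fffffffLL)		/* MAX_CHECK */
		report_size_overflow(__FILE__, __LINE__, __func__, "max");
	if (v < -0x7fffffffLL - 1)	/* MIN_CHECK; skipped for u64 -> signed on 32-bit */
		report_size_overflow(__FILE__, __LINE__, __func__, "min");
	return (int)v;
}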
diff --git a/3.2.53/0000_README b/3.2.53/0000_README
index 09b1794..745fa24 100644
--- a/3.2.53/0000_README
+++ b/3.2.53/0000_README
@@ -130,7 +130,7 @@ Patch: 1052_linux-3.2.53.patch
From: http://www.kernel.org
Desc: Linux 3.2.53
-Patch: 4420_grsecurity-3.0-3.2.53-201312132200.patch
+Patch: 4420_grsecurity-3.0-3.2.53-201312151209.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.53/4420_grsecurity-3.0-3.2.53-201312132200.patch b/3.2.53/4420_grsecurity-3.0-3.2.53-201312151209.patch
index e78690c..5317515 100644
--- a/3.2.53/4420_grsecurity-3.0-3.2.53-201312132200.patch
+++ b/3.2.53/4420_grsecurity-3.0-3.2.53-201312151209.patch
@@ -10058,7 +10058,7 @@ index 43eda28..5ab5fdb 100644
unsigned int v;
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 5b577d5..3c1fed4 100644
+index 5b577d5..eb7f25e 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -8,6 +8,8 @@
@@ -10074,13 +10074,13 @@ index 5b577d5..3c1fed4 100644
je B192; \
leaq 32(r9),r9;
-+#define ret pax_force_retaddr 0, 1; ret
++#define ret pax_force_retaddr; ret
+
#define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
movq r1,r2; \
movq r3,r4; \
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
-index 3470624..201259d 100644
+index 3470624..9b476a3 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -31,6 +31,7 @@
@@ -10091,21 +10091,242 @@ index 3470624..201259d 100644
#ifdef __x86_64__
.data
-@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
+@@ -199,7 +200,7 @@ enc: .octa 0x2
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -208,8 +209,8 @@ enc: .octa 0x2
+ .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -217,15 +218,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ movdqa SHUF_MASK(%rip), %xmm14
+@@ -437,7 +438,7 @@ _initial_blocks_done\num_initial_blocks\operation:
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -446,8 +447,8 @@ _initial_blocks_done\num_initial_blocks\operation:
+ .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -455,15 +456,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ movdqa SHUF_MASK(%rip), %xmm14
+@@ -1264,7 +1265,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ *****************************************************************************/
+
+ ENTRY(aesni_gcm_dec)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1274,8 +1275,8 @@ ENTRY(aesni_gcm_dec)
+ */
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13 # %xmm13 = HashKey
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13 # %xmm13 = HashKey
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1303,10 +1304,10 @@ ENTRY(aesni_gcm_dec)
+ movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
+ mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+- mov %r13, %r12
+- and $(3<<4), %r12
++ mov %r13, %r15
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_decrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_decrypt
+ je _initial_num_blocks_is_2_decrypt
+ _initial_num_blocks_is_3_decrypt:
+@@ -1356,16 +1357,16 @@ _zero_cipher_left_decrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+
+ movdqa %xmm1, %xmm2
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm2
+@@ -1394,9 +1395,9 @@ _less_than_8_bytes_left_decrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_decrypt
+ _multiple_of_16_bytes_decrypt:
+- mov arg8, %r12 # %r13 = aadLen (number of bytes)
+- shl $3, %r12 # convert into number of bits
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r13 = aadLen (number of bytes)
++ shl $3, %r15 # convert into number of bits
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1435,8 +1436,10 @@ _return_T_done_decrypt:
+ mov %r14, %rsp
pop %r14
pop %r13
- pop %r12
-+ pax_force_retaddr 0, 1
+- pop %r12
++ pop %r15
++ pax_force_retaddr
ret
+ENDPROC(aesni_gcm_dec)
/*****************************************************************************
-@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
+@@ -1523,7 +1526,7 @@ _return_T_done_decrypt:
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+ ENTRY(aesni_gcm_enc)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1533,8 +1536,8 @@ ENTRY(aesni_gcm_enc)
+ #
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1558,13 +1561,13 @@ ENTRY(aesni_gcm_enc)
+ movdqa %xmm13, HashKey(%rsp)
+ mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
+ and $-16, %r13
+- mov %r13, %r12
++ mov %r13, %r15
+
+ # Encrypt first few blocks
+
+- and $(3<<4), %r12
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_encrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_encrypt
+ je _initial_num_blocks_is_2_encrypt
+ _initial_num_blocks_is_3_encrypt:
+@@ -1617,14 +1620,14 @@ _zero_cipher_left_encrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+ movdqa SHUF_MASK(%rip), %xmm10
+@@ -1657,9 +1660,9 @@ _less_than_8_bytes_left_encrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_encrypt
+ _multiple_of_16_bytes_encrypt:
+- mov arg8, %r12 # %r12 = addLen (number of bytes)
+- shl $3, %r12
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r15 = addLen (number of bytes)
++ shl $3, %r15
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1698,8 +1701,10 @@ _return_T_done_encrypt:
+ mov %r14, %rsp
pop %r14
pop %r13
- pop %r12
-+ pax_force_retaddr 0, 1
+- pop %r12
++ pop %r15
++ pax_force_retaddr
ret
+ENDPROC(aesni_gcm_enc)
@@ -10115,7 +10336,7 @@ index 3470624..201259d 100644
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
.align 4
@@ -10123,7 +10344,7 @@ index 3470624..201259d 100644
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
.align 4
@@ -10131,7 +10352,7 @@ index 3470624..201259d 100644
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
.align 4
@@ -10139,7 +10360,7 @@ index 3470624..201259d 100644
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10147,7 +10368,7 @@ index 3470624..201259d 100644
#ifndef __x86_64__
popl KEYP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_set_key)
@@ -10157,7 +10378,7 @@ index 3470624..201259d 100644
popl KLEN
popl KEYP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_enc)
@@ -10167,7 +10388,7 @@ index 3470624..201259d 100644
AESENC KEY STATE
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10175,7 +10396,7 @@ index 3470624..201259d 100644
AESENCLAST KEY STATE2
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10183,7 +10404,7 @@ index 3470624..201259d 100644
popl KLEN
popl KEYP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_dec)
@@ -10193,7 +10414,7 @@ index 3470624..201259d 100644
AESDEC KEY STATE
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10201,7 +10422,7 @@ index 3470624..201259d 100644
AESDECLAST KEY STATE2
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10209,7 +10430,7 @@ index 3470624..201259d 100644
popl KEYP
popl LEN
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_ecb_enc)
@@ -10219,7 +10440,7 @@ index 3470624..201259d 100644
popl KEYP
popl LEN
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_ecb_dec)
@@ -10229,7 +10450,7 @@ index 3470624..201259d 100644
popl LEN
popl IVP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_cbc_enc)
@@ -10239,7 +10460,7 @@ index 3470624..201259d 100644
popl LEN
popl IVP
#endif
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_cbc_dec)
@@ -10249,7 +10470,7 @@ index 3470624..201259d 100644
mov $1, TCTR_LOW
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10257,7 +10478,7 @@ index 3470624..201259d 100644
.Linc_low:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
-+ pax_force_retaddr_bts
++ pax_force_retaddr
ret
/*
@@ -10265,12 +10486,12 @@ index 3470624..201259d 100644
.Lctr_enc_ret:
movups IV, (IVP)
.Lctr_enc_just_ret:
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
+ENDPROC(aesni_ctr_enc)
#endif
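The bulk of the aesni-intel_asm.S churn above is mechanical: every use of %r12 moves to %r15 (with matching push/pop), and the older pax_force_retaddr 0, 1 / pax_force_retaddr_bts annotations collapse into a single pax_force_retaddr. The reason is visible later in this patch: the KERNEXEC "OR" method now keeps its 0x8000000000000000 mask in %r12 (see the alternative-asm.h and calling.h hunks below), so hand-written assembly must vacate that register. A loose C model of both ideas, using GCC's global register variable extension (illustrative only; the real mechanism is a GCC plugin plus asm macros):

/* Sketch: reserve %r12 program-wide for the retaddr mask, the way the
 * KERNEXEC "OR" method does; code that used %r12 must pick another
 * callee-saved register, which is what the hunks above do with %r15. */
register unsigned long pax_retaddr_mask asm("r12");	/* holds 0x8000000000000000UL */

static inline unsigned long pax_force_retaddr_c(unsigned long retaddr)
{
	/* mirrors "orq %r12,(%rsp)": a tampered return address is forced
	 * into the kernel half of the address space, where it faults */
	return retaddr | pax_retaddr_mask;
}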
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-index 391d245..67f35c2 100644
+index 391d245..c73d634 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -20,6 +20,8 @@
@@ -10286,11 +10507,11 @@ index 391d245..67f35c2 100644
jnz __enc_xor;
write_block();
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
__enc_xor:
xor_block();
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.align 8
@@ -10298,7 +10519,7 @@ index 391d245..67f35c2 100644
movq %r11, %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
/**********************************************************************
@@ -10306,7 +10527,7 @@ index 391d245..67f35c2 100644
popq %rbx;
popq %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
__enc_xor4:
@@ -10314,7 +10535,7 @@ index 391d245..67f35c2 100644
popq %rbx;
popq %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.align 8
@@ -10322,11 +10543,11 @@ index 391d245..67f35c2 100644
popq %rbx;
popq %rbp;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-index 6214a9b..1f4fc9a 100644
+index 6214a9b..5c0f959 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -1,3 +1,5 @@
@@ -10339,7 +10560,7 @@ index 6214a9b..1f4fc9a 100644
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
# bytesatleast65:
._bytesatleast65:
@@ -10358,7 +10579,7 @@ index 6214a9b..1f4fc9a 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
-index b2c2f57..8470cab 100644
+index b2c2f57..f30325b 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -28,6 +28,8 @@
@@ -10370,16 +10591,35 @@ index b2c2f57..8470cab 100644
#define CTX %rdi // arg1
#define BUF %rsi // arg2
#define CNT %rdx // arg3
-@@ -104,6 +106,7 @@
- pop %r12
+@@ -75,9 +77,9 @@
+ \name:
+ push %rbx
+ push %rbp
+- push %r12
++ push %r14
+
+- mov %rsp, %r12
++ mov %rsp, %r14
+ sub $64, %rsp # allocate workspace
+ and $~15, %rsp # align stack
+
+@@ -99,11 +101,12 @@
+ xor %rax, %rax
+ rep stosq
+
+- mov %r12, %rsp # deallocate workspace
++ mov %r14, %rsp # deallocate workspace
+
+- pop %r12
++ pop %r14
pop %rbp
pop %rbx
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
.size \name, .-\name
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-index 5b012a2..36d5364 100644
+index 5b012a2..9712c31 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -20,6 +20,8 @@
@@ -10395,7 +10635,7 @@ index 5b012a2..36d5364 100644
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
__enc_xor3:
@@ -10403,7 +10643,7 @@ index 5b012a2..36d5364 100644
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
.global twofish_dec_blk_3way
@@ -10411,11 +10651,11 @@ index 5b012a2..36d5364 100644
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret;
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
-index 7bcf3fc..f53832f 100644
+index 7bcf3fc..560ff4c 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -21,6 +21,7 @@
@@ -10430,7 +10670,7 @@ index 7bcf3fc..f53832f 100644
popq R1
movq $1,%rax
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
twofish_dec_blk:
@@ -10438,7 +10678,7 @@ index 7bcf3fc..f53832f 100644
popq R1
movq $1,%rax
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index fd84387..887aa7e 100644
@@ -10573,7 +10813,7 @@ index 6557769..dfa8ead 100644
if (err)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 95b4eb3..1b112f5 100644
+index 95b4eb3..87e6dc1 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -13,7 +13,9 @@
@@ -10586,6 +10826,24 @@ index 95b4eb3..1b112f5 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
+@@ -61,12 +63,12 @@
+ */
+ .macro LOAD_ARGS32 offset, _r9=0
+ .if \_r9
+- movl \offset+16(%rsp),%r9d
++ movl \offset+R9(%rsp),%r9d
+ .endif
+- movl \offset+40(%rsp),%ecx
+- movl \offset+48(%rsp),%edx
+- movl \offset+56(%rsp),%esi
+- movl \offset+64(%rsp),%edi
++ movl \offset+RCX(%rsp),%ecx
++ movl \offset+RDX(%rsp),%edx
++ movl \offset+RSI(%rsp),%esi
++ movl \offset+RDI(%rsp),%edi
+ movl %eax,%eax /* zero extension */
+ .endm
+
@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
ENDPROC(native_irq_enable_sysexit)
#endif
@@ -10683,7 +10941,7 @@ index 95b4eb3..1b112f5 100644
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,13 +203,15 @@ sysenter_do_call:
+@@ -162,16 +203,18 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -10700,8 +10958,13 @@ index 95b4eb3..1b112f5 100644
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
/* clear IF, that popfq doesn't enable interrupts early */
- andl $~0x200,EFLAGS-R11(%rsp)
- movl RIP-R11(%rsp),%edx /* User %eip */
+- andl $~0x200,EFLAGS-R11(%rsp)
+- movl RIP-R11(%rsp),%edx /* User %eip */
++ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
++ movl RIP(%rsp),%edx /* User %eip */
+ CFI_REGISTER rip,rdx
+ RESTORE_ARGS 0,24,0,0,0,0
+ xorq %r8,%r8
@@ -194,6 +237,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
@@ -10806,7 +11069,7 @@ index 95b4eb3..1b112f5 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -321,13 +382,15 @@ cstar_do_call:
+@@ -321,14 +382,16 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -10819,12 +11082,14 @@ index 95b4eb3..1b112f5 100644
jnz sysretl_audit
sysretl_from_sys_call:
- andl $~TS_COMPAT,TI_status(%r10)
+- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+ pax_exit_kernel_user
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
++ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
+ movl EFLAGS-ARGOFFSET(%rsp),%r11d
@@ -355,7 +418,7 @@ sysretl_audit:
cstar_tracesys:
@@ -10976,7 +11241,7 @@ index f6f5c53..8e51d70 100644
set_fs(old_fs);
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index 091508b..7692c6f 100644
+index 091508b..2cc2c2d 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -4,10 +4,10 @@
@@ -11015,13 +11280,13 @@ index 091508b..7692c6f 100644
+ .if \reload
+ pax_set_fptr_mask
+ .endif
-+ orq %r10,\rip(%rsp)
++ orq %r12,\rip(%rsp)
+ .endm
+ .macro pax_force_fptr ptr
-+ orq %r10,\ptr
++ orq %r12,\ptr
+ .endm
+ .macro pax_set_fptr_mask
-+ movabs $0x8000000000000000,%r10
++ movabs $0x8000000000000000,%r12
+ .endm
+#endif
+#else
@@ -11541,7 +11806,7 @@ index 58cb6d4..a4b806c 100644
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index 24098aa..820ea9d 100644
+index 24098aa..1e37723 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -12,6 +12,14 @@ typedef struct {
@@ -11581,7 +11846,7 @@ index 24098aa..820ea9d 100644
* atomic64_xchg - xchg atomic64 variable
* @v: pointer to type atomic64_t
* @n: value to assign
-@@ -77,12 +100,30 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
}
/**
@@ -11606,13 +11871,6 @@ index 24098aa..820ea9d 100644
* atomic64_read - read atomic64 variable
* @v: pointer to type atomic64_t
*
- * Atomically reads the value of @v and returns it.
- */
--static inline long long atomic64_read(atomic64_t *v)
-+static inline long long __intentional_overflow(-1) atomic64_read(atomic64_t *v)
- {
- long long r;
- asm volatile(ATOMIC64_ALTERNATIVE(read)
@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
}
@@ -11701,15 +11959,12 @@ index 24098aa..820ea9d 100644
* @i: integer value to subtract
* @v: pointer to type atomic64_t
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index 0e1cbfc..adf5aa7 100644
+index 0e1cbfc..5623683 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
-@@ -16,9 +16,21 @@
- * Atomically reads the value of @v.
- * Doesn't imply a read memory barrier.
+@@ -18,7 +18,19 @@
*/
--static inline long atomic64_read(const atomic64_t *v)
-+static inline long __intentional_overflow(-1) atomic64_read(const atomic64_t *v)
+ static inline long atomic64_read(const atomic64_t *v)
{
- return (*(volatile long *)&(v)->counter);
+ return (*(volatile const long *)&(v)->counter);
@@ -12015,7 +12270,7 @@ index 0e1cbfc..adf5aa7 100644
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
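The atomic64 hunks above are PaX's REFCOUNT hardening: the regular atomic64 operations gain an overflow trap, while the new *_unchecked variants keep the old wrapping behavior for counters where overflow is intentional. The trapping idiom, roughly as the full grsecurity patch implements it on x86-64 (a sketch; the real code also wires the trap site into an exception-table entry):

/* Sketch of a PAX_REFCOUNT-style overflow-trapping add: on signed
 * overflow, undo the add and raise #OF via "int $4" for the kernel's
 * overflow handler to catch. */
static inline void atomic64_add_checked(long i, long *counter)
{
	asm volatile("lock addq %1,%0\n\t"
		     "jno 0f\n\t"		/* no overflow: fall through */
		     "lock subq %1,%0\n\t"	/* overflow: roll the add back */
		     "int $4\n"			/* raise the overflow trap */
		     "0:"
		     : "+m" (*counter)
		     : "er" (i)
		     : "cc", "memory");
}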
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
-index 1775d6e..c312a36 100644
+index 1775d6e..f84af0c 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -38,7 +38,7 @@
@@ -12045,6 +12300,24 @@ index 1775d6e..c312a36 100644
{
asm("bsf %1,%0"
: "=r" (word)
+@@ -372,7 +372,7 @@ static inline unsigned long ffz(unsigned long word)
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+-static inline unsigned long __fls(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+ asm("bsr %1,%0"
+ : "=r" (word)
+@@ -419,7 +419,7 @@ static inline int ffs(int x)
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+-static inline int fls(int x)
++static inline int __intentional_overflow(-1) fls(int x)
+ {
+ int r;
+ #ifdef CONFIG_X86_CMOV
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 5e1a2ee..c9f9533 100644
--- a/arch/x86/include/asm/boot.h
@@ -12099,6 +12372,178 @@ index 4e12668..501d239 100644
else if (pg_flags == _PGMT_WC)
return _PAGE_CACHE_WC;
else if (pg_flags == _PGMT_UC_MINUS)
+diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
+index a9e3a74..44966f3 100644
+--- a/arch/x86/include/asm/calling.h
++++ b/arch/x86/include/asm/calling.h
+@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
+ #define RSP (152)
+ #define SS (160)
+
+-#define ARGOFFSET R11
+-#define SWFRAME ORIG_RAX
++#define ARGOFFSET R15
+
+ .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
+- subq $9*8+\addskip, %rsp
+- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
+- movq_cfi rdi, 8*8
+- movq_cfi rsi, 7*8
+- movq_cfi rdx, 6*8
++ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
++ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
++ movq_cfi rdi, RDI
++ movq_cfi rsi, RSI
++ movq_cfi rdx, RDX
+
+ .if \save_rcx
+- movq_cfi rcx, 5*8
++ movq_cfi rcx, RCX
+ .endif
+
+- movq_cfi rax, 4*8
++ movq_cfi rax, RAX
+
+ .if \save_r891011
+- movq_cfi r8, 3*8
+- movq_cfi r9, 2*8
+- movq_cfi r10, 1*8
+- movq_cfi r11, 0*8
++ movq_cfi r8, R8
++ movq_cfi r9, R9
++ movq_cfi r10, R10
++ movq_cfi r11, R11
+ .endif
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
+ .endm
+
+-#define ARG_SKIP (9*8)
++#define ARG_SKIP ORIG_RAX
+
+ .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
+ rstor_r8910=1, rstor_rdx=1
++
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12, r12
++#endif
++
+ .if \rstor_r11
+- movq_cfi_restore 0*8, r11
++ movq_cfi_restore R11, r11
+ .endif
+
+ .if \rstor_r8910
+- movq_cfi_restore 1*8, r10
+- movq_cfi_restore 2*8, r9
+- movq_cfi_restore 3*8, r8
++ movq_cfi_restore R10, r10
++ movq_cfi_restore R9, r9
++ movq_cfi_restore R8, r8
+ .endif
+
+ .if \rstor_rax
+- movq_cfi_restore 4*8, rax
++ movq_cfi_restore RAX, rax
+ .endif
+
+ .if \rstor_rcx
+- movq_cfi_restore 5*8, rcx
++ movq_cfi_restore RCX, rcx
+ .endif
+
+ .if \rstor_rdx
+- movq_cfi_restore 6*8, rdx
++ movq_cfi_restore RDX, rdx
+ .endif
+
+- movq_cfi_restore 7*8, rsi
+- movq_cfi_restore 8*8, rdi
++ movq_cfi_restore RSI, rsi
++ movq_cfi_restore RDI, rdi
+
+- .if ARG_SKIP+\addskip > 0
+- addq $ARG_SKIP+\addskip, %rsp
+- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
++ .if ORIG_RAX+\addskip > 0
++ addq $ORIG_RAX+\addskip, %rsp
++ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
+ .endif
+ .endm
+
+- .macro LOAD_ARGS offset, skiprax=0
+- movq \offset(%rsp), %r11
+- movq \offset+8(%rsp), %r10
+- movq \offset+16(%rsp), %r9
+- movq \offset+24(%rsp), %r8
+- movq \offset+40(%rsp), %rcx
+- movq \offset+48(%rsp), %rdx
+- movq \offset+56(%rsp), %rsi
+- movq \offset+64(%rsp), %rdi
++ .macro LOAD_ARGS skiprax=0
++ movq R11(%rsp), %r11
++ movq R10(%rsp), %r10
++ movq R9(%rsp), %r9
++ movq R8(%rsp), %r8
++ movq RCX(%rsp), %rcx
++ movq RDX(%rsp), %rdx
++ movq RSI(%rsp), %rsi
++ movq RDI(%rsp), %rdi
+ .if \skiprax
+ .else
+- movq \offset+72(%rsp), %rax
++ movq RAX(%rsp), %rax
+ .endif
+ .endm
+
+-#define REST_SKIP (6*8)
+-
+ .macro SAVE_REST
+- subq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET REST_SKIP
+- movq_cfi rbx, 5*8
+- movq_cfi rbp, 4*8
+- movq_cfi r12, 3*8
+- movq_cfi r13, 2*8
+- movq_cfi r14, 1*8
+- movq_cfi r15, 0*8
++ movq_cfi rbx, RBX
++ movq_cfi rbp, RBP
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
++ movq_cfi r13, R13
++ movq_cfi r14, R14
++ movq_cfi r15, R15
+ .endm
+
+ .macro RESTORE_REST
+- movq_cfi_restore 0*8, r15
+- movq_cfi_restore 1*8, r14
+- movq_cfi_restore 2*8, r13
+- movq_cfi_restore 3*8, r12
+- movq_cfi_restore 4*8, rbp
+- movq_cfi_restore 5*8, rbx
+- addq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
++ movq_cfi_restore R15, r15
++ movq_cfi_restore R14, r14
++ movq_cfi_restore R13, r13
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12, r12
++#endif
++
++ movq_cfi_restore RBP, rbp
++ movq_cfi_restore RBX, rbx
+ .endm
+
+ .macro SAVE_ALL
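The calling.h rewrite above replaces the bare n*8 stack offsets in SAVE_ARGS/RESTORE_ARGS/SAVE_REST with the named pt_regs slot constants, moves ARGOFFSET from R11 to R15 so a full pt_regs frame is always reserved, and saves/restores %r12 exactly when CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR owns it as the mask register. For reference, a C struct whose field offsets line up with the constants used above (a reading aid, not kernel code):

/* Offsets implied by the constants above: R15 = 0 ... SS = 160,
 * 21 slots of 8 bytes. Field names mirror the macros. */
struct pt_regs_slots {
	unsigned long r15, r14, r13, r12, rbp, rbx;	/*   0 ..  40 */
	unsigned long r11, r10, r9, r8;			/*  48 ..  72 */
	unsigned long rax, rcx, rdx, rsi, rdi;		/*  80 .. 112 */
	unsigned long orig_rax;				/* 120 */
	unsigned long rip, cs, eflags, rsp, ss;		/* 128 .. 160 */
};
_Static_assert(sizeof(struct pt_regs_slots) == 168, "21 slots of 8 bytes");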
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 46fc474..b02b0f9 100644
--- a/arch/x86/include/asm/checksum_32.h
@@ -14132,6 +14577,18 @@ index f7c89e2..9962bae 100644
};
static inline void get_aperfmperf(struct aperfmperf *am)
+diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
+index 7b0a55a..ad115bf 100644
+--- a/arch/x86/include/asm/ptrace-abi.h
++++ b/arch/x86/include/asm/ptrace-abi.h
+@@ -49,7 +49,6 @@
+ #define EFLAGS 144
+ #define RSP 152
+ #define SS 160
+-#define ARGOFFSET R11
+ #endif /* __ASSEMBLY__ */
+
+ /* top of stack page */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 3b96fd4..8790004 100644
--- a/arch/x86/include/asm/ptrace.h
@@ -18177,7 +18634,7 @@ index d2d488b8..a4f589f 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 6274f5f..9337430 100644
+index 6274f5f..65df16d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,8 @@
@@ -18550,18 +19007,64 @@ index 6274f5f..9337430 100644
.endm
/*
-@@ -319,7 +606,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -301,25 +588,26 @@ ENDPROC(native_usergs_sysret64)
+ /* save partial stack frame */
+ .macro SAVE_ARGS_IRQ
+ cld
+- /* start from rbp in pt_regs and jump over */
+- movq_cfi rdi, RDI-RBP
+- movq_cfi rsi, RSI-RBP
+- movq_cfi rdx, RDX-RBP
+- movq_cfi rcx, RCX-RBP
+- movq_cfi rax, RAX-RBP
+- movq_cfi r8, R8-RBP
+- movq_cfi r9, R9-RBP
+- movq_cfi r10, R10-RBP
+- movq_cfi r11, R11-RBP
++ /* start from r15 in pt_regs and jump over */
++ movq_cfi rdi, RDI
++ movq_cfi rsi, RSI
++ movq_cfi rdx, RDX
++ movq_cfi rcx, RCX
++ movq_cfi rax, RAX
++ movq_cfi r8, R8
++ movq_cfi r9, R9
++ movq_cfi r10, R10
++ movq_cfi r11, R11
++ movq_cfi r12, R12
+
+ /* Save rbp so that we can unwind from get_irq_regs() */
+- movq_cfi rbp, 0
++ movq_cfi rbp, RBP
+
+ /* Save previous stack value */
movq %rsp, %rsi
- leaq -RBP(%rsp),%rdi /* arg1 for handler */
+- leaq -RBP(%rsp),%rdi /* arg1 for handler */
- testl $3, CS(%rdi)
-+ testb $3, CS(%rdi)
++ movq %rsp,%rdi /* arg1 for handler */
++ testb $3, CS(%rsi)
je 1f
SWAPGS
/*
-@@ -355,9 +642,10 @@ ENTRY(save_rest)
+@@ -345,19 +633,22 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ ENTRY(save_rest)
+- PARTIAL_FRAME 1 REST_SKIP+8
+- movq 5*8+16(%rsp), %r11 /* save return address */
++ PARTIAL_FRAME 1 8
+ movq_cfi rbx, RBX+16
+ movq_cfi rbp, RBP+16
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ movq_cfi r12, R12+16
++#endif
++
+ movq_cfi r13, R13+16
+ movq_cfi r14, R14+16
movq_cfi r15, R15+16
- movq %r11, 8(%rsp) /* return address */
+- movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
+ pax_force_retaddr
ret
@@ -18571,7 +19074,7 @@ index 6274f5f..9337430 100644
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
-@@ -386,9 +674,10 @@ ENTRY(save_paranoid)
+@@ -386,9 +677,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -18584,7 +19087,7 @@ index 6274f5f..9337430 100644
.popsection
/*
-@@ -410,7 +699,7 @@ ENTRY(ret_from_fork)
+@@ -410,7 +702,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -18593,7 +19096,7 @@ index 6274f5f..9337430 100644
je int_ret_from_sys_call
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -420,7 +709,7 @@ ENTRY(ret_from_fork)
+@@ -420,7 +712,7 @@ ENTRY(ret_from_fork)
jmp ret_from_sys_call # go to the SYSRET fastpath
CFI_ENDPROC
@@ -18602,7 +19105,7 @@ index 6274f5f..9337430 100644
/*
* System call entry. Up to 6 arguments in registers are supported.
-@@ -456,7 +745,7 @@ END(ret_from_fork)
+@@ -456,7 +748,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -18611,7 +19114,7 @@ index 6274f5f..9337430 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -469,12 +758,18 @@ ENTRY(system_call_after_swapgs)
+@@ -469,12 +761,18 @@ ENTRY(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
@@ -18631,16 +19134,7 @@ index 6274f5f..9337430 100644
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
-@@ -484,7 +779,7 @@ ENTRY(system_call_after_swapgs)
- system_call_fastpath:
- cmpq $__NR_syscall_max,%rax
- ja badsys
-- movq %r10,%rcx
-+ movq R10-ARGOFFSET(%rsp),%rcx
- call *sys_call_table(,%rax,8) # XXX: rip relative
- movq %rax,RAX-ARGOFFSET(%rsp)
- /*
-@@ -503,6 +798,8 @@ sysret_check:
+@@ -503,6 +801,8 @@ sysret_check:
andl %edi,%edx
jnz sysret_careful
CFI_REMEMBER_STATE
@@ -18649,15 +19143,7 @@ index 6274f5f..9337430 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -554,14 +851,18 @@ badsys:
- * jump back to the normal fast path.
- */
- auditsys:
-- movq %r10,%r9 /* 6th arg: 4th syscall arg */
-+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
- movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
- movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
- movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
+@@ -561,6 +861,9 @@ auditsys:
movq %rax,%rsi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
call audit_syscall_entry
@@ -18665,11 +19151,9 @@ index 6274f5f..9337430 100644
+ pax_erase_kstack
+
LOAD_ARGS 0 /* reload call-clobbered registers */
-+ pax_set_fptr_mask
jmp system_call_fastpath
- /*
-@@ -591,16 +892,20 @@ tracesys:
+@@ -591,12 +894,15 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -18681,17 +19165,12 @@ index 6274f5f..9337430 100644
* We don't reload %rax because syscall_trace_enter() returned
* the value it wants us to use in the table lookup.
*/
- LOAD_ARGS ARGOFFSET, 1
-+ pax_set_fptr_mask
+- LOAD_ARGS ARGOFFSET, 1
++ LOAD_ARGS 1
RESTORE_REST
cmpq $__NR_syscall_max,%rax
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
-- movq %r10,%rcx /* fixup for C */
-+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
- call *sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
- /* Use IRET because user could have changed frame */
-@@ -612,7 +917,7 @@ tracesys:
+@@ -612,7 +918,7 @@ tracesys:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -18700,7 +19179,7 @@ index 6274f5f..9337430 100644
je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
-@@ -623,7 +928,9 @@ GLOBAL(int_with_check)
+@@ -623,7 +929,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
@@ -18711,7 +19190,7 @@ index 6274f5f..9337430 100644
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
-@@ -669,7 +976,7 @@ int_restore_rest:
+@@ -669,7 +977,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -18720,7 +19199,15 @@ index 6274f5f..9337430 100644
/*
* Certain special system calls that need to save a complete full stack frame.
-@@ -685,7 +992,7 @@ ENTRY(\label)
+@@ -677,15 +985,13 @@ END(system_call)
+ .macro PTREGSCALL label,func,arg
+ ENTRY(\label)
+ PARTIAL_FRAME 1 8 /* offset 8: return address */
+- subq $REST_SKIP, %rsp
+- CFI_ADJUST_CFA_OFFSET REST_SKIP
+ call save_rest
+ DEFAULT_FRAME 0 8 /* offset 8: return address */
+ leaq 8(%rsp), \arg /* pt_regs pointer */
call \func
jmp ptregscall_common
CFI_ENDPROC
@@ -18729,19 +19216,27 @@ index 6274f5f..9337430 100644
.endm
PTREGSCALL stub_clone, sys_clone, %r8
-@@ -703,9 +1010,10 @@ ENTRY(ptregscall_common)
+@@ -700,12 +1006,17 @@ ENTRY(ptregscall_common)
+ movq_cfi_restore R15+8, r15
+ movq_cfi_restore R14+8, r14
+ movq_cfi_restore R13+8, r13
++
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
movq_cfi_restore R12+8, r12
++#endif
++
movq_cfi_restore RBP+8, rbp
movq_cfi_restore RBX+8, rbx
+- ret $REST_SKIP /* pop extended registers */
+ pax_force_retaddr
- ret $REST_SKIP /* pop extended registers */
++ ret
CFI_ENDPROC
-END(ptregscall_common)
+ENDPROC(ptregscall_common)
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -720,7 +1028,7 @@ ENTRY(stub_execve)
+@@ -720,7 +1031,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -18750,7 +19245,7 @@ index 6274f5f..9337430 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -738,7 +1046,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -738,7 +1049,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -18759,7 +19254,7 @@ index 6274f5f..9337430 100644
/*
* Build the entry stubs and pointer table with some assembler magic.
-@@ -773,7 +1081,7 @@ vector=vector+1
+@@ -773,7 +1084,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -18768,9 +19263,14 @@ index 6274f5f..9337430 100644
.previous
END(interrupt)
-@@ -793,6 +1101,16 @@ END(interrupt)
- subq $ORIG_RAX-RBP, %rsp
- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
+@@ -790,9 +1101,19 @@ END(interrupt)
+ /* 0(%rsp): ~(interrupt number) */
+ .macro interrupt func
+ /* reserve pt_regs for scratch regs and rbp */
+- subq $ORIG_RAX-RBP, %rsp
+- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
++ subq $ORIG_RAX, %rsp
++ CFI_ADJUST_CFA_OFFSET ORIG_RAX
SAVE_ARGS_IRQ
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ testb $3, CS(%rdi)
@@ -18785,7 +19285,15 @@ index 6274f5f..9337430 100644
call \func
.endm
-@@ -824,7 +1142,7 @@ ret_from_intr:
+@@ -818,13 +1139,13 @@ ret_from_intr:
+ /* Restore saved previous stack */
+ popq %rsi
+ CFI_DEF_CFA_REGISTER rsi
+- leaq ARGOFFSET-RBP(%rsi), %rsp
++ movq %rsi, %rsp
+ CFI_DEF_CFA_REGISTER rsp
+- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
++ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -18794,7 +19302,7 @@ index 6274f5f..9337430 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -846,12 +1164,16 @@ retint_swapgs: /* return to user-space */
+@@ -846,12 +1167,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -18811,7 +19319,7 @@ index 6274f5f..9337430 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -940,7 +1262,7 @@ ENTRY(retint_kernel)
+@@ -940,7 +1265,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -18820,7 +19328,7 @@ index 6274f5f..9337430 100644
/*
* End of kprobes section
*/
-@@ -956,7 +1278,7 @@ ENTRY(\sym)
+@@ -956,7 +1281,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -18829,7 +19337,7 @@ index 6274f5f..9337430 100644
.endm
#ifdef CONFIG_SMP
-@@ -1021,12 +1343,22 @@ ENTRY(\sym)
+@@ -1021,12 +1346,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -18853,7 +19361,7 @@ index 6274f5f..9337430 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1038,15 +1370,25 @@ ENTRY(\sym)
+@@ -1038,15 +1373,25 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -18877,11 +19385,11 @@ index 6274f5f..9337430 100644
.endm
-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
-+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
++#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1056,14 +1398,30 @@ ENTRY(\sym)
+@@ -1056,14 +1401,30 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -18898,10 +19406,10 @@ index 6274f5f..9337430 100644
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
+#ifdef CONFIG_SMP
-+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
-+ lea init_tss(%r12), %r12
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
++ lea init_tss(%r13), %r13
+#else
-+ lea init_tss(%rip), %r12
++ lea init_tss(%rip), %r13
+#endif
subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
call \do_sym
@@ -18913,7 +19421,7 @@ index 6274f5f..9337430 100644
.endm
.macro errorentry sym do_sym
-@@ -1074,13 +1432,23 @@ ENTRY(\sym)
+@@ -1074,13 +1435,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -18938,7 +19446,7 @@ index 6274f5f..9337430 100644
.endm
/* error code is on the stack already */
-@@ -1093,13 +1461,23 @@ ENTRY(\sym)
+@@ -1093,13 +1464,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -18963,7 +19471,7 @@ index 6274f5f..9337430 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1129,9 +1507,10 @@ gs_change:
+@@ -1129,9 +1510,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -18975,7 +19483,7 @@ index 6274f5f..9337430 100644
.section __ex_table,"a"
.align 8
-@@ -1153,13 +1532,14 @@ ENTRY(kernel_thread_helper)
+@@ -1153,13 +1535,14 @@ ENTRY(kernel_thread_helper)
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
*/
@@ -18991,7 +19499,7 @@ index 6274f5f..9337430 100644
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-@@ -1186,11 +1566,11 @@ ENTRY(kernel_execve)
+@@ -1186,11 +1569,11 @@ ENTRY(kernel_execve)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
@@ -19005,7 +19513,7 @@ index 6274f5f..9337430 100644
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
-@@ -1208,9 +1588,10 @@ ENTRY(call_softirq)
+@@ -1208,9 +1591,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -19017,7 +19525,7 @@ index 6274f5f..9337430 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1248,7 +1629,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1248,7 +1632,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -19026,7 +19534,7 @@ index 6274f5f..9337430 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1307,7 +1688,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1307,7 +1691,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -19035,7 +19543,7 @@ index 6274f5f..9337430 100644
apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1356,16 +1737,31 @@ ENTRY(paranoid_exit)
+@@ -1356,16 +1740,31 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
@@ -19068,7 +19576,7 @@ index 6274f5f..9337430 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1394,7 +1790,7 @@ paranoid_schedule:
+@@ -1394,7 +1793,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -19077,7 +19585,7 @@ index 6274f5f..9337430 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1421,12 +1817,13 @@ ENTRY(error_entry)
+@@ -1421,12 +1820,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -19092,7 +19600,7 @@ index 6274f5f..9337430 100644
ret
/*
-@@ -1453,7 +1850,7 @@ bstep_iret:
+@@ -1453,7 +1853,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -19101,7 +19609,7 @@ index 6274f5f..9337430 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1473,7 +1870,7 @@ ENTRY(error_exit)
+@@ -1473,7 +1873,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -19110,7 +19618,7 @@ index 6274f5f..9337430 100644
/* runs on exception stack */
-@@ -1485,6 +1882,17 @@ ENTRY(nmi)
+@@ -1485,6 +1885,17 @@ ENTRY(nmi)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
DEFAULT_FRAME 0
@@ -19128,7 +19636,7 @@ index 6274f5f..9337430 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1495,12 +1903,28 @@ ENTRY(nmi)
+@@ -1495,12 +1906,28 @@ ENTRY(nmi)
DISABLE_INTERRUPTS(CLBR_NONE)
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
@@ -19158,7 +19666,7 @@ index 6274f5f..9337430 100644
jmp irq_return
nmi_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1529,14 +1953,14 @@ nmi_schedule:
+@@ -1529,14 +1956,14 @@ nmi_schedule:
jmp paranoid_exit
CFI_ENDPROC
#endif
@@ -19695,7 +20203,7 @@ index ce0be7c..1252d68 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index e11e394..599d09a 100644
+index e11e394..0a8c254 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,8 @@
@@ -19720,7 +20228,7 @@ index e11e394..599d09a 100644
.text
__HEAD
-@@ -85,35 +93,22 @@ startup_64:
+@@ -85,35 +93,23 @@ startup_64:
*/
addq %rbp, init_level4_pgt + 0(%rip)
addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
@@ -19737,11 +20245,12 @@ index e11e394..599d09a 100644
- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
-
-- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
++
+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
+-
- /* Add an Identity mapping if I am above 1G */
- leaq _text(%rip), %rdi
- andq $PMD_PAGE_MASK, %rdi
@@ -19766,7 +20275,7 @@ index e11e394..599d09a 100644
/*
* Fixup the kernel text+data virtual addresses. Note that
-@@ -160,8 +155,8 @@ ENTRY(secondary_startup_64)
+@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
* after the boot processor executes this code.
*/
@@ -19777,7 +20286,7 @@ index e11e394..599d09a 100644
movq %rax, %cr4
/* Setup early boot stage 4 level pagetables. */
-@@ -183,9 +178,17 @@ ENTRY(secondary_startup_64)
+@@ -183,9 +179,18 @@ ENTRY(secondary_startup_64)
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_SCE, %eax /* Enable System Call */
@@ -19785,18 +20294,19 @@ index e11e394..599d09a 100644
+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
jnc 1f
btsl $_EFER_NX, %eax
-+ leaq init_level4_pgt(%rip), %rdi
+#ifndef CONFIG_EFI
-+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
+#endif
-+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
-+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
-+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -247,6 +250,7 @@ ENTRY(secondary_startup_64)
+@@ -247,6 +252,7 @@ ENTRY(secondary_startup_64)
* jump. In addition we need to ensure %cs is set so we make this
* a far return.
*/
@@ -19804,7 +20314,7 @@ index e11e394..599d09a 100644
movq initial_code(%rip),%rax
pushq $0 # fake return address to stop unwinder
pushq $__KERNEL_CS # set correct cs
-@@ -269,7 +273,7 @@ ENTRY(secondary_startup_64)
+@@ -269,7 +275,7 @@ ENTRY(secondary_startup_64)
bad_address:
jmp bad_address
@@ -19813,7 +20323,7 @@ index e11e394..599d09a 100644
#ifdef CONFIG_EARLY_PRINTK
.globl early_idt_handlers
early_idt_handlers:
-@@ -314,18 +318,23 @@ ENTRY(early_idt_handler)
+@@ -314,18 +320,23 @@ ENTRY(early_idt_handler)
#endif /* EARLY_PRINTK */
1: hlt
jmp 1b
@@ -19838,7 +20348,7 @@ index e11e394..599d09a 100644
#define NEXT_PAGE(name) \
.balign PAGE_SIZE; \
ENTRY(name)
-@@ -338,7 +347,6 @@ ENTRY(name)
+@@ -338,7 +349,6 @@ ENTRY(name)
i = i + 1 ; \
.endr
@@ -19846,7 +20356,7 @@ index e11e394..599d09a 100644
/*
* This default setting generates an ident mapping at address 0x100000
* and a mapping for the kernel that precisely maps virtual address
-@@ -349,13 +357,41 @@ NEXT_PAGE(init_level4_pgt)
+@@ -349,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
.org init_level4_pgt + L4_PAGE_OFFSET*8, 0
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
@@ -19888,7 +20398,7 @@ index e11e394..599d09a 100644
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
-@@ -363,20 +399,23 @@ NEXT_PAGE(level3_kernel_pgt)
+@@ -363,20 +401,27 @@ NEXT_PAGE(level3_kernel_pgt)
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -19896,21 +20406,22 @@ index e11e394..599d09a 100644
+ .fill 512,8,0
+
NEXT_PAGE(level2_fixmap_pgt)
-- .fill 506,8,0
-- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .fill 506,8,0
+ .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
-+ .fill 507,8,0
+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
+ .fill 4,8,0
--NEXT_PAGE(level1_fixmap_pgt)
-+NEXT_PAGE(level1_vsyscall_pgt)
+ NEXT_PAGE(level1_fixmap_pgt)
.fill 512,8,0
-NEXT_PAGE(level2_ident_pgt)
- /* Since I easily can, map the first 1G.
++NEXT_PAGE(level1_vsyscall_pgt)
++ .fill 512,8,0
++
+ /* Since I easily can, map the first 2G.
* Don't set NX because code runs from these pages.
*/
@@ -19920,7 +20431,7 @@ index e11e394..599d09a 100644
NEXT_PAGE(level2_kernel_pgt)
/*
-@@ -389,35 +428,56 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -389,35 +434,56 @@ NEXT_PAGE(level2_kernel_pgt)
* If you want to increase this then increase MODULES_VADDR
* too.)
*/
@@ -24556,7 +25067,7 @@ index 1e572c5..2a162cd 100644
CFI_ENDPROC
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
-index 01c805b..dccb07f 100644
+index 01c805b..16da7cf 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -9,6 +9,7 @@ copy_page_c:
@@ -24567,43 +25078,59 @@ index 01c805b..dccb07f 100644
ret
CFI_ENDPROC
ENDPROC(copy_page_c)
-@@ -39,7 +40,7 @@ ENTRY(copy_page)
- movq 16 (%rsi), %rdx
- movq 24 (%rsi), %r8
+@@ -24,7 +25,7 @@ ENTRY(copy_page)
+ CFI_ADJUST_CFA_OFFSET 3*8
+ movq %rbx,(%rsp)
+ CFI_REL_OFFSET rbx, 0
+- movq %r12,1*8(%rsp)
++ movq %r14,1*8(%rsp)
+ CFI_REL_OFFSET r12, 1*8
+ movq %r13,2*8(%rsp)
+ CFI_REL_OFFSET r13, 2*8
+@@ -41,7 +42,7 @@ ENTRY(copy_page)
movq 32 (%rsi), %r9
-- movq 40 (%rsi), %r10
-+ movq 40 (%rsi), %r13
+ movq 40 (%rsi), %r10
movq 48 (%rsi), %r11
- movq 56 (%rsi), %r12
+- movq 56 (%rsi), %r12
++ movq 56 (%rsi), %r14
+
+ prefetcht0 5*64(%rsi)
-@@ -50,7 +51,7 @@ ENTRY(copy_page)
- movq %rdx, 16 (%rdi)
- movq %r8, 24 (%rdi)
+@@ -52,7 +53,7 @@ ENTRY(copy_page)
movq %r9, 32 (%rdi)
-- movq %r10, 40 (%rdi)
-+ movq %r13, 40 (%rdi)
+ movq %r10, 40 (%rdi)
movq %r11, 48 (%rdi)
- movq %r12, 56 (%rdi)
+- movq %r12, 56 (%rdi)
++ movq %r14, 56 (%rdi)
-@@ -69,7 +70,7 @@ ENTRY(copy_page)
- movq 16 (%rsi), %rdx
- movq 24 (%rsi), %r8
+ leaq 64 (%rsi), %rsi
+ leaq 64 (%rdi), %rdi
+@@ -71,7 +72,7 @@ ENTRY(copy_page)
movq 32 (%rsi), %r9
-- movq 40 (%rsi), %r10
-+ movq 40 (%rsi), %r13
+ movq 40 (%rsi), %r10
movq 48 (%rsi), %r11
- movq 56 (%rsi), %r12
+- movq 56 (%rsi), %r12
++ movq 56 (%rsi), %r14
-@@ -78,7 +79,7 @@ ENTRY(copy_page)
- movq %rdx, 16 (%rdi)
- movq %r8, 24 (%rdi)
+ movq %rax, (%rdi)
+ movq %rbx, 8 (%rdi)
+@@ -80,7 +81,7 @@ ENTRY(copy_page)
movq %r9, 32 (%rdi)
-- movq %r10, 40 (%rdi)
-+ movq %r13, 40 (%rdi)
+ movq %r10, 40 (%rdi)
movq %r11, 48 (%rdi)
- movq %r12, 56 (%rdi)
+- movq %r12, 56 (%rdi)
++ movq %r14, 56 (%rdi)
-@@ -95,6 +96,7 @@ ENTRY(copy_page)
+ leaq 64(%rdi),%rdi
+ leaq 64(%rsi),%rsi
+@@ -89,12 +90,13 @@ ENTRY(copy_page)
+
+ movq (%rsp),%rbx
+ CFI_RESTORE rbx
+- movq 1*8(%rsp),%r12
++ movq 1*8(%rsp),%r14
+ CFI_RESTORE r12
+ movq 2*8(%rsp),%r13
CFI_RESTORE r13
addq $3*8,%rsp
CFI_ADJUST_CFA_OFFSET -3*8
@@ -24621,7 +25148,7 @@ index 01c805b..dccb07f 100644
.byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
2:
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
-index 0248402..821c786 100644
+index 0248402..416b737 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,6 +16,7 @@
@@ -24692,30 +25219,6 @@ index 0248402..821c786 100644
ret
CFI_ENDPROC
ENDPROC(bad_from_user)
-@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
- jz 17f
- 1: movq (%rsi),%r8
- 2: movq 1*8(%rsi),%r9
--3: movq 2*8(%rsi),%r10
-+3: movq 2*8(%rsi),%rax
- 4: movq 3*8(%rsi),%r11
- 5: movq %r8,(%rdi)
- 6: movq %r9,1*8(%rdi)
--7: movq %r10,2*8(%rdi)
-+7: movq %rax,2*8(%rdi)
- 8: movq %r11,3*8(%rdi)
- 9: movq 4*8(%rsi),%r8
- 10: movq 5*8(%rsi),%r9
--11: movq 6*8(%rsi),%r10
-+11: movq 6*8(%rsi),%rax
- 12: movq 7*8(%rsi),%r11
- 13: movq %r8,4*8(%rdi)
- 14: movq %r9,5*8(%rdi)
--15: movq %r10,6*8(%rdi)
-+15: movq %rax,6*8(%rdi)
- 16: movq %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
decl %ecx
jnz 21b
@@ -24741,7 +25244,7 @@ index 0248402..821c786 100644
.section .fixup,"ax"
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
-index cb0c112..61e0020 100644
+index cb0c112..cb2d3c5 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -8,12 +8,14 @@
@@ -24775,30 +25278,6 @@ index cb0c112..61e0020 100644
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
-@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
- jz 17f
- 1: movq (%rsi),%r8
- 2: movq 1*8(%rsi),%r9
--3: movq 2*8(%rsi),%r10
-+3: movq 2*8(%rsi),%rax
- 4: movq 3*8(%rsi),%r11
- 5: movnti %r8,(%rdi)
- 6: movnti %r9,1*8(%rdi)
--7: movnti %r10,2*8(%rdi)
-+7: movnti %rax,2*8(%rdi)
- 8: movnti %r11,3*8(%rdi)
- 9: movq 4*8(%rsi),%r8
- 10: movq 5*8(%rsi),%r9
--11: movq 6*8(%rsi),%r10
-+11: movq 6*8(%rsi),%rax
- 12: movq 7*8(%rsi),%r11
- 13: movnti %r8,4*8(%rdi)
- 14: movnti %r9,5*8(%rdi)
--15: movnti %r10,6*8(%rdi)
-+15: movnti %rax,6*8(%rdi)
- 16: movnti %r11,7*8(%rdi)
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
jnz 21b
23: xorl %eax,%eax
@@ -24808,7 +25287,7 @@ index cb0c112..61e0020 100644
.section .fixup,"ax"
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
-index fb903b7..c92b7f7 100644
+index fb903b7..83cc6fb 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -8,6 +8,7 @@
@@ -24819,11 +25298,62 @@ index fb903b7..c92b7f7 100644
/*
* Checksum copy with exception handling.
+@@ -64,8 +65,8 @@ ENTRY(csum_partial_copy_generic)
+ CFI_ADJUST_CFA_OFFSET 7*8
+ movq %rbx, 2*8(%rsp)
+ CFI_REL_OFFSET rbx, 2*8
+- movq %r12, 3*8(%rsp)
+- CFI_REL_OFFSET r12, 3*8
++ movq %r15, 3*8(%rsp)
++ CFI_REL_OFFSET r15, 3*8
+ movq %r14, 4*8(%rsp)
+ CFI_REL_OFFSET r14, 4*8
+ movq %r13, 5*8(%rsp)
+@@ -80,16 +81,16 @@ ENTRY(csum_partial_copy_generic)
+ movl %edx, %ecx
+
+ xorl %r9d, %r9d
+- movq %rcx, %r12
++ movq %rcx, %r15
+
+- shrq $6, %r12
++ shrq $6, %r15
+ jz .Lhandle_tail /* < 64 */
+
+ clc
+
+ /* main loop. clear in 64 byte blocks */
+ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
+- /* r11: temp3, rdx: temp4, r12 loopcnt */
++ /* r11: temp3, rdx: temp4, r15 loopcnt */
+ /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ .p2align 4
+ .Lloop:
+@@ -123,7 +124,7 @@ ENTRY(csum_partial_copy_generic)
+ adcq %r14, %rax
+ adcq %r13, %rax
+
+- decl %r12d
++ decl %r15d
+
+ dest
+ movq %rbx, (%rsi)
+@@ -218,8 +219,8 @@ ENTRY(csum_partial_copy_generic)
+ .Lende:
+ movq 2*8(%rsp), %rbx
+ CFI_RESTORE rbx
+- movq 3*8(%rsp), %r12
+- CFI_RESTORE r12
++ movq 3*8(%rsp), %r15
++ CFI_RESTORE r15
+ movq 4*8(%rsp), %r14
+ CFI_RESTORE r14
+ movq 5*8(%rsp), %r13
@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
CFI_RESTORE rbp
addq $7*8, %rsp
CFI_ADJUST_CFA_OFFSET -7*8
-+ pax_force_retaddr 0, 1
++ pax_force_retaddr
ret
CFI_RESTORE_STATE
@@ -25024,7 +25554,7 @@ index 05a95e7..326f2fa 100644
CFI_ENDPROC
ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
-index efbf2a0..8893637 100644
+index efbf2a0..8090894 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -34,6 +34,7 @@
@@ -25043,48 +25573,9 @@ index efbf2a0..8893637 100644
ret
.Lmemcpy_e_e:
.previous
-@@ -81,13 +83,13 @@ ENTRY(memcpy)
- */
- movq 0*8(%rsi), %r8
- movq 1*8(%rsi), %r9
-- movq 2*8(%rsi), %r10
-+ movq 2*8(%rsi), %rcx
- movq 3*8(%rsi), %r11
- leaq 4*8(%rsi), %rsi
-
- movq %r8, 0*8(%rdi)
- movq %r9, 1*8(%rdi)
-- movq %r10, 2*8(%rdi)
-+ movq %rcx, 2*8(%rdi)
- movq %r11, 3*8(%rdi)
- leaq 4*8(%rdi), %rdi
- jae .Lcopy_forward_loop
-@@ -110,12 +112,12 @@ ENTRY(memcpy)
- subq $0x20, %rdx
- movq -1*8(%rsi), %r8
- movq -2*8(%rsi), %r9
-- movq -3*8(%rsi), %r10
-+ movq -3*8(%rsi), %rcx
- movq -4*8(%rsi), %r11
- leaq -4*8(%rsi), %rsi
- movq %r8, -1*8(%rdi)
- movq %r9, -2*8(%rdi)
-- movq %r10, -3*8(%rdi)
-+ movq %rcx, -3*8(%rdi)
- movq %r11, -4*8(%rdi)
- leaq -4*8(%rdi), %rdi
- jae .Lcopy_backward_loop
-@@ -135,12 +137,13 @@ ENTRY(memcpy)
- */
- movq 0*8(%rsi), %r8
- movq 1*8(%rsi), %r9
-- movq -2*8(%rsi, %rdx), %r10
-+ movq -2*8(%rsi, %rdx), %rcx
- movq -1*8(%rsi, %rdx), %r11
- movq %r8, 0*8(%rdi)
+@@ -141,6 +143,7 @@ ENTRY(memcpy)
movq %r9, 1*8(%rdi)
-- movq %r10, -2*8(%rdi, %rdx)
-+ movq %rcx, -2*8(%rdi, %rdx)
+ movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
+ pax_force_retaddr
retq
@@ -25115,121 +25606,9 @@ index efbf2a0..8893637 100644
CFI_ENDPROC
ENDPROC(memcpy)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
-index ee16461..c39c199 100644
+index ee16461..c4f4918 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
-@@ -61,13 +61,13 @@ ENTRY(memmove)
- 5:
- sub $0x20, %rdx
- movq 0*8(%rsi), %r11
-- movq 1*8(%rsi), %r10
-+ movq 1*8(%rsi), %rcx
- movq 2*8(%rsi), %r9
- movq 3*8(%rsi), %r8
- leaq 4*8(%rsi), %rsi
-
- movq %r11, 0*8(%rdi)
-- movq %r10, 1*8(%rdi)
-+ movq %rcx, 1*8(%rdi)
- movq %r9, 2*8(%rdi)
- movq %r8, 3*8(%rdi)
- leaq 4*8(%rdi), %rdi
-@@ -81,10 +81,10 @@ ENTRY(memmove)
- 4:
- movq %rdx, %rcx
- movq -8(%rsi, %rdx), %r11
-- lea -8(%rdi, %rdx), %r10
-+ lea -8(%rdi, %rdx), %r9
- shrq $3, %rcx
- rep movsq
-- movq %r11, (%r10)
-+ movq %r11, (%r9)
- jmp 13f
- .Lmemmove_end_forward:
-
-@@ -95,14 +95,14 @@ ENTRY(memmove)
- 7:
- movq %rdx, %rcx
- movq (%rsi), %r11
-- movq %rdi, %r10
-+ movq %rdi, %r9
- leaq -8(%rsi, %rdx), %rsi
- leaq -8(%rdi, %rdx), %rdi
- shrq $3, %rcx
- std
- rep movsq
- cld
-- movq %r11, (%r10)
-+ movq %r11, (%r9)
- jmp 13f
-
- /*
-@@ -127,13 +127,13 @@ ENTRY(memmove)
- 8:
- subq $0x20, %rdx
- movq -1*8(%rsi), %r11
-- movq -2*8(%rsi), %r10
-+ movq -2*8(%rsi), %rcx
- movq -3*8(%rsi), %r9
- movq -4*8(%rsi), %r8
- leaq -4*8(%rsi), %rsi
-
- movq %r11, -1*8(%rdi)
-- movq %r10, -2*8(%rdi)
-+ movq %rcx, -2*8(%rdi)
- movq %r9, -3*8(%rdi)
- movq %r8, -4*8(%rdi)
- leaq -4*8(%rdi), %rdi
-@@ -151,11 +151,11 @@ ENTRY(memmove)
- * Move data from 16 bytes to 31 bytes.
- */
- movq 0*8(%rsi), %r11
-- movq 1*8(%rsi), %r10
-+ movq 1*8(%rsi), %rcx
- movq -2*8(%rsi, %rdx), %r9
- movq -1*8(%rsi, %rdx), %r8
- movq %r11, 0*8(%rdi)
-- movq %r10, 1*8(%rdi)
-+ movq %rcx, 1*8(%rdi)
- movq %r9, -2*8(%rdi, %rdx)
- movq %r8, -1*8(%rdi, %rdx)
- jmp 13f
-@@ -167,9 +167,9 @@ ENTRY(memmove)
- * Move data from 8 bytes to 15 bytes.
- */
- movq 0*8(%rsi), %r11
-- movq -1*8(%rsi, %rdx), %r10
-+ movq -1*8(%rsi, %rdx), %r9
- movq %r11, 0*8(%rdi)
-- movq %r10, -1*8(%rdi, %rdx)
-+ movq %r9, -1*8(%rdi, %rdx)
- jmp 13f
- 10:
- cmpq $4, %rdx
-@@ -178,9 +178,9 @@ ENTRY(memmove)
- * Move data from 4 bytes to 7 bytes.
- */
- movl (%rsi), %r11d
-- movl -4(%rsi, %rdx), %r10d
-+ movl -4(%rsi, %rdx), %r9d
- movl %r11d, (%rdi)
-- movl %r10d, -4(%rdi, %rdx)
-+ movl %r9d, -4(%rdi, %rdx)
- jmp 13f
- 11:
- cmp $2, %rdx
-@@ -189,9 +189,9 @@ ENTRY(memmove)
- * Move data from 2 bytes to 3 bytes.
- */
- movw (%rsi), %r11w
-- movw -2(%rsi, %rdx), %r10w
-+ movw -2(%rsi, %rdx), %r9w
- movw %r11w, (%rdi)
-- movw %r10w, -2(%rdi, %rdx)
-+ movw %r9w, -2(%rdi, %rdx)
- jmp 13f
- 12:
- cmp $1, %rdx
@@ -202,6 +202,7 @@ ENTRY(memmove)
movb (%rsi), %r11b
movb %r11b, (%rdi)
@@ -25247,7 +25626,7 @@ index ee16461..c39c199 100644
.Lmemmove_end_forward_efs:
.previous
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
-index 79bd454..dff325a 100644
+index 79bd454..24b3780 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -31,6 +31,7 @@
@@ -25266,27 +25645,10 @@ index 79bd454..dff325a 100644
ret
.Lmemset_e_e:
.previous
-@@ -60,13 +62,13 @@
- ENTRY(memset)
- ENTRY(__memset)
- CFI_STARTPROC
-- movq %rdi,%r10
- movq %rdx,%r11
-
- /* expand byte value */
- movzbl %sil,%ecx
- movabs $0x0101010101010101,%rax
- mul %rcx /* with rax, clobbers rdx */
-+ movq %rdi,%rdx
-
- /* align dst */
- movl %edi,%r9d
-@@ -120,7 +122,8 @@ ENTRY(__memset)
- jnz .Lloop_1
+@@ -121,6 +123,7 @@ ENTRY(__memset)
.Lende:
-- movq %r10,%rax
-+ movq %rdx,%rax
+ movq %r10,%rax
+ pax_force_retaddr
ret
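The pax_force_retaddr inserted before each ret in these hunks is the assembly-side half of KERNEXEC's return-address protection: on amd64 it appears to re-assert the top bit of the saved return address so that a corrupted return can no longer land in the userland half of the address space. A minimal userspace sketch of the masking idea only — the helper name is invented here, and the constant mirrors the plugin hunks further down:

#include <stdint.h>
#include <stdio.h>

/* Illustration: setting bit 63 forces an address into the kernel half of
 * the amd64 address space, so a hijacked return address that pointed into
 * userland becomes non-canonical and faults instead of executing. */
static uint64_t force_kernel_address(uint64_t addr)
{
        return addr | 0x8000000000000000ULL;
}

int main(void)
{
        uint64_t hijacked = 0x00007f0000001234ULL; /* made-up userland address */
        printf("%#llx\n", (unsigned long long)force_kernel_address(hijacked));
        return 0;
}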
@@ -25611,7 +25973,7 @@ index c9f2d9b..e7fd2c0 100644
from += 64;
to += 64;
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
-index 69fa106..adda88b 100644
+index 69fa106..234ac7f 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -3,6 +3,7 @@
@@ -25622,34 +25984,8 @@ index 69fa106..adda88b 100644
#ifdef CONFIG_X86_64
/*
-@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
- CFI_STARTPROC
- pushq_cfi %rbx
- pushq_cfi %rbp
-- movq %rdi, %r10 /* Save pointer */
-+ movq %rdi, %r9 /* Save pointer */
- xorl %r11d, %r11d /* Return value */
- movl (%rdi), %eax
- movl 4(%rdi), %ecx
-@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
- movl 28(%rdi), %edi
- CFI_REMEMBER_STATE
- 1: \op
--2: movl %eax, (%r10)
-+2: movl %eax, (%r9)
- movl %r11d, %eax /* Return value */
-- movl %ecx, 4(%r10)
-- movl %edx, 8(%r10)
-- movl %ebx, 12(%r10)
-- movl %ebp, 20(%r10)
-- movl %esi, 24(%r10)
-- movl %edi, 28(%r10)
-+ movl %ecx, 4(%r9)
-+ movl %edx, 8(%r9)
-+ movl %ebx, 12(%r9)
-+ movl %ebp, 20(%r9)
-+ movl %esi, 24(%r9)
-+ movl %edi, 28(%r9)
+@@ -37,6 +38,7 @@ ENTRY(native_\op\()_safe_regs)
+ movl %edi, 28(%r10)
popq_cfi %rbp
popq_cfi %rbx
+ pax_force_retaddr
@@ -25912,7 +26248,7 @@ index 5dff5f0..cadebf4 100644
CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
-index a63efd6..ccecad8 100644
+index a63efd6..8149fbe 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -8,6 +8,7 @@
@@ -25923,10 +26259,30 @@ index a63efd6..ccecad8 100644
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
-@@ -41,5 +42,6 @@
- SAVE_ARGS
+@@ -15,11 +16,11 @@
+ \name:
+ CFI_STARTPROC
+
+- /* this one pushes 9 elems, the next one would be %rIP */
+- SAVE_ARGS
++ /* this one pushes 15+1 elems, the next one would be %rIP */
++ SAVE_ARGS 8
+
+ .if \put_ret_addr_in_rdi
+- movq_cfi_restore 9*8, rdi
++ movq_cfi_restore RIP, rdi
+ .endif
+
+ call \func
+@@ -38,8 +39,9 @@
+
+ /* SAVE_ARGS below is used only for the .cfi directives it contains. */
+ CFI_STARTPROC
+- SAVE_ARGS
++ SAVE_ARGS 8
restore:
- RESTORE_ARGS
+- RESTORE_ARGS
++ RESTORE_ARGS 1,8
+ pax_force_retaddr
ret
CFI_ENDPROC
@@ -27372,7 +27728,7 @@ index 53a7b69..8cc6fea 100644
+ return ret ? -EFAULT : 0;
+}
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
-index dd74e46..7d26398 100644
+index dd74e46..0970b01 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
@@ -27384,6 +27740,17 @@ index dd74e46..7d26398 100644
(void __user *)start, len)))
return 0;
+@@ -331,6 +331,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ goto slow_irqon;
+ #endif
+
++ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
++ (void __user *)start, len)))
++ return 0;
++
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
+ * needs some instrumenting to determine the common sizes used by
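The new hunk gives get_user_pages_fast() the same range validation its __get_user_pages_fast() sibling already performs above: before the page tables are walked with interrupts off, the start/len window is checked against the user address limit so a kernel-range pointer bails out early. A rough userspace model of that bounds test — TASK_SIZE_MAX here is an illustrative stand-in for the real per-arch limit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE_MAX 0x7ffffffff000ULL /* illustrative user/kernel split */

/* Overflow-safe shape of the test __access_ok() performs: the window
 * [start, start+len) must fit entirely below the user limit. */
static bool user_range_ok(uint64_t start, uint64_t len)
{
        return len <= TASK_SIZE_MAX && start <= TASK_SIZE_MAX - len;
}

int main(void)
{
        printf("%d %d\n", user_range_ok(0x1000, 0x1000),
               user_range_ok(0xffff800000000000ULL, 0x1000)); /* prints 1 0 */
        return 0;
}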
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index f4f29b1..5cac4fb 100644
--- a/arch/x86/mm/highmem_32.c
@@ -33734,7 +34101,7 @@ index da3cfee..a5a6606 100644
*ppos = i;
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index c244f0e..bb09210 100644
+index c244f0e..fc574b2 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -255,10 +255,8 @@
@@ -34469,49 +34836,28 @@ index c244f0e..bb09210 100644
}
#endif
-@@ -835,97 +916,110 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -835,97 +916,109 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
* from the primary pool to the secondary extraction pool. We make
* sure we pull enough for a 'catastrophic reseed'.
*/
+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
+- __u32 tmp[OUTPUT_POOL_WORDS];
+ if (r->limit == 0 && random_min_urandom_seed) {
+ unsigned long now = jiffies;
-+
-+ if (time_before(now,
-+ r->last_pulled + random_min_urandom_seed * HZ))
-+ return;
-+ r->last_pulled = now;
-+ }
-+ if (r->pull &&
-+ r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
-+ r->entropy_count < r->poolinfo->poolfracbits)
-+ _xfer_secondary_pool(r, nbytes);
-+}
-+
-+static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
-+{
- __u32 tmp[OUTPUT_POOL_WORDS];
- if (r->pull && r->entropy_count < nbytes * 8 &&
- r->entropy_count < r->poolinfo->POOLBITS) {
- /* If we're limited, always leave two wakeup worth's BITS */
- int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
- int bytes = nbytes;
-+ /* For /dev/random's pool, always leave two wakeups' worth */
-+ int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
-+ int bytes = nbytes;
-
+-
- /* pull at least as many as BYTES as wakeup BITS */
- bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
- /* but never more than the buffer size */
- bytes = min_t(int, bytes, sizeof(tmp));
-+ /* pull at least as much as a wakeup */
-+ bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
-+ /* but never more than the buffer size */
-+ bytes = min_t(int, bytes, sizeof(tmp));
-
+-
- DEBUG_ENT("going to reseed %s with %d bits "
- "(%d of %d requested)\n",
- r->name, bytes * 8, nbytes * 8, r->entropy_count);
@@ -34520,11 +34866,33 @@ index c244f0e..bb09210 100644
- random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes, NULL);
- credit_entropy_bits(r, bytes*8);
-- }
++ if (time_before(now,
++ r->last_pulled + random_min_urandom_seed * HZ))
++ return;
++ r->last_pulled = now;
+ }
++ if (r->pull &&
++ r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
++ r->entropy_count < r->poolinfo->poolfracbits)
++ _xfer_secondary_pool(r, nbytes);
++}
++
++static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
++{
++ __u32 tmp[OUTPUT_POOL_WORDS];
++ int bytes, min_bytes;
++
++ /* For /dev/random's pool, always leave two wakeups' worth */
++ int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
++
++ /* pull at least as much as a wakeup */
++ min_bytes = random_read_wakeup_bits / 8;
++ /* but never more than the buffer size */
++ bytes = min(sizeof(tmp), max_t(size_t, min_bytes, nbytes));
++
+ trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+ ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
-+ bytes = extract_entropy(r->pull, tmp, bytes,
-+ random_read_wakeup_bits / 8, rsvd_bytes);
++ bytes = extract_entropy(r->pull, tmp, bytes, min_bytes, rsvd_bytes);
+ mix_pool_bytes(r, tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
}
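The reseed path is split in two here: xfer_secondary_pool() now rate-limits pulls via random_min_urandom_seed and compares the fractional entropy count (shifted by ENTROPY_SHIFT) before deferring to _xfer_secondary_pool(), which clamps the transfer to at least one wakeup's worth of bytes and at most the size of the bounce buffer. The clamp in isolation, with illustrative stand-ins for OUTPUT_POOL_WORDS (32) and random_read_wakeup_bits (64):

#include <stddef.h>
#include <stdio.h>

#define OUTPUT_POOL_WORDS 32
#define WAKEUP_BITS 64 /* stands in for random_read_wakeup_bits */

/* Same min/max clamp _xfer_secondary_pool() applies to the byte count
 * pulled from the input pool into the __u32 tmp[] bounce buffer. */
static size_t clamp_pull(size_t nbytes)
{
        size_t buf = OUTPUT_POOL_WORDS * sizeof(unsigned int); /* sizeof(tmp) */
        size_t min_bytes = WAKEUP_BITS / 8;
        size_t bytes = nbytes < min_bytes ? min_bytes : nbytes;

        return bytes > buf ? buf : bytes;
}

int main(void)
{
        printf("%zu %zu %zu\n", clamp_pull(1), clamp_pull(50), clamp_pull(4096));
        /* prints 8 50 128 */
        return 0;
}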
@@ -34589,7 +34957,7 @@ index c244f0e..bb09210 100644
+ ibytes = nbytes;
+ /* If limited, never pull more than available */
+ if (r->limit)
-+ ibytes = min_t(size_t, ibytes, have_bytes - reserved);
++ ibytes = min_t(size_t, ibytes, max(0, have_bytes - reserved));
+ if (ibytes < min)
+ ibytes = 0;
+ entropy_count = max_t(int, 0,
@@ -34643,7 +35011,7 @@ index c244f0e..bb09210 100644
} hash;
__u32 workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
-@@ -938,6 +1032,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+@@ -938,6 +1031,17 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
@@ -34661,7 +35029,7 @@ index c244f0e..bb09210 100644
* We mix the hash back into the pool to prevent backtracking
* attacks (where the attacker knows the state of the pool
* plus the current outputs, and attempts to find previous
-@@ -966,27 +1071,43 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+@@ -966,27 +1070,43 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
hash.w[1] ^= hash.w[4];
hash.w[2] ^= rol32(hash.w[2], 16);
@@ -34716,7 +35084,7 @@ index c244f0e..bb09210 100644
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
-@@ -994,8 +1115,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -994,8 +1114,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
extract_buf(r, tmp);
if (fips_enabled) {
@@ -34725,7 +35093,7 @@ index c244f0e..bb09210 100644
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
-@@ -1015,12 +1134,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -1015,12 +1133,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
return ret;
}
@@ -34743,7 +35111,7 @@ index c244f0e..bb09210 100644
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
-@@ -1036,7 +1160,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1036,7 +1159,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
extract_buf(r, tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
@@ -34752,7 +35120,7 @@ index c244f0e..bb09210 100644
ret = -EFAULT;
break;
}
-@@ -1055,11 +1179,18 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1055,11 +1178,18 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
@@ -34773,7 +35141,7 @@ index c244f0e..bb09210 100644
extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
-@@ -1078,6 +1209,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
+@@ -1078,6 +1208,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
{
char *p = buf;
@@ -34781,7 +35149,7 @@ index c244f0e..bb09210 100644
while (nbytes) {
unsigned long v;
int chunk = min(nbytes, (int)sizeof(unsigned long));
-@@ -1111,12 +1243,11 @@ static void init_std_data(struct entropy_store *r)
+@@ -1111,12 +1242,11 @@ static void init_std_data(struct entropy_store *r)
ktime_t now = ktime_get_real();
unsigned long rv;
@@ -34797,7 +35165,7 @@ index c244f0e..bb09210 100644
mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
-@@ -1139,25 +1270,7 @@ static int rand_initialize(void)
+@@ -1139,25 +1269,7 @@ static int rand_initialize(void)
init_std_data(&nonblocking_pool);
return 0;
}
@@ -34824,7 +35192,7 @@ index c244f0e..bb09210 100644
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
-@@ -1169,71 +1282,59 @@ void rand_initialize_disk(struct gendisk *disk)
+@@ -1169,71 +1281,59 @@ void rand_initialize_disk(struct gendisk *disk)
* source.
*/
state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
@@ -34932,7 +35300,7 @@ index c244f0e..bb09210 100644
}
static unsigned int
-@@ -1244,9 +1345,9 @@ random_poll(struct file *file, poll_table * wait)
+@@ -1244,9 +1344,9 @@ random_poll(struct file *file, poll_table * wait)
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
@@ -34944,7 +35312,7 @@ index c244f0e..bb09210 100644
mask |= POLLOUT | POLLWRNORM;
return mask;
}
-@@ -1297,7 +1398,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+@@ -1297,7 +1397,8 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
@@ -34954,7 +35322,7 @@ index c244f0e..bb09210 100644
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
-@@ -1305,7 +1407,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+@@ -1305,7 +1406,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
@@ -34963,7 +35331,7 @@ index c244f0e..bb09210 100644
return 0;
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
-@@ -1320,14 +1422,19 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+@@ -1320,14 +1421,19 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
@@ -34986,7 +35354,7 @@ index c244f0e..bb09210 100644
return 0;
default:
return -EINVAL;
-@@ -1387,23 +1494,23 @@ EXPORT_SYMBOL(generate_random_uuid);
+@@ -1387,23 +1493,23 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
static int min_read_thresh = 8, min_write_thresh;
@@ -35017,7 +35385,7 @@ index c244f0e..bb09210 100644
unsigned char buf[64], tmp_uuid[16], *uuid;
uuid = table->data;
-@@ -1427,8 +1534,26 @@ static int proc_do_uuid(ctl_table *table, int write,
+@@ -1427,8 +1533,26 @@ static int proc_do_uuid(ctl_table *table, int write,
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
@@ -35045,7 +35413,7 @@ index c244f0e..bb09210 100644
{
.procname = "poolsize",
.data = &sysctl_poolsize,
-@@ -1440,12 +1565,12 @@ ctl_table random_table[] = {
+@@ -1440,12 +1564,12 @@ ctl_table random_table[] = {
.procname = "entropy_avail",
.maxlen = sizeof(int),
.mode = 0444,
@@ -35060,7 +35428,7 @@ index c244f0e..bb09210 100644
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
-@@ -1454,7 +1579,7 @@ ctl_table random_table[] = {
+@@ -1454,7 +1578,7 @@ ctl_table random_table[] = {
},
{
.procname = "write_wakeup_threshold",
@@ -35069,7 +35437,7 @@ index c244f0e..bb09210 100644
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
-@@ -1462,6 +1587,13 @@ ctl_table random_table[] = {
+@@ -1462,6 +1586,13 @@ ctl_table random_table[] = {
.extra2 = &max_write_thresh,
},
{
@@ -35083,7 +35451,7 @@ index c244f0e..bb09210 100644
.procname = "boot_id",
.data = &sysctl_bootid,
.maxlen = 16,
-@@ -1492,7 +1624,7 @@ int random_int_secret_init(void)
+@@ -1492,7 +1623,7 @@ int random_int_secret_init(void)
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
@@ -35092,7 +35460,7 @@ index c244f0e..bb09210 100644
unsigned int get_random_int(void)
{
__u32 *hash;
-@@ -1510,6 +1642,7 @@ unsigned int get_random_int(void)
+@@ -1510,6 +1641,7 @@ unsigned int get_random_int(void)
return ret;
}
@@ -47156,6 +47524,35 @@ index 3440812..2a4ef1f 100644
if (file->f_version != event_count) {
file->f_version = event_count;
return POLLIN | POLLRDNORM;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 49257b3..6011b4b 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -147,7 +147,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
+ struct dev_state *ps = file->private_data;
+ struct usb_device *dev = ps->dev;
+ ssize_t ret = 0;
+- unsigned len;
++ size_t len;
+ loff_t pos;
+ int i;
+
+@@ -189,13 +189,13 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
+ for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
+ struct usb_config_descriptor *config =
+ (struct usb_config_descriptor *)dev->rawdescriptors[i];
+- unsigned int length = le16_to_cpu(config->wTotalLength);
++ size_t length = le16_to_cpu(config->wTotalLength);
+
+ if (*ppos < pos + length) {
+
+ /* The descriptor may claim to be longer than it
+ * really is. Here is the actual allocated length. */
+- unsigned alloclen =
++ size_t alloclen =
+ le16_to_cpu(dev->config[i].desc.wTotalLength);
+
+ len = length - (*ppos - pos);
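Widening len, length and alloclen from unsigned int to size_t matters because len = length - (*ppos - pos) mixes a 64-bit loff_t difference into the arithmetic; stored in a 32-bit unsigned, the result can wrap around to a small, plausible-looking length. A compact demonstration of the width difference — the offset and descriptor values below are made up, and the real function guards *ppos against the range first:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        long long pos = 0, ppos = 0x100000000LL; /* made-up 64-bit offset */
        unsigned int length32 = 0x200;
        size_t length64 = 0x200;

        /* the 32-bit result wraps back to 512, masking the underflow */
        unsigned int len32 = length32 - (ppos - pos);
        /* the 64-bit result makes the huge underflowed value visible */
        size_t len64 = length64 - (ppos - pos);

        printf("%u vs %zu\n", len32, len64);
        return 0;
}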
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 032e5a6..bc422e4 100644
--- a/drivers/usb/core/hcd.c
@@ -51358,7 +51755,7 @@ index a6395bd..f1e376a 100644
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
#ifdef __alpha__
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 8dd615c..65b7958 100644
+index 8dd615c..cb7cd01 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -32,6 +32,7 @@
@@ -51877,15 +52274,20 @@ index 8dd615c..65b7958 100644
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
-+ unsigned long pax_task_size = TASK_SIZE;
++ unsigned long pax_task_size;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
-@@ -713,11 +1058,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -713,11 +1058,82 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
- current->mm->def_flags = def_flags;
++ current->mm->def_flags = 0;
+
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(loc->elf_ex);
+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ current->mm->pax_flags = 0UL;
@@ -51904,8 +52306,6 @@ index 8dd615c..65b7958 100644
+ current->mm->delta_stack = 0UL;
+#endif
+
-+ current->mm->def_flags = 0;
-+
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
+ send_sig(SIGKILL, current, 0);
@@ -51933,19 +52333,17 @@ index 8dd615c..65b7958 100644
+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
+ pax_task_size = SEGMEXEC_TASK_SIZE;
+ current->mm->def_flags |= VM_NOHUGEPAGE;
-+ }
++ } else
+#endif
+
++ pax_task_size = TASK_SIZE;
++
+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
+ put_cpu();
+ }
+#endif
-
- /* Do this immediately, since STACK_TOP as used in setup_arg_pages
- may depend on the personality. */
- SET_PERSONALITY(loc->elf_ex);
+
+#ifdef CONFIG_PAX_ASLR
+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
@@ -51964,7 +52362,7 @@ index 8dd615c..65b7958 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -808,6 +1223,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -808,6 +1224,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
@@ -51985,7 +52383,7 @@ index 8dd615c..65b7958 100644
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -840,9 +1269,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -840,9 +1270,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -51998,7 +52396,7 @@ index 8dd615c..65b7958 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -881,17 +1310,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -881,17 +1311,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -52049,7 +52447,7 @@ index 8dd615c..65b7958 100644
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
-@@ -1098,7 +1554,7 @@ out:
+@@ -1098,7 +1555,7 @@ out:
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -52058,7 +52456,7 @@ index 8dd615c..65b7958 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1132,7 +1588,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1132,7 +1589,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -52067,7 +52465,7 @@ index 8dd615c..65b7958 100644
goto whole;
/*
-@@ -1354,9 +1810,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1354,9 +1811,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -52079,7 +52477,7 @@ index 8dd615c..65b7958 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1851,14 +2307,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -1851,14 +2308,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -52096,7 +52494,7 @@ index 8dd615c..65b7958 100644
return size;
}
-@@ -1952,7 +2408,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1952,7 +2409,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -52105,7 +52503,7 @@ index 8dd615c..65b7958 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -1966,10 +2422,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1966,10 +2423,12 @@ static int elf_core_dump(struct coredump_params *cprm)
offset = dataoff;
size += sizeof(*elf);
@@ -52118,7 +52516,7 @@ index 8dd615c..65b7958 100644
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
-@@ -1983,7 +2441,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1983,7 +2442,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -52127,7 +52525,7 @@ index 8dd615c..65b7958 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -1994,6 +2452,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1994,6 +2453,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
@@ -52135,7 +52533,7 @@ index 8dd615c..65b7958 100644
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
-@@ -2018,7 +2477,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2018,7 +2478,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -52144,7 +52542,7 @@ index 8dd615c..65b7958 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2027,6 +2486,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2027,6 +2487,7 @@ static int elf_core_dump(struct coredump_params *cprm)
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
@@ -52152,7 +52550,7 @@ index 8dd615c..65b7958 100644
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
-@@ -2044,6 +2504,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2044,6 +2505,7 @@ static int elf_core_dump(struct coredump_params *cprm)
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
@@ -52160,7 +52558,7 @@ index 8dd615c..65b7958 100644
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
-@@ -2064,6 +2525,167 @@ out:
+@@ -2064,6 +2526,167 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -53015,6 +53413,28 @@ index c858a29..969f74f 100644
cFYI(1, "unknown ACL type %d", acl_type);
return 0;
}
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index c55808e..c1814ab 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1690,10 +1690,14 @@ static int cifs_writepages(struct address_space *mapping,
+ index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
+ } else {
+- index = wbc->range_start >> PAGE_CACHE_SHIFT;
+- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
++ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+ range_whole = true;
++ index = 0;
++ end = ULONG_MAX;
++ } else {
++ index = wbc->range_start >> PAGE_CACHE_SHIFT;
++ end = wbc->range_end >> PAGE_CACHE_SHIFT;
++ }
+ scanned = true;
+ }
+ retry:
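The reordering pins the whole-file writeback window explicitly: when range_start is 0 and range_end is LLONG_MAX, index and end are set to 0 and ULONG_MAX up front instead of being derived by shifting the byte range into page indices, so the scan genuinely covers every page regardless of how the loff_t range maps onto pgoff_t. A sketch of the mapping with an assumed 4K page shift:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4K pages */

/* Byte range -> page index window, following the reordered logic above. */
static void range_to_pages(long long start, long long end_byte,
                           unsigned long *index, unsigned long *end)
{
        if (start == 0 && end_byte == LLONG_MAX) {
                *index = 0;          /* whole file: scan everything */
                *end = ULONG_MAX;
        } else {
                *index = start >> PAGE_SHIFT;
                *end = end_byte >> PAGE_SHIFT;
        }
}

int main(void)
{
        unsigned long i, e;
        range_to_pages(0, LLONG_MAX, &i, &e);
        printf("%lu..%lu\n", i, e);
        return 0;
}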
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 6b0e064..94e6c3c 100644
--- a/fs/cifs/link.c
@@ -72470,6 +72890,54 @@ index b18ce4f..2ee2843 100644
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
#endif /* _ASM_GENERIC_ATOMIC64_H */
+diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
+index a60a7cc..0fe12f2 100644
+--- a/include/asm-generic/bitops/__fls.h
++++ b/include/asm-generic/bitops/__fls.h
+@@ -9,7 +9,7 @@
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+-static __always_inline unsigned long __fls(unsigned long word)
++static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+ int num = BITS_PER_LONG - 1;
+
+diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
+index 0576d1f..dad6c71 100644
+--- a/include/asm-generic/bitops/fls.h
++++ b/include/asm-generic/bitops/fls.h
+@@ -9,7 +9,7 @@
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+-static __always_inline int fls(int x)
++static __always_inline int __intentional_overflow(-1) fls(int x)
+ {
+ int r = 32;
+
+diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
+index b097cf8..3d40e14 100644
+--- a/include/asm-generic/bitops/fls64.h
++++ b/include/asm-generic/bitops/fls64.h
+@@ -15,7 +15,7 @@
+ * at position 64.
+ */
+ #if BITS_PER_LONG == 32
+-static __always_inline int fls64(__u64 x)
++static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
+ {
+ __u32 h = x >> 32;
+ if (h)
+@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
+ return fls(x);
+ }
+ #elif BITS_PER_LONG == 64
+-static __always_inline int fls64(__u64 x)
++static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
+ {
+ if (x == 0)
+ return 0;
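__intentional_overflow(-1) is the size_overflow plugin's opt-out marker: bit-scan helpers such as fls(), fls64() and fls_long() shift and subtract in ways the instrumentation would otherwise flag, so their results are declared intentionally overflow-prone and excluded from the inserted checks. A sketch of how such an attribute can stay harmless for compilers built without the plugin — the guard macro name below is hypothetical, not the exact kernel header:

/* When the size_overflow plugin is loaded it understands this attribute;
 * otherwise the macro must expand to nothing so the code still compiles. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

/* fls-style semantics: demo_fls(0) == 0, demo_fls(1) == 1,
 * demo_fls(0x80000000) == 32 -- the return value is exempted
 * from overflow instrumentation by the attribute. */
static inline int __intentional_overflow(-1) demo_fls(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}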
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
index 1bfcfe5..e04c5c9 100644
--- a/include/asm-generic/cache.h
@@ -73045,7 +73513,7 @@ index acd8d4b..c87c74b 100644
/* Stack area protections */
#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
-index fc8a3ff..e48401e 100644
+index fc8a3ff..ad5938b 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -74,7 +74,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
@@ -73066,6 +73534,15 @@ index fc8a3ff..e48401e 100644
{
return (word >> shift) | (word << (32 - shift));
}
+@@ -140,7 +140,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
+ return (__s32)(value << shift) >> shift;
+ }
+
+-static inline unsigned fls_long(unsigned long l)
++static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
+ {
+ if (sizeof(l) == 4)
+ return fls(l);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ff039f0..cdf89ae 100644
--- a/include/linux/blkdev.h
@@ -77000,7 +77477,7 @@ index 800f113..13b3715 100644
}
diff --git a/include/linux/random.h b/include/linux/random.h
-index 7e77cee..c8a8a43 100644
+index 7e77cee..d51eec7 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -41,19 +41,27 @@ struct rand_pool_info {
@@ -77042,7 +77519,7 @@ index 7e77cee..c8a8a43 100644
-u32 random32(void);
-void srandom32(u32 seed);
-+u32 prandom_u32(void);
++u32 prandom_u32(void) __intentional_overflow(-1);
+void prandom_bytes(void *buf, int nbytes);
+void prandom_seed(u32 seed);
+void prandom_reseed_late(void);
@@ -77055,10 +77532,10 @@ index 7e77cee..c8a8a43 100644
+#define random32() prandom_u32()
+#define srandom32(seed) prandom_seed(seed)
+
-+u32 prandom_u32_state(struct rnd_state *state);
++u32 prandom_u32_state(struct rnd_state *state) __intentional_overflow(-1);
+void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
+
-+static inline unsigned long pax_get_random_long(void)
++static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
+{
+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
+}
@@ -78741,7 +79218,7 @@ index 4bde182..943f335 100644
/*
* Internals. Don't use..
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
-index 65efb92..137adbb 100644
+index 65efb92..a90154f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
@@ -78760,15 +79237,20 @@ index 65efb92..137adbb 100644
+ atomic_long_add_unchecked(x, &vm_stat[item]);
}
- static inline unsigned long global_page_state(enum zone_stat_item item)
+-static inline unsigned long global_page_state(enum zone_stat_item item)
++static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
{
- long x = atomic_long_read(&vm_stat[item]);
+ long x = atomic_long_read_unchecked(&vm_stat[item]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
-@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
- static inline unsigned long zone_page_state(struct zone *zone,
+@@ -106,10 +106,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
+ return x;
+ }
+
+-static inline unsigned long zone_page_state(struct zone *zone,
++static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
enum zone_stat_item item)
{
- long x = atomic_long_read(&zone->vm_stat[item]);
@@ -105742,10 +106224,10 @@ index 0000000..568b360
+}
diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
new file mode 100644
-index 0000000..698da67
+index 0000000..a25306b
--- /dev/null
+++ b/tools/gcc/kernexec_plugin.c
-@@ -0,0 +1,471 @@
+@@ -0,0 +1,474 @@
+/*
+ * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -105891,21 +106373,21 @@ index 0000000..698da67
+}
+
+/*
-+ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
++ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
+ */
+static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
+{
+ gimple asm_movabs_stmt;
+
-+ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
-+ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
++ // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
+ gimple_asm_set_volatile(asm_movabs_stmt, true);
+ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
+ update_stmt(asm_movabs_stmt);
+}
+
+/*
-+ * find all asm() stmts that clobber r10 and add a reload of r10
++ * find all asm() stmts that clobber r12 and add a reload of r12
+ */
+static unsigned int execute_kernexec_reload(void)
+{
@@ -105916,7 +106398,7 @@ index 0000000..698da67
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ // gimple match: __asm__ ("" : : : "r10");
++ // gimple match: __asm__ ("" : : : "r12");
+ gimple asm_stmt;
+ size_t nclobbers;
+
@@ -105925,11 +106407,11 @@ index 0000000..698da67
+ if (gimple_code(asm_stmt) != GIMPLE_ASM)
+ continue;
+
-+ // ... clobbering r10
++ // ... clobbering r12
+ nclobbers = gimple_asm_nclobbers(asm_stmt);
+ while (nclobbers--) {
+ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
-+ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
+ continue;
+ kernexec_reload_fptr_mask(&gsi);
+//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
@@ -106012,7 +106494,7 @@ index 0000000..698da67
+#endif
+ new_fptr = make_ssa_name(new_fptr, NULL);
+
-+ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
++ // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
+ input = build_tree_list(NULL_TREE, build_string(1, "0"));
+ input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
+ output = build_tree_list(NULL_TREE, build_string(2, "=r"));
@@ -106024,7 +106506,7 @@ index 0000000..698da67
+ vec_safe_push(inputs, input);
+ vec_safe_push(outputs, output);
+#endif
-+ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
++ asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
+ SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
+ gimple_asm_set_volatile(asm_or_stmt, true);
+ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
@@ -106104,19 +106586,19 @@ index 0000000..698da67
+ emit_insn_before(btsq, insn);
+}
+
-+// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
++// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
+static void kernexec_instrument_retaddr_or(rtx insn)
+{
+ rtx orq;
+ rtvec argvec, constraintvec, labelvec;
+ int line;
+
-+ // create asm volatile("orq %%r10,(%%rsp)":::)
++ // create asm volatile("orq %%r12,(%%rsp)":::)
+ argvec = rtvec_alloc(0);
+ constraintvec = rtvec_alloc(0);
+ labelvec = rtvec_alloc(0);
+ line = expand_location(RTL_LOCATION(insn)).line;
-+ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
+ MEM_VOLATILE_P(orq) = 1;
+// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
+ emit_insn_before(orq, insn);
@@ -106129,6 +106611,9 @@ index 0000000..698da67
+{
+ rtx insn;
+
++// if (stack_realign_drap)
++// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
++
+ // 1. find function returns
+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
+ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
@@ -106200,7 +106685,7 @@ index 0000000..698da67
+ } else if (!strcmp(argv[i].value, "or")) {
+ kernexec_instrument_fptr = kernexec_instrument_fptr_or;
+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
-+ fix_register("r10", 1, 1);
++ fix_register("r12", 1, 1);
+ } else
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
+ continue;
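With the "or" method this revision of the plugin claims %r12 instead of %r10 as the global mask register: fix_register() removes it from the allocator in every function compiled, kernexec_instrument_fptr_or() ORs it into function pointers before indirect calls, and inline asm that lists it as clobbered gets the movabs reload re-emitted afterwards. That is also why the hand-written x86 routines earlier in this patch migrate their scratch usage off r12 (to r14/r15) and revert the old r10 workarounds. Roughly what the instrumented call site amounts to in C terms — purely illustrative, and it would want gcc -ffixed-r12 on x86-64:

typedef void (*fptr_t)(void);

/* the plugin keeps this register permanently loaded with the mask */
register unsigned long kernexec_mask __asm__("r12");

/* Analogue of kernexec_instrument_fptr_or(): force bit 63 into the
 * target so a pointer overwritten with a userland address faults. */
static void checked_indirect_call(fptr_t fn)
{
        fn = (fptr_t)((unsigned long)fn | kernexec_mask);
        fn();
}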
@@ -106560,10 +107045,10 @@ index 0000000..679b9ef
+}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
-index 0000000..a77968d
+index 0000000..75568e9
--- /dev/null
+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,5989 @@
+@@ -0,0 +1,5983 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
@@ -107030,7 +107515,6 @@ index 0000000..a77968d
+ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
+cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 2-3 5368 NULL
+bitmap_fold_5396 bitmap_fold 4 5396 NULL
-+perf_adjust_period_5408 perf_adjust_period 2-3 5408 NULL
+nilfs_palloc_entries_per_group_5418 nilfs_palloc_entries_per_group 0 5418 NULL
+xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
+xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
@@ -108649,7 +109133,6 @@ index 0000000..a77968d
+mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
+pskb_may_pull_22546 pskb_may_pull 2 22546 NULL
+ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
-+atomic_long_read_unchecked_22551 atomic_long_read_unchecked 0 22551 NULL
+agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
+snd_pcm_hw_params_choose_22560 snd_pcm_hw_params_choose 0 22560 NULL
+dbFindCtl_22587 dbFindCtl 0 22587 NULL
@@ -109905,7 +110388,6 @@ index 0000000..a77968d
+vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
+cxio_hal_rqtpool_alloc_36648 cxio_hal_rqtpool_alloc 2 36648 NULL nohasharray
+lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 &cxio_hal_rqtpool_alloc_36648
-+perf_calculate_period_36662 perf_calculate_period 2-3 36662 NULL
+osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
+iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
+ext4_mb_discard_group_preallocations_36685 ext4_mb_discard_group_preallocations 2 36685 NULL
@@ -110170,7 +110652,6 @@ index 0000000..a77968d
+setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
+btrfs_mksubvol_39479 btrfs_mksubvol 3 39479 NULL
+ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
-+atomic64_read_unchecked_39505 atomic64_read_unchecked 0 39505 NULL
+wm8350_i2c_read_device_39542 wm8350_i2c_read_device 3 39542 NULL nohasharray
+int_proc_write_39542 int_proc_write 3 39542 &wm8350_i2c_read_device_39542
+pp_write_39554 pp_write 3 39554 NULL
@@ -111373,7 +111854,6 @@ index 0000000..a77968d
+sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
+smk_write_load_list_52280 smk_write_load_list 3 52280 NULL
+handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL
-+atomic64_read_52300 atomic64_read 0 52300 NULL
+ath6kl_wmi_get_new_buf_52304 ath6kl_wmi_get_new_buf 1 52304 NULL
+jbd2_free_52306 jbd2_free 2 52306 NULL
+kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL
@@ -112529,7 +113009,6 @@ index 0000000..a77968d
+nf_bridge_mtu_reduction_65192 nf_bridge_mtu_reduction 0 65192 NULL
+nfulnl_alloc_skb_65207 nfulnl_alloc_skb 2-1 65207 NULL
+whci_n_caps_65247 whci_n_caps 0 65247 NULL
-+atomic_long_read_65263 atomic_long_read 0 65263 NULL
+kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
+compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
+get_unaligned_le16_65293 get_unaligned_le16 0 65293 NULL
@@ -112555,10 +113034,10 @@ index 0000000..a77968d
+selnl_msglen_65499 selnl_msglen 0 65499 NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..87dd5e2
+index 0000000..5515dcb
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,3840 @@
+@@ -0,0 +1,3927 @@
+/*
+ * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -112619,6 +113098,10 @@ index 0000000..87dd5e2
+#define MIN_CHECK true
+#define MAX_CHECK false
+
++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF\n\t"
++#define YES_ASM_STR "# size_overflow MARK_YES\n\t"
++#define OK_ASM_STR "# size_overflow\n\t"
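These three strings are the markers the gimple pass plants and the IPA pass later searches for: a tracked value is routed through a dummy asm so the statement survives optimization and carries its intentional_overflow status (plain check, MARK_YES, or MARK_TURN_OFF) between passes, exactly as the comment near create_asm_input() further down spells out. In source form the inserted statement is just an asm whose template is the marker, e.g. (size is a stand-in parameter name):

/* Hand-written equivalent of the marker the plugin emits for a
 * tracked size argument; the asm body is a comment, so it costs
 * nothing at runtime and only tags the value for later passes. */
static unsigned long track_size(unsigned long size)
{
        __asm__("# size_overflow MARK_YES" : : "rm" (size));
        return size;
}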
++
+#if BUILDING_GCC_VERSION == 4005
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
+#endif
@@ -112684,7 +113167,7 @@ index 0000000..87dd5e2
+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20131120beta",
++ .version = "20131214beta",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -113898,11 +114381,16 @@ index 0000000..87dd5e2
+
+ cast_rhs_type = TREE_TYPE(cast_rhs);
+ type_max_type = TREE_TYPE(type_max);
-+ type_min_type = TREE_TYPE(type_min);
+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
-+ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+
+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
++
++ // special case: get_size_overflow_type(), 32, u64->s
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
++ return;
++
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
@@ -114170,7 +114658,7 @@ index 0000000..87dd5e2
+ break;
+ case DImode:
+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ new_type = intDI_type_node;
++ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
+ else
+ new_type = intTI_type_node;
+ break;
@@ -114622,12 +115110,17 @@ index 0000000..87dd5e2
+}
+
+// determine whether duplication will be necessary or not.
-+static void search_interesting_conditions(const_tree arg, bool *interesting_conditions)
++static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
+{
+ struct pointer_set_t *visited;
+
++ if (gimple_assign_cast_p(cur_node->first_stmt))
++ interesting_conditions[CAST] = true;
++ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
++ interesting_conditions[NOT_UNARY] = true;
++
+ visited = pointer_set_create();
-+ set_conditions(visited, interesting_conditions, arg);
++ set_conditions(visited, interesting_conditions, cur_node->node);
+ pointer_set_destroy(visited);
+}
+
@@ -114747,36 +115240,43 @@ index 0000000..87dd5e2
+ return false;
+}
+
++static const char *get_asm_string(const_gimple stmt)
++{
++ if (!stmt)
++ return NULL;
++ if (gimple_code(stmt) != GIMPLE_ASM)
++ return NULL;
++
++ return gimple_asm_string(stmt);
++}
++
+static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
+{
+ const char *str;
+
-+ if (!stmt)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ str = gimple_asm_string(stmt);
-+ return !strcmp(str, "# size_overflow MARK_TURN_OFF\n\t");
++ return !strcmp(str, TURN_OFF_ASM_STR);
+}
+
+static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
+{
+ const char *str;
+
-+ if (!stmt)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ str = gimple_asm_string(stmt);
-+ return !strcmp(str, "# size_overflow MARK_YES\n\t");
++ return !strcmp(str, YES_ASM_STR);
+}
+
+static bool is_size_overflow_asm(const_gimple stmt)
+{
+ const char *str;
+
-+ if (!stmt)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ str = gimple_asm_string(stmt);
+ return !strncmp(str, "# size_overflow", 15);
+}
+
@@ -114865,8 +115365,6 @@ index 0000000..87dd5e2
+ */
+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
+{
-+ const_tree input, output;
-+
+ if (!cur_node->intentional_mark_from_gimple)
+ return false;
+
@@ -114878,10 +115376,6 @@ index 0000000..87dd5e2
+ // skip param decls
+ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
+ return true;
-+ input = gimple_asm_input_op(cur_node->intentional_mark_from_gimple, 0);
-+ output = gimple_asm_output_op(cur_node->intentional_mark_from_gimple, 0);
-+
-+ replace_size_overflow_asm_with_assign(cur_node->intentional_mark_from_gimple, TREE_VALUE(output), TREE_VALUE(input));
+ return true;
+}
+
@@ -114894,6 +115388,9 @@ index 0000000..87dd5e2
+{
+ const_tree fndecl;
+
++ if (is_intentional_attribute_from_gimple(cur_node))
++ return;
++
+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
+ return;
@@ -114918,9 +115415,6 @@ index 0000000..87dd5e2
+ else if (is_yes_intentional_attr(fndecl, cur_node->num))
+ cur_node->intentional_attr_decl = MARK_YES;
+
-+ if (is_intentional_attribute_from_gimple(cur_node))
-+ return;
-+
+ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
+ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
+}
@@ -115006,13 +115500,8 @@ index 0000000..87dd5e2
+// a size_overflow asm stmt in the control flow doesn't stop the recursion
+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
+{
-+ const_tree asm_lhs;
-+
+ if (!is_size_overflow_asm(stmt))
-+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+
-+ asm_lhs = gimple_asm_input_op(stmt, 0);
-+ walk_use_def(visited, cur_node, TREE_VALUE(asm_lhs));
++ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+}
+
+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
@@ -115073,39 +115562,58 @@ index 0000000..87dd5e2
+ pointer_set_destroy(visited);
+}
+
-+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
-+ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
-+ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
-+ * If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
++enum precond {
++ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
+ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
+ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
+ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume that it is some kind of error code.
+ */
-+static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++static enum precond check_preconditions(struct interesting_node *cur_node)
+{
-+ struct pointer_set_t *visited;
+ bool interesting_conditions[3] = {false, false, false};
-+ tree new_node, orig_node = cur_node->node;
+
+ set_last_nodes(cur_node);
+
+ check_intentional_attribute_ipa(cur_node);
+ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
-+ return cnodes;
++ return NO_ATTRIBUTE_SEARCH;
+
-+ search_interesting_conditions(orig_node, interesting_conditions);
++ search_interesting_conditions(cur_node, interesting_conditions);
+
+ // error code
+ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
-+ return cnodes;
++ return NO_ATTRIBUTE_SEARCH;
+
-+ cnodes = search_overflow_attribute(cnodes, cur_node);
++ // unnecessary overflow check
++ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ return NO_CHECK_INSERT;
+
+ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
++ return NO_CHECK_INSERT;
++
++ return NONE;
++}
++
++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
++ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
++ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++ enum precond ret;
++ struct pointer_set_t *visited;
++ tree new_node, orig_node = cur_node->node;
++
++ ret = check_preconditions(cur_node);
++ if (ret == NO_ATTRIBUTE_SEARCH)
+ return cnodes;
+
-+ // unnecessary overflow check
-+ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ cnodes = search_overflow_attribute(cnodes, cur_node);
++
++ if (ret == NO_CHECK_INSERT)
+ return cnodes;
+
+ visited = pointer_set_create();
@@ -115317,9 +115825,6 @@ index 0000000..87dd5e2
+ imm_use_iterator imm_iter;
+ unsigned int argnum;
+
-+ if (is_size_overflow_intentional_asm_turn_off(intentional_asm))
-+ return head;
-+
+ gcc_assert(TREE_CODE(node) == SSA_NAME);
+
+ if (pointer_set_insert(visited, node))
@@ -115372,10 +115877,20 @@ index 0000000..87dd5e2
+static void remove_size_overflow_asm(gimple stmt)
+{
+ gimple_stmt_iterator gsi;
++ tree input, output;
+
-+ gcc_assert(gimple_code(stmt) == GIMPLE_ASM);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_remove(&gsi, true);
++ if (!is_size_overflow_asm(stmt))
++ return;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ gsi = gsi_for_stmt(stmt);
++ gsi_remove(&gsi, true);
++ return;
++ }
++
++ input = gimple_asm_input_op(stmt, 0);
++ output = gimple_asm_output_op(stmt, 0);
++ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
+}
+
+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
@@ -115384,7 +115899,7 @@ index 0000000..87dd5e2
+ */
+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
+{
-+ const_tree output, input;
++ const_tree output;
+ struct pointer_set_t *visited;
+ gimple intentional_asm = NOT_INTENTIONAL_ASM;
+
@@ -115395,25 +115910,31 @@ index 0000000..87dd5e2
+ intentional_asm = stmt;
+
+ gcc_assert(gimple_asm_ninputs(stmt) == 1);
-+ input = gimple_asm_input_op(stmt, 0);
++
++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
+
+ if (gimple_asm_noutputs(stmt) == 0) {
++ const_tree input;
++
++ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ input = gimple_asm_input_op(stmt, 0);
+ remove_size_overflow_asm(stmt);
+ if (is_gimple_constant(TREE_VALUE(input)))
+ return head;
-+
+ visited = pointer_set_create();
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
+ pointer_set_destroy(visited);
+ return head;
+ }
+
-+ output = gimple_asm_output_op(stmt, 0);
-+
+ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
-+ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
++ remove_size_overflow_asm(stmt);
+
+ visited = pointer_set_create();
++ output = gimple_asm_output_op(stmt, 0);
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
+ pointer_set_destroy(visited);
+ return head;
@@ -115531,6 +116052,18 @@ index 0000000..87dd5e2
+ }
+}
+
++static void remove_all_size_overflow_asm(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB(bb) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ remove_size_overflow_asm(gsi_stmt(si));
++ }
++}
++
+/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
+ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
+ * the newly collected interesting functions (they are interesting if there is control flow between
@@ -115557,6 +116090,7 @@ index 0000000..87dd5e2
+ }
+
+ free_interesting_node(head);
++ remove_all_size_overflow_asm();
+ unset_current_function_decl();
+
+ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
@@ -115796,6 +116330,9 @@ index 0000000..87dd5e2
+ case GIMPLE_NOP:
+ return search_intentional(visited, SSA_NAME_VAR(lhs));
+ case GIMPLE_ASM:
++ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ return MARK_TURN_OFF;
++ return MARK_NO;
+ case GIMPLE_CALL:
+ return MARK_NO;
+ case GIMPLE_PHI:
@@ -115817,10 +116354,9 @@ index 0000000..87dd5e2
+}
+
-+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
-+static const char *check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
++// Check the intentional_overflow attribute and return the mark it implies for the size_overflow asm stmt.
++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
+{
+ const_tree fndecl;
-+ const char *asm_str;
+ struct pointer_set_t *visited;
+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
+
@@ -115830,7 +116366,7 @@ index 0000000..87dd5e2
+ else if (is_yes_intentional_attr(fndecl, argnum))
+ decl_attr = MARK_YES;
+ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ return "# size_overflow MARK_TURN_OFF\n\t";
++ return MARK_TURN_OFF;
+ }
+
+ visited = pointer_set_create();
@@ -115839,18 +116375,13 @@ index 0000000..87dd5e2
+
+ switch (cur_fndecl_attr) {
+ case MARK_NO:
-+ asm_str = "# size_overflow\n\t";
-+ break;
++ return MARK_NO;
+ case MARK_TURN_OFF:
-+ asm_str = "# size_overflow MARK_TURN_OFF\n\t";
-+ break;
++ return MARK_TURN_OFF;
+ default:
-+ asm_str = "# size_overflow MARK_YES\n\t";
+ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
-+ break;
++ return MARK_YES;
+ }
-+
-+ return asm_str;
+}
+
+static void check_missing_size_overflow_attribute(tree var)
@@ -115986,6 +116517,21 @@ index 0000000..87dd5e2
+ update_stmt(stmt);
+}
+
++static const char *convert_mark_to_str(enum mark mark)
++{
++ switch (mark) {
++ case MARK_NO:
++ return OK_ASM_STR;
++ case MARK_YES:
++ case MARK_NOT_INTENTIONAL:
++ return YES_ASM_STR;
++ case MARK_TURN_OFF:
++ return TURN_OFF_ASM_STR;
++ }
++
++ gcc_unreachable();
++}
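The *_ASM_STR macros are defined earlier in the file; judging by the inline strings the removed branches above used to build, they presumably expand to:

    #define OK_ASM_STR       "# size_overflow\n\t"
    #define YES_ASM_STR      "# size_overflow MARK_YES\n\t"
    #define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF\n\t"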
++
+/* Create the input of the size_overflow asm stmt.
+ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
@@ -115999,6 +116545,8 @@ index 0000000..87dd5e2
+ return;
+ }
+
++ gcc_assert(!is_size_overflow_intentional_asm_turn_off(asm_data->def_stmt));
++
+ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
+ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
+
@@ -116011,7 +116559,11 @@ index 0000000..87dd5e2
+ create_output_from_phi(stmt, argnum, asm_data);
+ break;
+ case GIMPLE_NOP: {
-+ const char *str = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++ enum mark mark;
++ const char *str;
++
++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++ str = convert_mark_to_str(mark);
+
+ asm_data->input = asm_data->output;
+ asm_data->output = NULL;
@@ -116041,19 +116593,24 @@ index 0000000..87dd5e2
+{
+ struct asm_data asm_data;
+ const char *str;
++ enum mark mark;
+
+ if (is_gimple_constant(output_node))
+ return;
+
++ asm_data.output = output_node;
++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ if (mark == MARK_TURN_OFF)
++ return;
++
+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
+
-+ asm_data.output = output_node;
+ asm_data.def_stmt = get_def_stmt(asm_data.output);
+ create_asm_input(stmt, argnum, &asm_data);
+ if (asm_data.input == NULL_TREE)
+ return;
+
-+ str = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ str = convert_mark_to_str(mark);
+ create_asm_stmt(str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
+}
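The constraint pair built here ("0" for the input, "=rm" for the output) ties the asm's single input to its single output, so the marker is a value-preserving pass-through. Roughly, the two emitted forms correspond to (val is a placeholder for the marked SSA value):

    /* output form: the value flows through the asm unchanged */
    __asm__("# size_overflow MARK_YES\n\t" : "=rm"(val) : "0"(val));

    /* input-only form, when there is no definition to tie the output to */
    __asm__("# size_overflow MARK_YES\n\t" : : "rm"(val));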
+
@@ -116150,16 +116707,22 @@ index 0000000..87dd5e2
+ if (mark != MARK_TURN_OFF)
+ return false;
+
-+ asm_data.input = gimple_call_lhs(stmt);
-+ if (asm_data.input == NULL_TREE) {
++ asm_data.def_stmt = stmt;
++ asm_data.output = gimple_call_lhs(stmt);
++
++ if (asm_data.output == NULL_TREE) {
+ asm_data.input = gimple_call_arg(stmt, 0);
+ if (is_gimple_constant(asm_data.input))
+ return false;
++ asm_data.output = NULL;
++ create_asm_stmt(TURN_OFF_ASM_STR, build_string(2, "rm"), NULL, &asm_data);
++ return true;
+ }
+
-+ asm_data.output = NULL;
-+ asm_data.def_stmt = stmt;
-+ create_asm_stmt("# size_overflow MARK_TURN_OFF\n\t", build_string(2, "rm"), NULL, &asm_data);
++ create_asm_input(stmt, 0, &asm_data);
++ gcc_assert(asm_data.input != NULL_TREE);
++
++ create_asm_stmt(TURN_OFF_ASM_STR, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
+ return true;
+}
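For context, the turn_off mark handled here presumably originates from the plugin's intentional_overflow attribute with a -1 argument, which disables checking for the whole callee; e.g. (hypothetical declaration):

    unsigned long mix_hash(unsigned long a, unsigned long b) __attribute__((intentional_overflow(-1)));

Calls to such a function reach the two branches above: with a lhs, the return value is wrapped in a tied "0"/"=rm" marker asm; without one, the first argument gets an input-only "rm" marker.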
+
@@ -116209,6 +116772,9 @@ index 0000000..87dd5e2
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gimple stmt = gsi_stmt(gsi);
+
++ if (is_size_overflow_asm(stmt))
++ continue;
++
+ if (is_gimple_call(stmt))
+ handle_interesting_function(stmt);
+ else if (gimple_code(stmt) == GIMPLE_RETURN)