summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnthony G. Basile <blueness@gentoo.org>2014-06-03 09:46:36 -0400
committerAnthony G. Basile <blueness@gentoo.org>2014-06-03 09:46:36 -0400
commit0def4b4b06f849ab8df54d9422444878208721c1 (patch)
tree7d5f9862337b6b95ee46992c0b0c1878238eee76
parentGrsec/PaX: 3.0-{3.2.59,3.14.4}-201405281922 (diff)
downloadhardened-patchset-0def4b4b06f849ab8df54d9422444878208721c1.tar.gz
hardened-patchset-0def4b4b06f849ab8df54d9422444878208721c1.tar.bz2
hardened-patchset-0def4b4b06f849ab8df54d9422444878208721c1.zip
Grsec/PaX: 3.0-{3.2.59,3.14.5}-20140603071620140603
-rw-r--r--3.14.5/0000_README (renamed from 3.14.4/0000_README)2
-rw-r--r--3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch (renamed from 3.14.4/4420_grsecurity-3.0-3.14.4-201405281922.patch)599
-rw-r--r--3.14.5/4425_grsec_remove_EI_PAX.patch (renamed from 3.14.4/4425_grsec_remove_EI_PAX.patch)0
-rw-r--r--3.14.5/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.14.4/4427_force_XATTR_PAX_tmpfs.patch)0
-rw-r--r--3.14.5/4430_grsec-remove-localversion-grsec.patch (renamed from 3.14.4/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--3.14.5/4435_grsec-mute-warnings.patch (renamed from 3.14.4/4435_grsec-mute-warnings.patch)0
-rw-r--r--3.14.5/4440_grsec-remove-protected-paths.patch (renamed from 3.14.4/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--3.14.5/4450_grsec-kconfig-default-gids.patch (renamed from 3.14.4/4450_grsec-kconfig-default-gids.patch)0
-rw-r--r--3.14.5/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.14.4/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--3.14.5/4470_disable-compat_vdso.patch (renamed from 3.14.4/4470_disable-compat_vdso.patch)0
-rw-r--r--3.14.5/4475_emutramp_default_on.patch (renamed from 3.14.4/4475_emutramp_default_on.patch)0
-rw-r--r--3.2.59/0000_README2
-rw-r--r--3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch (renamed from 3.2.59/4420_grsecurity-3.0-3.2.59-201405281920.patch)8966
-rw-r--r--3.2.59/4450_grsec-kconfig-default-gids.patch12
-rw-r--r--3.2.59/4465_selinux-avc_audit-log-curr_ip.patch2
15 files changed, 5187 insertions, 4396 deletions
diff --git a/3.14.4/0000_README b/3.14.5/0000_README
index 275b0d1..287174d 100644
--- a/3.14.4/0000_README
+++ b/3.14.5/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.4-201405281922.patch
+Patch: 4420_grsecurity-3.0-3.14.5-201406021708.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.4/4420_grsecurity-3.0-3.14.4-201405281922.patch b/3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch
index c9100d1..400f193 100644
--- a/3.14.4/4420_grsecurity-3.0-3.14.4-201405281922.patch
+++ b/3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch
@@ -287,7 +287,7 @@ index 7116fda..d8ed6e8 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index d7c07fd..1ad8228 100644
+index fa77b0b..dadf5fd 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -7508,18 +7508,6 @@ index 4006964..fcb3cc2 100644
ret = __copy_from_user(to, from, n);
else
copy_from_user_overflow();
-diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
-index 8b06343..090483c 100644
---- a/arch/parisc/include/uapi/asm/resource.h
-+++ b/arch/parisc/include/uapi/asm/resource.h
-@@ -1,7 +1,6 @@
- #ifndef _ASM_PARISC_RESOURCE_H
- #define _ASM_PARISC_RESOURCE_H
-
--#define _STK_LIM_MAX 10 * _STK_LIM
- #include <asm-generic/resource.h>
-
- #endif
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 50dfafc..b9fc230 100644
--- a/arch/parisc/kernel/module.c
@@ -7624,7 +7612,7 @@ index 50dfafc..b9fc230 100644
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index b7cadc4..bf4a32d 100644
+index 31ffa9b..588a798 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -7648,7 +7636,7 @@ index b7cadc4..bf4a32d 100644
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.high_limit = mmap_upper_limit();
- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
+ info.threadstack_offset = offset;
addr = vm_unmapped_area(&info);
@@ -7675,7 +7663,7 @@ index b7cadc4..bf4a32d 100644
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.high_limit = mm->mmap_base;
- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
+ info.threadstack_offset = offset;
addr = vm_unmapped_area(&info);
@@ -18365,10 +18353,10 @@ index 94e40f1..ebd03e4 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index c8b0519..fd29e73 100644
+index b39e194..9d44fd1 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
-@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
+@@ -99,7 +99,7 @@ static __always_inline void __preempt_count_sub(int val)
*/
static __always_inline bool __preempt_count_dec_and_test(void)
{
@@ -19602,7 +19590,7 @@ index 04905bf..49203ca 100644
}
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index 0d592e0..f58a222 100644
+index 0d592e0..7437fcc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -7,6 +7,7 @@
@@ -19626,7 +19614,7 @@ index 0d592e0..f58a222 100644
#define segment_eq(a, b) ((a).seg == (b).seg)
-@@ -85,8 +91,34 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
@@ -19636,26 +19624,28 @@ index 0d592e0..f58a222 100644
+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
+#define access_ok(type, addr, size) \
+({ \
-+ long __size = size; \
++ unsigned long __size = size; \
+ unsigned long __addr = (unsigned long)addr; \
-+ unsigned long __addr_ao = __addr & PAGE_MASK; \
-+ unsigned long __end_ao = __addr + __size - 1; \
+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
-+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
-+ while(__addr_ao <= __end_ao) { \
-+ char __c_ao; \
-+ __addr_ao += PAGE_SIZE; \
-+ if (__size > PAGE_SIZE) \
-+ _cond_resched(); \
-+ if (__get_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ if (type != VERIFY_WRITE) { \
++ if (__ret_ao && __size) { \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while (__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ _cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
+ __addr = __addr_ao; \
-+ continue; \
+ } \
-+ if (__put_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ __addr = __addr_ao; \
+ } \
+ } \
+ __ret_ao; \
@@ -19663,7 +19653,7 @@ index 0d592e0..f58a222 100644
/*
* The exception table consists of pairs of addresses relative to the
-@@ -176,10 +208,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -19676,7 +19666,7 @@ index 0d592e0..f58a222 100644
__ret_gu; \
})
-@@ -187,13 +221,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
@@ -19701,7 +19691,7 @@ index 0d592e0..f58a222 100644
"3: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
-@@ -206,8 +248,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#define __put_user_asm_ex_u64(x, addr) \
asm volatile(ASM_STAC "\n" \
@@ -19712,7 +19702,7 @@ index 0d592e0..f58a222 100644
"3: " ASM_CLAC "\n" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
-@@ -257,7 +299,8 @@ extern void __put_user_8(void);
+@@ -257,7 +301,8 @@ extern void __put_user_8(void);
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -19722,7 +19712,7 @@ index 0d592e0..f58a222 100644
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_x(1, __pu_val, ptr, __ret_pu); \
-@@ -275,6 +318,7 @@ extern void __put_user_8(void);
+@@ -275,6 +320,7 @@ extern void __put_user_8(void);
__put_user_x(X, __pu_val, ptr, __ret_pu); \
break; \
} \
@@ -19730,7 +19720,7 @@ index 0d592e0..f58a222 100644
__ret_pu; \
})
-@@ -355,8 +399,10 @@ do { \
+@@ -355,8 +401,10 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -19742,7 +19732,7 @@ index 0d592e0..f58a222 100644
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
-@@ -364,8 +410,10 @@ do { \
+@@ -364,8 +412,10 @@ do { \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
@@ -19755,7 +19745,7 @@ index 0d592e0..f58a222 100644
#define __get_user_size_ex(x, ptr, size) \
do { \
-@@ -389,7 +437,7 @@ do { \
+@@ -389,7 +439,7 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -19764,7 +19754,7 @@ index 0d592e0..f58a222 100644
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr)))
-@@ -406,13 +454,24 @@ do { \
+@@ -406,13 +456,24 @@ do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
@@ -19791,7 +19781,7 @@ index 0d592e0..f58a222 100644
/*
* Tell gcc we read from memory instead of writing: this is because
-@@ -420,8 +479,10 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -19803,7 +19793,7 @@ index 0d592e0..f58a222 100644
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
-@@ -429,10 +490,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
@@ -19818,7 +19808,7 @@ index 0d592e0..f58a222 100644
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr)))
-@@ -442,11 +505,13 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
*/
#define uaccess_try do { \
current_thread_info()->uaccess_err = 0; \
@@ -19832,7 +19822,7 @@ index 0d592e0..f58a222 100644
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)
-@@ -471,8 +536,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
* On error, the variable @x is set to zero.
*/
@@ -19845,7 +19835,7 @@ index 0d592e0..f58a222 100644
/**
* __put_user: - Write a simple value into user space, with less checking.
-@@ -494,8 +563,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
* Returns zero on success, or -EFAULT on error.
*/
@@ -19858,7 +19848,7 @@ index 0d592e0..f58a222 100644
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
-@@ -513,7 +586,7 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
@@ -19867,7 +19857,7 @@ index 0d592e0..f58a222 100644
} while (0)
#define put_user_try uaccess_try
-@@ -542,18 +615,19 @@ extern void __cmpxchg_wrong_size(void)
+@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
__typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
@@ -19889,7 +19879,7 @@ index 0d592e0..f58a222 100644
: "i" (-EFAULT), "q" (__new), "1" (__old) \
: "memory" \
); \
-@@ -562,14 +636,14 @@ extern void __cmpxchg_wrong_size(void)
+@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
case 2: \
{ \
asm volatile("\t" ASM_STAC "\n" \
@@ -19906,7 +19896,7 @@ index 0d592e0..f58a222 100644
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
-@@ -578,14 +652,14 @@ extern void __cmpxchg_wrong_size(void)
+@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
case 4: \
{ \
asm volatile("\t" ASM_STAC "\n" \
@@ -19923,7 +19913,7 @@ index 0d592e0..f58a222 100644
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
-@@ -597,14 +671,14 @@ extern void __cmpxchg_wrong_size(void)
+@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
__cmpxchg_wrong_size(); \
\
asm volatile("\t" ASM_STAC "\n" \
@@ -19940,7 +19930,7 @@ index 0d592e0..f58a222 100644
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
-@@ -613,6 +687,7 @@ extern void __cmpxchg_wrong_size(void)
+@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
default: \
__cmpxchg_wrong_size(); \
} \
@@ -19948,7 +19938,7 @@ index 0d592e0..f58a222 100644
*__uval = __old; \
__ret; \
})
-@@ -636,17 +711,6 @@ extern struct movsl_mask {
+@@ -636,17 +713,6 @@ extern struct movsl_mask {
#define ARCH_HAS_NOCACHE_UACCESS 1
@@ -19966,7 +19956,7 @@ index 0d592e0..f58a222 100644
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
-@@ -656,7 +720,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
@@ -19975,7 +19965,7 @@ index 0d592e0..f58a222 100644
#undef copy_user_diag
-@@ -669,7 +733,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
+@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
@@ -19984,7 +19974,7 @@ index 0d592e0..f58a222 100644
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
#else
-@@ -684,10 +748,16 @@ __copy_from_user_overflow(int size, unsigned long count)
+@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
#endif
@@ -20002,7 +19992,7 @@ index 0d592e0..f58a222 100644
might_fault();
-@@ -709,12 +779,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
* case, and do only runtime checking for non-constant sizes.
*/
@@ -20024,7 +20014,7 @@ index 0d592e0..f58a222 100644
return n;
}
-@@ -722,17 +795,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -28784,10 +28774,10 @@ index 3927528..fc19971 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 2b85784..ad70e19 100644
+index ee0c3b5..773bb94 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1777,8 +1777,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1776,8 +1776,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -28798,7 +28788,7 @@ index 2b85784..ad70e19 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2689,6 +2689,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2688,6 +2688,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -28807,7 +28797,7 @@ index 2b85784..ad70e19 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -5503,7 +5505,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -5502,7 +5504,7 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
@@ -34132,7 +34122,7 @@ index 0149575..f746de8 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index 4ed75dd..3cf24f0b 100644
+index af2d431..3cf24f0b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
@@ -34298,7 +34288,7 @@ index 4ed75dd..3cf24f0b 100644
+ pax_close_kernel();
- header->pages = sz / PAGE_SIZE;
-- hole = sz - (proglen + sizeof(*header));
+- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
/* insert a random number of int3 instructions before BPF code */
@@ -40603,10 +40593,10 @@ index 15a74f9..4278889 100644
return can_switch;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index df77e20..d3fda9f 100644
+index 697f215..6f89b7f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -1361,7 +1361,7 @@ typedef struct drm_i915_private {
+@@ -1362,7 +1362,7 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -40788,10 +40778,10 @@ index d554169..f4426bb 100644
iir = I915_READ(IIR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 9b8a7c7..60f6003 100644
+index 963639d..ea0c0cb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -10776,13 +10776,13 @@ struct intel_quirk {
+@@ -10787,13 +10787,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -40807,7 +40797,7 @@ index 9b8a7c7..60f6003 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -10790,18 +10790,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -10801,18 +10801,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -41191,7 +41181,7 @@ index 28f84b4..fb3e224 100644
ret = drm_irq_install(qdev->ddev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
-index c7e7e65..7dddd4d 100644
+index c82c1d6a9..6158c02 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
@@ -41214,7 +41204,7 @@ index c7e7e65..7dddd4d 100644
}
vma->vm_ops = &qxl_ttm_vm_ops;
return 0;
-@@ -560,25 +562,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
+@@ -561,25 +563,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
@@ -41881,10 +41871,10 @@ index ec0ae2d..dc0780b 100644
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index cc32a6f..02a4b1c 100644
+index 8a5384c..cf63c18 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
-@@ -2421,7 +2421,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+@@ -2422,7 +2422,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
int hid_add_device(struct hid_device *hdev)
{
@@ -41893,7 +41883,7 @@ index cc32a6f..02a4b1c 100644
int ret;
if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2455,7 +2455,7 @@ int hid_add_device(struct hid_device *hdev)
+@@ -2456,7 +2456,7 @@ int hid_add_device(struct hid_device *hdev)
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
@@ -44611,10 +44601,10 @@ index 3e6d115..ffecdeb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 4a6ca1c..e952750 100644
+index 56e24c0..e1c8e1f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
-@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (r1_sync_page_io(rdev, sect, s,
bio->bi_io_vec[idx].bv_page,
READ) != 0)
@@ -44623,7 +44613,7 @@ index 4a6ca1c..e952750 100644
}
sectors -= s;
sect += s;
-@@ -2156,7 +2156,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
test_bit(In_sync, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
@@ -46214,10 +46204,10 @@ index cf49c22..971b133 100644
struct sm_sysfs_attribute *vendor_attribute;
char *vendor;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index e5628fc..ffe54d1 100644
+index 91ec8cd..562ff5f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
-@@ -4551,6 +4551,7 @@ static void __exit bonding_exit(void)
+@@ -4552,6 +4552,7 @@ static void __exit bonding_exit(void)
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
@@ -46656,10 +46646,10 @@ index bf0d55e..82bcfbd1 100644
priv = netdev_priv(dev);
priv->phy = phy;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
-index 1831fb7..9c24bca 100644
+index 20bb669..9a0e17e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
-@@ -984,13 +984,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
+@@ -991,13 +991,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
int macvlan_link_register(struct rtnl_link_ops *ops)
{
/* common fields */
@@ -46682,7 +46672,7 @@ index 1831fb7..9c24bca 100644
return rtnl_link_register(ops);
};
-@@ -1045,7 +1047,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+@@ -1052,7 +1054,7 @@ static int macvlan_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -46692,10 +46682,10 @@ index 1831fb7..9c24bca 100644
};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index ff111a8..c4c3ac4 100644
+index 3381c4f..dea5fd5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
-@@ -1011,7 +1011,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
}
ret = 0;
@@ -46704,7 +46694,7 @@ index ff111a8..c4c3ac4 100644
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
macvtap_put_vlan(vlan);
-@@ -1181,7 +1181,7 @@ static int macvtap_device_event(struct notifier_block *unused,
+@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -46796,19 +46786,6 @@ index 26f8635..c237839 100644
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
-diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
-index d350d27..75d7d9d 100644
---- a/drivers/net/usb/cdc_ncm.c
-+++ b/drivers/net/usb/cdc_ncm.c
-@@ -768,7 +768,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
- skb_out->len > CDC_NCM_MIN_TX_PKT)
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
- ctx->tx_max - skb_out->len);
-- else if ((skb_out->len % dev->maxpacket) == 0)
-+ else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
- *skb_put(skb_out, 1) = 0; /* force short packet */
-
- /* set final frame length */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 660bd5e..ac59452 100644
--- a/drivers/net/usb/hso.c
@@ -50026,10 +50003,10 @@ index 62ec84b..93159d8 100644
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
-index 9117d0b..d289a7a 100644
+index 665acbf..d18fab4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
-@@ -739,7 +739,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
+@@ -734,7 +734,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
@@ -57622,7 +57599,7 @@ index 1e86823..8e34695 100644
else if (whole->bd_holder != NULL)
return false; /* is a partition of a held device */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index cbd3a7d..c6a2881 100644
+index cbd3a7d6f..c6a2881 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1216,9 +1216,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -57995,10 +57972,10 @@ index f3ac415..3d2420c 100644
server->ops->print_stats(m, tcon);
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
-index 849f613..eae6dec 100644
+index 7c6b73c..a8f0db2 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
-@@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
+@@ -1068,7 +1068,7 @@ cifs_init_request_bufs(void)
*/
cifs_req_cachep = kmem_cache_create("cifs_request",
CIFSMaxBufSize + max_hdr_size, 0,
@@ -58007,7 +57984,7 @@ index 849f613..eae6dec 100644
if (cifs_req_cachep == NULL)
return -ENOMEM;
-@@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
+@@ -1095,7 +1095,7 @@ cifs_init_request_bufs(void)
efficient to alloc 1 per page off the slab compared to 17K (5page)
alloc of large cifs buffers even when page debugging is on */
cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
@@ -58016,7 +57993,7 @@ index 849f613..eae6dec 100644
NULL);
if (cifs_sm_req_cachep == NULL) {
mempool_destroy(cifs_req_poolp);
-@@ -1168,8 +1168,8 @@ init_cifs(void)
+@@ -1180,8 +1180,8 @@ init_cifs(void)
atomic_set(&bufAllocCount, 0);
atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
@@ -58028,10 +58005,10 @@ index 849f613..eae6dec 100644
atomic_set(&midCount, 0);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
-index c0f3718..6afed7d 100644
+index 30f6e92..e915ba5 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
-@@ -804,35 +804,35 @@ struct cifs_tcon {
+@@ -806,35 +806,35 @@ struct cifs_tcon {
__u16 Flags; /* optional support bits */
enum statusEnum tidStatus;
#ifdef CONFIG_CIFS_STATS
@@ -58091,7 +58068,7 @@ index c0f3718..6afed7d 100644
} smb2_stats;
#endif /* CONFIG_CIFS_SMB2 */
} stats;
-@@ -1162,7 +1162,7 @@ convert_delimiter(char *path, char delim)
+@@ -1170,7 +1170,7 @@ convert_delimiter(char *path, char delim)
}
#ifdef CONFIG_CIFS_STATS
@@ -58100,7 +58077,7 @@ index c0f3718..6afed7d 100644
static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
unsigned int bytes)
-@@ -1528,8 +1528,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
+@@ -1536,8 +1536,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
/* Various Debug counters */
GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
#ifdef CONFIG_CIFS_STATS2
@@ -58112,7 +58089,7 @@ index c0f3718..6afed7d 100644
GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
-index 834fce7..8a314b5 100644
+index 87c4dd0..a90f115 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
@@ -58134,7 +58111,7 @@ index 834fce7..8a314b5 100644
}
retry:
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
-index 2f9f379..43f8025 100644
+index 3b0c62e..f7d090c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -170,7 +170,7 @@ cifs_buf_get(void)
@@ -58156,10 +58133,10 @@ index 2f9f379..43f8025 100644
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
-index 526fb89..ecdbf5a 100644
+index d1fdfa8..94558f8 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
-@@ -616,27 +616,27 @@ static void
+@@ -626,27 +626,27 @@ static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
#ifdef CONFIG_CIFS_STATS
@@ -58208,7 +58185,7 @@ index 526fb89..ecdbf5a 100644
#endif
}
-@@ -645,36 +645,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
#ifdef CONFIG_CIFS_STATS
seq_printf(m, " Oplocks breaks: %d",
@@ -58265,7 +58242,7 @@ index 526fb89..ecdbf5a 100644
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
-index 192f51a..539307e 100644
+index 35ddc3e..563e809 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
@@ -58838,7 +58815,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index 3d78fcc..122929d 100644
+index 3d78fcc..75b208f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,8 +55,20 @@
@@ -59329,7 +59306,7 @@ index 3d78fcc..122929d 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1626,3 +1801,298 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+@@ -1626,3 +1801,311 @@ asmlinkage long compat_sys_execve(const char __user * filename,
return compat_do_execve(getname(filename), argv, envp);
}
#endif
@@ -59577,12 +59554,25 @@ index 3d78fcc..122929d 100644
+}
+#endif
+
-+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
++void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
+{
-+
+#ifdef CONFIG_PAX_USERCOPY
+ const char *type;
++#endif
++
++#ifndef CONFIG_STACK_GROWSUP
++ const void * stackstart = task_stack_page(current);
++ if (unlikely(current_stack_pointer < stackstart + 512 ||
++ current_stack_pointer >= stackstart + THREAD_SIZE))
++ BUG();
++#endif
+
++#ifndef CONFIG_PAX_USERCOPY_DEBUG
++ if (const_size)
++ return;
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
+ if (!n)
+ return;
+
@@ -63008,7 +62998,7 @@ index 49d84f8..4807e0b 100644
/* Copy the blockcheck stats from the superblock probe */
osb->osb_ecc_stats = *stats;
diff --git a/fs/open.c b/fs/open.c
-index b9ed8b2..0d5c7a0 100644
+index 2ed7325..4e77ac3 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -32,6 +32,8 @@
@@ -63112,7 +63102,7 @@ index b9ed8b2..0d5c7a0 100644
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
if (!uid_valid(uid))
-@@ -994,6 +1031,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+@@ -982,6 +1019,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
} else {
fsnotify_open(f);
fd_install(fd, f);
@@ -77540,10 +77530,10 @@ index 810431d..0ec4804f 100644
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
-index 34c7bdc..38d4f3b 100644
+index 38a7437..47f62a4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
-@@ -787,6 +787,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+@@ -802,6 +802,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -78288,7 +78278,7 @@ index d08e4d2..95fad61 100644
/**
diff --git a/include/linux/cred.h b/include/linux/cred.h
-index 04421e8..117e17a 100644
+index 04421e8..a85afd4 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -35,7 +35,7 @@ struct group_info {
@@ -78319,6 +78309,14 @@ index 04421e8..117e17a 100644
#endif
/**
+@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred)
+
+ #define task_uid(task) (task_cred_xxx((task), uid))
+ #define task_euid(task) (task_cred_xxx((task), euid))
++#define task_securebits(task) (task_cred_xxx((task), securebits))
+
+ #define current_cred_xxx(xxx) \
+ ({ \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b92eadf..b4ecdc1 100644
--- a/include/linux/crypto.h
@@ -81345,7 +81343,7 @@ index 492de72..1bddcd4 100644
return nd->saved_names[nd->depth];
}
diff --git a/include/linux/net.h b/include/linux/net.h
-index 94734a6..d8d6931 100644
+index 17d8339..81656c0 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -192,7 +192,7 @@ struct net_proto_family {
@@ -81358,18 +81356,18 @@ index 94734a6..d8d6931 100644
struct iovec;
struct kvec;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index daafd95..74c5d1e 100644
+index 911718f..f673407 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1146,6 +1146,7 @@ struct net_device_ops {
- struct net_device *dev,
+@@ -1147,6 +1147,7 @@ struct net_device_ops {
void *priv);
+ int (*ndo_get_lock_subclass)(struct net_device *dev);
};
+typedef struct net_device_ops __no_const net_device_ops_no_const;
/*
* The DEVICE structure.
-@@ -1228,7 +1229,7 @@ struct net_device {
+@@ -1229,7 +1230,7 @@ struct net_device {
int iflink;
struct net_device_stats stats;
@@ -81712,10 +81710,10 @@ index 4ea1d37..80f4b33 100644
/*
* The return value from decompress routine is the length of the
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
-index de83b4e..c4b997d 100644
+index 1841b58..fbeebf8 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
+@@ -29,11 +29,16 @@ extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif
@@ -81732,7 +81730,7 @@ index de83b4e..c4b997d 100644
#ifdef CONFIG_PREEMPT_COUNT
-@@ -41,6 +46,12 @@ do { \
+@@ -43,6 +48,12 @@ do { \
barrier(); \
} while (0)
@@ -81745,7 +81743,7 @@ index de83b4e..c4b997d 100644
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
-@@ -49,6 +60,12 @@ do { \
+@@ -51,6 +62,12 @@ do { \
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -81758,7 +81756,7 @@ index de83b4e..c4b997d 100644
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
-@@ -113,8 +130,10 @@ do { \
+@@ -115,8 +132,10 @@ do { \
* region.
*/
#define preempt_disable() barrier()
@@ -81769,7 +81767,7 @@ index de83b4e..c4b997d 100644
#define preempt_enable() barrier()
#define preempt_check_resched() do { } while (0)
-@@ -128,11 +147,13 @@ do { \
+@@ -130,11 +149,13 @@ do { \
/*
* Modules have no business playing preemption tricks.
*/
@@ -82108,7 +82106,7 @@ index b66c211..13d2915 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index a781dec..2c03225 100644
+index ccd0c6f..39c28a4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -129,6 +129,7 @@ struct fs_struct;
@@ -82344,7 +82342,7 @@ index a781dec..2c03225 100644
{
return tsk->pid;
}
-@@ -1988,6 +2099,25 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2006,6 +2117,25 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
@@ -82370,7 +82368,7 @@ index a781dec..2c03225 100644
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
-@@ -2112,7 +2242,9 @@ void yield(void);
+@@ -2130,7 +2260,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -82380,7 +82378,7 @@ index a781dec..2c03225 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2145,6 +2277,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2163,6 +2295,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -82388,7 +82386,7 @@ index a781dec..2c03225 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2307,7 +2440,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2325,7 +2458,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -82397,7 +82395,7 @@ index a781dec..2c03225 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2508,9 +2641,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2526,9 +2659,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -82731,7 +82729,7 @@ index 6ae004e..2743532 100644
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
-index 54f91d3..be2c379 100644
+index 302ab80..3233276 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -11,7 +11,7 @@ struct sock;
@@ -83017,30 +83015,18 @@ index 387fa7d..3fcde6b 100644
#ifdef CONFIG_MAGIC_SYSRQ
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
-index fddbe20..e4cce53 100644
+index a629e4b..3fea3d9 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -161,6 +161,25 @@ static inline bool test_and_clear_restore_sigmask(void)
+@@ -159,6 +159,13 @@ static inline bool test_and_clear_restore_sigmask(void)
#error "no set_restore_sigmask() provided and default one won't work"
#endif
-+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
-+
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_USERCOPY)
-+extern void pax_check_alloca(unsigned long size);
-+#endif
++extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
+
+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_USERCOPY)
-+ /* always check if we've overflowed the stack in a copy*user */
-+ pax_check_alloca(sizeof(unsigned long));
-+#endif
-+
-+#ifndef CONFIG_PAX_USERCOPY_DEBUG
-+ if (!__builtin_constant_p(n))
-+#endif
-+ __check_object_size(ptr, n, to_user);
++ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
+}
+
#endif /* __KERNEL__ */
@@ -83977,21 +83963,6 @@ index 8ba8ce2..99b7fff 100644
struct sk_buff *skb, int offset, struct iovec *to,
size_t len, struct dma_pinned_list *pinned_list);
-diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
-index 956b175..55d1504 100644
---- a/include/net/netfilter/nf_conntrack_extend.h
-+++ b/include/net/netfilter/nf_conntrack_extend.h
-@@ -47,8 +47,8 @@ enum nf_ct_ext_id {
- /* Extensions: optional stuff which isn't permanently in struct. */
- struct nf_ct_ext {
- struct rcu_head rcu;
-- u8 offset[NF_CT_EXT_NUM];
-- u8 len;
-+ u16 offset[NF_CT_EXT_NUM];
-+ u16 len;
- char data[0];
- };
-
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 2b47eaa..6d5bcc2 100644
--- a/include/net/netlink.h
@@ -84153,7 +84124,7 @@ index 7f4eeb3..37e8fe1 100644
/* Get the size of a DATA chunk payload. */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
-index 6ee76c8..45f2609 100644
+index 0dfcc92..7967849 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -507,7 +507,7 @@ struct sctp_pf {
@@ -84357,7 +84328,7 @@ index 52beadf..598734c 100644
u8 qfull;
enum fc_lport_state state;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
-index d65fbec..f80fef2 100644
+index b4f1eff..7fdbd46 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -180,9 +180,9 @@ struct scsi_device {
@@ -84817,7 +84788,7 @@ index 30f5362..8ed8ac9 100644
void *pmi_pal;
u8 *vbe_state_orig; /*
diff --git a/init/Kconfig b/init/Kconfig
-index d56cb03..7e6d5dc 100644
+index 93c5ef0..ac92caa 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1079,6 +1079,7 @@ endif # CGROUPS
@@ -85657,7 +85628,7 @@ index 8d6e145..33e0b1e 100644
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
set_fs(fs);
diff --git a/kernel/audit.c b/kernel/audit.c
-index 95a20f3..e1cb300 100644
+index d5f31c1..06646e1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
@@ -85697,7 +85668,7 @@ index 95a20f3..e1cb300 100644
s.version = AUDIT_VERSION_LATEST;
s.backlog_wait_time = audit_backlog_wait_time;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
-index 7aef2f4..db6ced2 100644
+index 3b29605..f6c85d0 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1945,7 +1945,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
@@ -86011,7 +85982,7 @@ index c18b1f1..b9a0132 100644
return -ENOMEM;
diff --git a/kernel/cred.c b/kernel/cred.c
-index e0573a4..3874e41 100644
+index e0573a4..20fb164 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
@@ -86049,7 +86020,7 @@ index e0573a4..3874e41 100644
/* dumpability changes */
if (!uid_eq(old->euid, new->euid) ||
!gid_eq(old->egid, new->egid) ||
-@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
+@@ -479,6 +491,108 @@ int commit_creds(struct cred *new)
put_cred(old);
return 0;
}
@@ -86118,6 +86089,7 @@ index e0573a4..3874e41 100644
+ int ret;
+ int schedule_it = 0;
+ struct task_struct *t;
++ unsigned oldsecurebits = current_cred()->securebits;
+
+ /* we won't get called with tasklist_lock held for writing
+ and interrupts disabled as the cred struct in that case is
@@ -86134,7 +86106,11 @@ index e0573a4..3874e41 100644
+ read_lock(&tasklist_lock);
+ for (t = next_thread(current); t != current;
+ t = next_thread(t)) {
-+ if (t->delayed_cred == NULL) {
++ /* we'll check if the thread has uid 0 in
++ * the delayed worker routine
++ */
++ if (task_securebits(t) == oldsecurebits &&
++ t->delayed_cred == NULL) {
+ t->delayed_cred = get_cred(new);
+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
+ set_tsk_need_resched(t);
@@ -86143,6 +86119,7 @@ index e0573a4..3874e41 100644
+ read_unlock(&tasklist_lock);
+ rcu_read_unlock();
+ }
++
+ return ret;
+#else
+ return __commit_creds(new);
@@ -90966,7 +90943,7 @@ index accfd24..e00f0c0 100644
struct timer_list timer;
unsigned long expire;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
-index b418cb0..f879a3d 100644
+index 4f3a3c03..04b7886 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
@@ -92488,10 +92465,10 @@ index b32b70c..e512eb0 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 2de3c84..4ecaf1b 100644
+index 06a9bc0..cfbba83 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -2069,15 +2069,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2070,15 +2070,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
@@ -92512,7 +92489,7 @@ index 2de3c84..4ecaf1b 100644
if (ret)
goto out;
-@@ -2122,15 +2124,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2123,15 +2125,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
@@ -92533,7 +92510,7 @@ index 2de3c84..4ecaf1b 100644
if (ret)
goto out;
-@@ -2599,6 +2603,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2600,6 +2604,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -92561,7 +92538,7 @@ index 2de3c84..4ecaf1b 100644
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
-@@ -2715,6 +2740,11 @@ retry_avoidcopy:
+@@ -2716,6 +2741,11 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
@@ -92573,7 +92550,7 @@ index 2de3c84..4ecaf1b 100644
/* Make the old page be freed below */
new_page = old_page;
}
-@@ -2879,6 +2909,10 @@ retry:
+@@ -2880,6 +2910,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -92584,7 +92561,7 @@ index 2de3c84..4ecaf1b 100644
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
-@@ -2909,6 +2943,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2910,6 +2944,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -92595,7 +92572,7 @@ index 2de3c84..4ecaf1b 100644
address &= huge_page_mask(h);
ptep = huge_pte_offset(mm, address);
-@@ -2922,6 +2960,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2923,6 +2961,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
VM_FAULT_SET_HINDEX(hstate_index(h));
}
@@ -97229,10 +97206,10 @@ index def5dd2..4ce55cec 100644
return 0;
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
-index 175273f..1c63e05 100644
+index 44ebd5c..1f732bae 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
-@@ -474,7 +474,7 @@ out:
+@@ -475,7 +475,7 @@ out:
return NOTIFY_DONE;
}
@@ -97241,7 +97218,7 @@ index 175273f..1c63e05 100644
.notifier_call = vlan_device_event,
};
-@@ -549,8 +549,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
+@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
@@ -97509,7 +97486,7 @@ index 919a5ce..cc6b444 100644
table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
if (!table)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
-index 8323bce..a03130d 100644
+index d074d06..ad3cfcf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
@@ -97533,7 +97510,7 @@ index 8323bce..a03130d 100644
batadv_iv_ogm_slide_own_bcast_window(hard_iface);
-@@ -1594,7 +1594,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
+@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
return;
/* could be changed by schedule_own_packet() */
@@ -97543,10 +97520,10 @@ index 8323bce..a03130d 100644
if (ogm_packet->flags & BATADV_DIRECTLINK)
has_directlink_flag = true;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
-index 88df9b1..69cf7c0 100644
+index cc1cfd6..7a68e022 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
-@@ -445,7 +445,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
+@@ -446,7 +446,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
frag_header.packet_type = BATADV_UNICAST_FRAG;
frag_header.version = BATADV_COMPAT_VERSION;
frag_header.ttl = BATADV_TTL;
@@ -97743,10 +97720,10 @@ index f9c0980a..fcbbfeb 100644
tty_port_close(&dev->port, tty, filp);
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
-index 0e474b1..fb7109c 100644
+index 1059ed3..d70846a 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
-@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
tmp.valid_hooks = t->table->valid_hooks;
}
mutex_unlock(&ebt_mutex);
@@ -97755,7 +97732,7 @@ index 0e474b1..fb7109c 100644
BUGPRINT("c2u Didn't work\n");
ret = -EFAULT;
break;
-@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
goto out;
tmp.valid_hooks = t->valid_hooks;
@@ -97764,7 +97741,7 @@ index 0e474b1..fb7109c 100644
ret = -EFAULT;
break;
}
-@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
tmp.entries_size = t->table->entries_size;
tmp.valid_hooks = t->table->valid_hooks;
@@ -98062,7 +98039,7 @@ index a16ed7b..eb44d17 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 45fa2f1..f3e28ec 100644
+index fccc195..c8486ab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1688,14 +1688,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
@@ -98118,7 +98095,7 @@ index 45fa2f1..f3e28ec 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -4331,7 +4331,7 @@ void netif_napi_del(struct napi_struct *napi)
+@@ -4333,7 +4333,7 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
@@ -98127,7 +98104,7 @@ index 45fa2f1..f3e28ec 100644
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -6250,7 +6250,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -6302,7 +6302,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -98155,7 +98132,7 @@ index cf999e0..c59a975 100644
}
EXPORT_SYMBOL(dev_load);
diff --git a/net/core/filter.c b/net/core/filter.c
-index ad30d62..21c0743 100644
+index ebce437..9fed9d0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -126,7 +126,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
@@ -98193,35 +98170,7 @@ index ad30d62..21c0743 100644
continue;
case BPF_S_ANC_PROTOCOL:
A = ntohs(skb->protocol);
-@@ -355,6 +355,10 @@ load_b:
-
- if (skb_is_nonlinear(skb))
- return 0;
-+
-+ if (skb->len < sizeof(struct nlattr))
-+ return 0;
-+
- if (A > skb->len - sizeof(struct nlattr))
- return 0;
-
-@@ -371,11 +375,15 @@ load_b:
-
- if (skb_is_nonlinear(skb))
- return 0;
-+
-+ if (skb->len < sizeof(struct nlattr))
-+ return 0;
-+
- if (A > skb->len - sizeof(struct nlattr))
- return 0;
-
- nla = (struct nlattr *)&skb->data[A];
-- if (nla->nla_len > A - skb->len)
-+ if (nla->nla_len > skb->len - A)
- return 0;
-
- nla = nla_find_nested(nla, X);
-@@ -391,9 +399,10 @@ load_b:
+@@ -395,9 +395,10 @@ load_b:
continue;
#endif
default:
@@ -98233,7 +98182,7 @@ index ad30d62..21c0743 100644
return 0;
}
}
-@@ -416,7 +425,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
+@@ -420,7 +421,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
int pc, ret = 0;
@@ -98242,7 +98191,7 @@ index ad30d62..21c0743 100644
masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
if (!masks)
return -ENOMEM;
-@@ -679,7 +688,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
+@@ -683,7 +684,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
if (!fp)
return -ENOMEM;
@@ -98314,7 +98263,7 @@ index b618694..192bbba 100644
m->msg_iov = iov;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
-index e161290..8149aea 100644
+index 7d95f69..a6065de 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2824,7 +2824,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
@@ -98404,7 +98353,7 @@ index 2bf8329..2eb1423 100644
return 0;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
-index 81d3a9a..a0bd7a8 100644
+index 7c8ffd9..0cb3687 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
@@ -98479,7 +98428,7 @@ index fdac61c..e5e5b46 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 120eecc..cd1ec44 100644
+index 83b9d6a..cff1ce7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -98558,7 +98507,7 @@ index b442e7e..6f5b5a2 100644
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 90b96a1..cd18f16d 100644
+index e5ae776e..15c90cb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2003,7 +2003,7 @@ EXPORT_SYMBOL(__skb_checksum);
@@ -98704,7 +98653,7 @@ index c0fc6bd..51d8326 100644
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
-index a0e9cf6..ef7f9ed 100644
+index 6a7fae2..d7c22e6 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -9,26 +9,33 @@
@@ -99024,7 +98973,7 @@ index c7539e2..b455e51 100644
break;
case NETDEV_DOWN:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
-index b53f0bf..3585b33 100644
+index 9d43468..ffa28cc 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
@@ -99146,7 +99095,7 @@ index c10a3ce..dd71f84 100644
return -ENOMEM;
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
-index ec4f762..4ce3645 100644
+index 94213c8..8bdb342 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
@@ -99200,7 +99149,7 @@ index 580dd96..9fcef7e 100644
msg.msg_flags = flags;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
-index 48eafae..defff53 100644
+index e4a8f76..dd8ad72 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -44,7 +44,7 @@
@@ -99275,7 +99224,7 @@ index 812b183..56cbe9c 100644
.maxtype = IFLA_IPTUN_MAX,
.policy = ipip_policy,
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
-index 59da7cd..e318de1 100644
+index f95b6f9..2ee2097 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
@@ -99305,7 +99254,7 @@ index 59da7cd..e318de1 100644
ret = -EFAULT;
else
ret = 0;
-@@ -1688,7 +1688,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
+@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
switch (cmd) {
case ARPT_SO_GET_INFO:
@@ -99314,7 +99263,7 @@ index 59da7cd..e318de1 100644
break;
case ARPT_SO_GET_ENTRIES:
ret = compat_get_entries(sock_net(sk), user, len);
-@@ -1733,7 +1733,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
+@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
switch (cmd) {
case ARPT_SO_GET_INFO:
@@ -99324,7 +99273,7 @@ index 59da7cd..e318de1 100644
case ARPT_SO_GET_ENTRIES:
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
-index 718dfbd..cef4152 100644
+index 99e810f..3711b81 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
@@ -99354,7 +99303,7 @@ index 718dfbd..cef4152 100644
ret = -EFAULT;
else
ret = 0;
-@@ -1971,7 +1971,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
switch (cmd) {
case IPT_SO_GET_INFO:
@@ -99363,7 +99312,7 @@ index 718dfbd..cef4152 100644
break;
case IPT_SO_GET_ENTRIES:
ret = compat_get_entries(sock_net(sk), user, len);
-@@ -2018,7 +2018,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
switch (cmd) {
case IPT_SO_GET_INFO:
@@ -99386,7 +99335,7 @@ index 2510c02..cfb34fa 100644
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index 2d11c09..3f153f8 100644
+index e21934b..16f52a6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -59,7 +59,7 @@ struct ping_table {
@@ -99398,39 +99347,16 @@ index 2d11c09..3f153f8 100644
EXPORT_SYMBOL_GPL(pingv6_ops);
static u16 ping_port_rover;
-@@ -255,23 +255,28 @@ int ping_init_sock(struct sock *sk)
- struct group_info *group_info = get_current_groups();
- int i, j, count = group_info->ngroups;
- kgid_t low, high;
-+ int ret = 0;
+@@ -259,7 +259,7 @@ int ping_init_sock(struct sock *sk)
inet_get_ping_group_range_net(net, &low, &high);
if (gid_lte(low, group) && gid_lte(group, high))
- return 0;
+ goto out_release_group;
- for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
- for (j = 0; j < cp_count; j++) {
- kgid_t gid = group_info->blocks[i][j];
- if (gid_lte(low, gid) && gid_lte(gid, high))
-- return 0;
-+ goto out_release_group;
- }
-
- count -= cp_count;
- }
-
-- return -EACCES;
-+ ret = -EACCES;
-+
-+out_release_group:
-+ put_group_info(group_info);
-+ return ret;
- }
- EXPORT_SYMBOL_GPL(ping_init_sock);
-
-@@ -341,7 +346,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ group_info = get_current_groups();
+ count = group_info->ngroups;
+@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
return -ENODEV;
}
}
@@ -99439,7 +99365,7 @@ index 2d11c09..3f153f8 100644
scoped);
rcu_read_unlock();
-@@ -549,7 +554,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
+@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
}
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -99448,7 +99374,7 @@ index 2d11c09..3f153f8 100644
#endif
}
-@@ -567,7 +572,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
+@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
info, (u8 *)icmph);
#if IS_ENABLED(CONFIG_IPV6)
} else if (family == AF_INET6) {
@@ -99457,7 +99383,7 @@ index 2d11c09..3f153f8 100644
info, (u8 *)icmph);
#endif
}
-@@ -837,6 +842,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -844,6 +844,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{
struct inet_sock *isk = inet_sk(sk);
int family = sk->sk_family;
@@ -99466,7 +99392,7 @@ index 2d11c09..3f153f8 100644
struct sk_buff *skb;
int copied, err;
-@@ -846,12 +853,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -853,12 +855,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (flags & MSG_OOB)
goto out;
@@ -99487,7 +99413,7 @@ index 2d11c09..3f153f8 100644
addr_len);
#endif
}
-@@ -883,7 +897,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -890,7 +899,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sin->sin_port = 0 /* skb->h.uh->source */;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
@@ -99495,7 +99421,7 @@ index 2d11c09..3f153f8 100644
}
if (isk->cmsg_flags)
-@@ -905,14 +918,13 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+@@ -912,14 +920,13 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
@@ -99512,7 +99438,7 @@ index 2d11c09..3f153f8 100644
else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
-@@ -1104,7 +1116,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -1111,7 +1118,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -99595,7 +99521,7 @@ index c04518f..824ebe5 100644
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 4c011ec..8fae66b 100644
+index 1344373..02f339e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -233,7 +233,7 @@ static const struct seq_operations rt_cache_seq_ops = {
@@ -100263,7 +100189,7 @@ index 7b32652..0bc348b 100644
table = kmemdup(ipv6_icmp_table_template,
sizeof(ipv6_icmp_table_template),
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
-index f3ffb43..1172ba7 100644
+index 2465d18..bc5bf7f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,7 +71,7 @@ struct ip6gre_net {
@@ -100284,7 +100210,7 @@ index f3ffb43..1172ba7 100644
.handler = ip6gre_rcv,
.err_handler = ip6gre_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-@@ -1634,7 +1634,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+@@ -1643,7 +1643,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_FLAGS] = { .type = NLA_U32 },
};
@@ -100293,7 +100219,7 @@ index f3ffb43..1172ba7 100644
.kind = "ip6gre",
.maxtype = IFLA_GRE_MAX,
.policy = ip6gre_policy,
-@@ -1647,7 +1647,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+@@ -1657,7 +1657,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
.fill_info = ip6gre_fill_info,
};
@@ -100303,7 +100229,7 @@ index f3ffb43..1172ba7 100644
.maxtype = IFLA_GRE_MAX,
.policy = ip6gre_policy,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
-index 5db8d31..4a72c26 100644
+index 0e51f68..1f501e1 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -85,7 +85,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -100360,7 +100286,7 @@ index 0a00f44..bec42b2 100644
msg.msg_flags = flags;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
-index 710238f..0fd1816 100644
+index e080fbb..412b3cf 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
@@ -100390,7 +100316,7 @@ index 710238f..0fd1816 100644
ret = -EFAULT;
else
ret = 0;
-@@ -1981,7 +1981,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
switch (cmd) {
case IP6T_SO_GET_INFO:
@@ -100399,7 +100325,7 @@ index 710238f..0fd1816 100644
break;
case IP6T_SO_GET_ENTRIES:
ret = compat_get_entries(sock_net(sk), user, len);
-@@ -2028,7 +2028,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
switch (cmd) {
case IP6T_SO_GET_INFO:
@@ -100692,10 +100618,10 @@ index cc85a9b..526a133 100644
return -ENOMEM;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index fba54a4..73e374e 100644
+index 7cc1102..7785931 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
-@@ -2972,7 +2972,7 @@ struct ctl_table ipv6_route_table_template[] = {
+@@ -2973,7 +2973,7 @@ struct ctl_table ipv6_route_table_template[] = {
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
@@ -101742,7 +101668,7 @@ index f042ae5..30ea486 100644
}
EXPORT_SYMBOL(nf_unregister_sockopt);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
-index adce01e..8d52d50 100644
+index c68e5e0..8d52d50 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -152,8 +152,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
@@ -101756,16 +101682,6 @@ index adce01e..8d52d50 100644
nfnl_lock(NFNL_SUBSYS_NFTABLES);
type = __nf_tables_chain_type_lookup(afi->family, nla);
if (type != NULL)
-@@ -1934,7 +1934,8 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
-
- static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
- [NFTA_SET_TABLE] = { .type = NLA_STRING },
-- [NFTA_SET_NAME] = { .type = NLA_STRING },
-+ [NFTA_SET_NAME] = { .type = NLA_STRING,
-+ .len = IFNAMSIZ - 1 },
- [NFTA_SET_FLAGS] = { .type = NLA_U32 },
- [NFTA_SET_KEY_TYPE] = { .type = NLA_U32 },
- [NFTA_SET_KEY_LEN] = { .type = NLA_U32 },
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index a155d19..726b0f2 100644
--- a/net/netfilter/nfnetlink_log.c
@@ -102521,10 +102437,10 @@ index 2b1738e..a9d0fc9 100644
/* Initialize IPv6 support and register with socket layer. */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
-index 4e1d0fc..068fef7 100644
+index a62a215..0976540 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
-@@ -831,8 +831,10 @@ int sctp_register_af(struct sctp_af *af)
+@@ -836,8 +836,10 @@ int sctp_register_af(struct sctp_af *af)
return 0;
}
@@ -102536,7 +102452,7 @@ index 4e1d0fc..068fef7 100644
return 1;
}
-@@ -962,7 +964,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
+@@ -967,7 +969,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
static struct sctp_af sctp_af_inet;
@@ -102545,7 +102461,7 @@ index 4e1d0fc..068fef7 100644
.event_msgname = sctp_inet_event_msgname,
.skb_msgname = sctp_inet_skb_msgname,
.af_supported = sctp_inet_af_supported,
-@@ -1034,7 +1036,7 @@ static const struct net_protocol sctp_protocol = {
+@@ -1039,7 +1041,7 @@ static const struct net_protocol sctp_protocol = {
};
/* IPv4 address related functions. */
@@ -102554,7 +102470,7 @@ index 4e1d0fc..068fef7 100644
.sa_family = AF_INET,
.sctp_xmit = sctp_v4_xmit,
.setsockopt = ip_setsockopt,
-@@ -1119,7 +1121,7 @@ static void sctp_v4_pf_init(void)
+@@ -1124,7 +1126,7 @@ static void sctp_v4_pf_init(void)
static void sctp_v4_pf_exit(void)
{
@@ -102564,7 +102480,7 @@ index 4e1d0fc..068fef7 100644
static int sctp_v4_protosw_init(void)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
-index 5d6883f..394a102 100644
+index fef2acd..c705c4f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -439,7 +439,7 @@ static void sctp_generate_sack_event(unsigned long data)
@@ -102577,10 +102493,10 @@ index 5d6883f..394a102 100644
sctp_generate_t1_cookie_event,
sctp_generate_t1_init_event,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index 981aaf8..5bc016d 100644
+index 604a6ac..f87f0a3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
-@@ -2169,11 +2169,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
+@@ -2175,11 +2175,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
{
struct sctp_association *asoc;
struct sctp_ulpevent *event;
@@ -102595,7 +102511,7 @@ index 981aaf8..5bc016d 100644
/*
* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
-@@ -4255,13 +4257,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+@@ -4259,13 +4261,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
@@ -102613,7 +102529,7 @@ index 981aaf8..5bc016d 100644
return -EFAULT;
return 0;
}
-@@ -4279,6 +4284,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+@@ -4283,6 +4288,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
*/
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
@@ -102622,7 +102538,7 @@ index 981aaf8..5bc016d 100644
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
-@@ -4287,7 +4294,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
+@@ -4291,7 +4298,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
len = sizeof(int);
if (put_user(len, optlen))
return -EFAULT;
@@ -102632,7 +102548,7 @@ index 981aaf8..5bc016d 100644
return -EFAULT;
return 0;
}
-@@ -4662,12 +4670,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
+@@ -4666,12 +4674,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
*/
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
@@ -102649,7 +102565,7 @@ index 981aaf8..5bc016d 100644
return -EFAULT;
return 0;
}
-@@ -4708,6 +4719,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+@@ -4712,6 +4723,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen)
return -ENOMEM;
@@ -102659,10 +102575,10 @@ index 981aaf8..5bc016d 100644
return -EFAULT;
to += addrlen;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
-index 35c8923..536614e 100644
+index c82fdc1..4ca1f95 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
-@@ -305,7 +305,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+@@ -308,7 +308,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
{
struct net *net = current->nsproxy->net_ns;
char tmp[8];
@@ -102671,7 +102587,7 @@ index 35c8923..536614e 100644
int ret;
int changed = 0;
char *none = "none";
-@@ -352,7 +352,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+@@ -355,7 +355,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
{
struct net *net = current->nsproxy->net_ns;
int new_value;
@@ -102680,7 +102596,7 @@ index 35c8923..536614e 100644
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
int ret;
-@@ -379,7 +379,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+@@ -382,7 +382,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
{
struct net *net = current->nsproxy->net_ns;
int new_value;
@@ -102689,7 +102605,16 @@ index 35c8923..536614e 100644
unsigned int min = *(unsigned int *) ctl->extra1;
unsigned int max = *(unsigned int *) ctl->extra2;
int ret;
-@@ -402,7 +402,7 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+@@ -408,7 +408,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
+ loff_t *ppos)
+ {
+ struct net *net = current->nsproxy->net_ns;
+- struct ctl_table tbl;
++ ctl_table_no_const tbl;
+ int new_value, ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+@@ -436,7 +436,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
int sctp_sysctl_net_register(struct net *net)
{
@@ -102698,7 +102623,7 @@ index 35c8923..536614e 100644
if (!net_eq(net, &init_net)) {
int i;
-@@ -415,7 +415,10 @@ int sctp_sysctl_net_register(struct net *net)
+@@ -449,7 +449,10 @@ int sctp_sysctl_net_register(struct net *net)
table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
}
@@ -107563,10 +107488,10 @@ index 0000000..82bc5a8
+}
diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
new file mode 100644
-index 0000000..8af3693
+index 0000000..e90c205
--- /dev/null
+++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,287 @@
+@@ -0,0 +1,295 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
@@ -107637,7 +107562,6 @@ index 0000000..8af3693
+#include "tree-pass.h"
+//#include "df.h"
+#include "predict.h"
-+//#include "lto-streamer.h"
+#include "ipa-utils.h"
+
+#if BUILDING_GCC_VERSION >= 4009
@@ -107652,6 +107576,7 @@ index 0000000..8af3693
+#include "tree-ssanames.h"
+#include "print-tree.h"
+#include "tree-eh.h"
++#include "stmt.h"
+#endif
+
+#include "gimple.h"
@@ -107665,6 +107590,10 @@ index 0000000..8af3693
+#include "ssa-iterators.h"
+#endif
+
++//#include "lto/lto.h"
++//#include "data-streamer.h"
++//#include "lto-compress.h"
++
+//#include "expr.h" where are you...
+extern rtx emit_move_insn(rtx x, rtx y);
+
@@ -107676,6 +107605,8 @@ index 0000000..8af3693
+
+#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
+#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node))
++#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node))
+
+#if BUILDING_GCC_VERSION == 4005
+#define FOR_EACH_LOCAL_DECL(FUN, I, D) for (tree vars = (FUN)->local_decls; vars && (D = TREE_VALUE(vars)); vars = TREE_CHAIN(vars), I)
@@ -107814,6 +107745,8 @@ index 0000000..8af3693
+#if BUILDING_GCC_VERSION >= 4007
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
+ cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
++#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
++ cgraph_create_edge_including_clones((caller), (callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
+#endif
+
+#if BUILDING_GCC_VERSION <= 4008
@@ -121719,7 +121652,7 @@ index ed2f51e..cc2d8f6 100644
ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
-index 6789d78..4afd019e 100644
+index 6789d788..4afd019e 100644
--- a/tools/perf/util/include/asm/alternative-asm.h
+++ b/tools/perf/util/include/asm/alternative-asm.h
@@ -5,4 +5,7 @@
diff --git a/3.14.4/4425_grsec_remove_EI_PAX.patch b/3.14.5/4425_grsec_remove_EI_PAX.patch
index fc51f79..fc51f79 100644
--- a/3.14.4/4425_grsec_remove_EI_PAX.patch
+++ b/3.14.5/4425_grsec_remove_EI_PAX.patch
diff --git a/3.14.4/4427_force_XATTR_PAX_tmpfs.patch b/3.14.5/4427_force_XATTR_PAX_tmpfs.patch
index bbcef41..bbcef41 100644
--- a/3.14.4/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.14.5/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.14.4/4430_grsec-remove-localversion-grsec.patch b/3.14.5/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.14.4/4430_grsec-remove-localversion-grsec.patch
+++ b/3.14.5/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.14.4/4435_grsec-mute-warnings.patch b/3.14.5/4435_grsec-mute-warnings.patch
index 392cefb..392cefb 100644
--- a/3.14.4/4435_grsec-mute-warnings.patch
+++ b/3.14.5/4435_grsec-mute-warnings.patch
diff --git a/3.14.4/4440_grsec-remove-protected-paths.patch b/3.14.5/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.14.4/4440_grsec-remove-protected-paths.patch
+++ b/3.14.5/4440_grsec-remove-protected-paths.patch
diff --git a/3.14.4/4450_grsec-kconfig-default-gids.patch b/3.14.5/4450_grsec-kconfig-default-gids.patch
index 19a4285..19a4285 100644
--- a/3.14.4/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.5/4450_grsec-kconfig-default-gids.patch
diff --git a/3.14.4/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.5/4465_selinux-avc_audit-log-curr_ip.patch
index 2765cdc..2765cdc 100644
--- a/3.14.4/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.14.5/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.14.4/4470_disable-compat_vdso.patch b/3.14.5/4470_disable-compat_vdso.patch
index 677174c..677174c 100644
--- a/3.14.4/4470_disable-compat_vdso.patch
+++ b/3.14.5/4470_disable-compat_vdso.patch
diff --git a/3.14.4/4475_emutramp_default_on.patch b/3.14.5/4475_emutramp_default_on.patch
index a453a5b..a453a5b 100644
--- a/3.14.4/4475_emutramp_default_on.patch
+++ b/3.14.5/4475_emutramp_default_on.patch
diff --git a/3.2.59/0000_README b/3.2.59/0000_README
index 4d1e516..71c8053 100644
--- a/3.2.59/0000_README
+++ b/3.2.59/0000_README
@@ -154,7 +154,7 @@ Patch: 1058_linux-3.2.59.patch
From: http://www.kernel.org
Desc: Linux 3.2.59
-Patch: 4420_grsecurity-3.0-3.2.59-201405281920.patch
+Patch: 4420_grsecurity-3.0-3.2.59-201406030716.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.59/4420_grsecurity-3.0-3.2.59-201405281920.patch b/3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch
index ae61f08..fc192d4 100644
--- a/3.2.59/4420_grsecurity-3.0-3.2.59-201405281920.patch
+++ b/3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch
@@ -273,7 +273,7 @@ index 88fd7f5..b318a78 100644
==============================================================
diff --git a/Makefile b/Makefile
-index 1be3414..ef0a264 100644
+index 1be3414..0f629f5 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -354,7 +354,7 @@ index 1be3414..ef0a264 100644
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
-+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
@@ -456,19 +456,17 @@ index 1be3414..ef0a264 100644
# Target to install modules
PHONY += modules_install
-@@ -1163,8 +1243,9 @@ CLEAN_FILES += vmlinux System.map \
- MRPROPER_DIRS += include/config usr/include include/generated \
+@@ -1164,6 +1244,9 @@ MRPROPER_DIRS += include/config usr/include include/generated \
arch/*/include/generated
MRPROPER_FILES += .config .config.old .version .old_version \
-- include/linux/version.h \
-- Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
-+ include/linux/version.h tools/gcc/size_overflow_hash.h\
-+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
-+ tools/gcc/randomize_layout_seed.h
+ include/linux/version.h \
++ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
++ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
++ tools/gcc/randomize_layout_seed.h \
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
# clean - Delete most, but leave enough to build external modules
- #
-@@ -1201,6 +1282,7 @@ distclean: mrproper
+@@ -1201,6 +1284,7 @@ distclean: mrproper
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-o -name '.*.rej' \
@@ -476,7 +474,7 @@ index 1be3414..ef0a264 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1361,6 +1443,8 @@ PHONY += $(module-dirs) modules
+@@ -1361,6 +1445,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -485,7 +483,7 @@ index 1be3414..ef0a264 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1487,17 +1571,21 @@ else
+@@ -1487,17 +1573,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -511,7 +509,7 @@ index 1be3414..ef0a264 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1507,11 +1595,15 @@ endif
+@@ -1507,11 +1597,15 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -15940,7 +15938,7 @@ index d7ef849..b1b009a 100644
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index 36361bf..2c6406a 100644
+index 36361bf..be257d9 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -7,6 +7,7 @@
@@ -15964,7 +15962,7 @@ index 36361bf..2c6406a 100644
#define segment_eq(a, b) ((a).seg == (b).seg)
-@@ -76,7 +82,33 @@
+@@ -76,7 +82,35 @@
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
@@ -15972,26 +15970,28 @@ index 36361bf..2c6406a 100644
+#define access_ok_noprefault(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+({ \
-+ long __size = size; \
++ unsigned long __size = size; \
+ unsigned long __addr = (unsigned long)addr; \
-+ unsigned long __addr_ao = __addr & PAGE_MASK; \
-+ unsigned long __end_ao = __addr + __size - 1; \
+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
-+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
-+ while(__addr_ao <= __end_ao) { \
-+ char __c_ao; \
-+ __addr_ao += PAGE_SIZE; \
-+ if (__size > PAGE_SIZE) \
-+ cond_resched(); \
-+ if (__get_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ if (type != VERIFY_WRITE) { \
++ if (__ret_ao && __size) { \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while (__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
+ __addr = __addr_ao; \
-+ continue; \
+ } \
-+ if (__put_user(__c_ao, (char __user *)__addr)) \
-+ break; \
-+ __addr = __addr_ao; \
+ } \
+ } \
+ __ret_ao; \
@@ -15999,7 +15999,7 @@ index 36361bf..2c6406a 100644
/*
* The exception table consists of pairs of addresses: the first is the
-@@ -182,12 +214,20 @@ extern int __get_user_bad(void);
+@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
@@ -16023,7 +16023,7 @@ index 36361bf..2c6406a 100644
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
-@@ -199,8 +239,8 @@ extern int __get_user_bad(void);
+@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
: "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
@@ -16034,7 +16034,7 @@ index 36361bf..2c6406a 100644
"3:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
_ASM_EXTABLE(2b, 3b - 2b) \
-@@ -252,7 +292,7 @@ extern void __put_user_8(void);
+@@ -252,7 +294,7 @@ extern void __put_user_8(void);
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -16043,7 +16043,7 @@ index 36361bf..2c6406a 100644
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_x(1, __pu_val, ptr, __ret_pu); \
-@@ -373,7 +413,7 @@ do { \
+@@ -373,7 +415,7 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -16052,7 +16052,7 @@ index 36361bf..2c6406a 100644
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
-@@ -381,7 +421,7 @@ do { \
+@@ -381,7 +423,7 @@ do { \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
@@ -16061,7 +16061,7 @@ index 36361bf..2c6406a 100644
: "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_size_ex(x, ptr, size) \
-@@ -406,7 +446,7 @@ do { \
+@@ -406,7 +448,7 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -16070,7 +16070,7 @@ index 36361bf..2c6406a 100644
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: ltype(x) : "m" (__m(addr)))
-@@ -423,13 +463,24 @@ do { \
+@@ -423,13 +465,24 @@ do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
@@ -16097,7 +16097,7 @@ index 36361bf..2c6406a 100644
/*
* Tell gcc we read from memory instead of writing: this is because
-@@ -437,7 +488,7 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -16106,7 +16106,7 @@ index 36361bf..2c6406a 100644
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
-@@ -445,10 +496,10 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
@@ -16119,7 +16119,7 @@ index 36361bf..2c6406a 100644
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: : ltype(x), "m" (__m(addr)))
-@@ -487,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
* On error, the variable @x is set to zero.
*/
@@ -16132,7 +16132,7 @@ index 36361bf..2c6406a 100644
/**
* __put_user: - Write a simple value into user space, with less checking.
-@@ -510,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
* Returns zero on success, or -EFAULT on error.
*/
@@ -16145,7 +16145,7 @@ index 36361bf..2c6406a 100644
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
-@@ -529,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
@@ -21377,7 +21377,7 @@ index 7209070..ada4d63 100644
* Shouldn't happen, we returned above if in_interrupt():
*/
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
-index 69bca46..1ac9a15 100644
+index 69bca46..e38f147 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -21389,7 +21389,7 @@ index 69bca46..1ac9a15 100644
/*
* Probabilistic stack overflow check:
*
-@@ -38,16 +40,16 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+@@ -38,16 +40,17 @@ static inline void stack_overflow_check(struct pt_regs *regs)
#ifdef CONFIG_DEBUG_STACKOVERFLOW
u64 curbase = (u64)task_stack_page(current);
@@ -21403,14 +21403,14 @@ index 69bca46..1ac9a15 100644
- sizeof(struct pt_regs) + 128,
-
- "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-- current->comm, curbase, regs->sp);
-+ if (regs->sp >= curbase + sizeof(struct thread_info) +
-+ sizeof(struct pt_regs) + 128 &&
-+ regs->sp <= curbase + THREAD_SIZE)
-+ return;
-+ WARN_ONCE(1, "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-+ current->comm, curbase, regs->sp);
-+ gr_handle_kernel_exploit();
++ if (regs->sp >= curbase &&
++ regs->sp <= curbase + THREAD_SIZE &&
++ regs->sp < curbase + sizeof(struct thread_info) +
++ sizeof(struct pt_regs) + 128) {
++ WARN_ONCE(1, "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
+ current->comm, curbase, regs->sp);
++ gr_handle_kernel_exploit();
++ }
#endif
}
@@ -23848,7 +23848,7 @@ index 09ff517..df19fbff 100644
.short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 20061b9..55ec769 100644
+index 20061b9..e2d53a8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
@@ -23919,7 +23919,19 @@ index 20061b9..55ec769 100644
return;
#ifdef CONFIG_X86_32
-@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -242,6 +248,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = X86_TRAP_DF;
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
++ die("grsec: kernel stack overflow detected", regs, error_code);
++#endif
++
+ /*
+ * This is always a kernel trap and never fixable (and thus must
+ * never return).
+@@ -259,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
conditional_sti(regs);
#ifdef CONFIG_X86_32
@@ -23952,7 +23964,7 @@ index 20061b9..55ec769 100644
tsk->thread.error_code = error_code;
tsk->thread.trap_no = X86_TRAP_GP;
-@@ -299,6 +321,13 @@ gp_in_kernel:
+@@ -299,6 +326,13 @@ gp_in_kernel:
if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
return;
@@ -23966,7 +23978,7 @@ index 20061b9..55ec769 100644
die("general protection fault", regs, error_code);
}
-@@ -419,7 +448,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -419,7 +453,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
/* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs);
@@ -23975,7 +23987,7 @@ index 20061b9..55ec769 100644
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB);
preempt_conditional_cli(regs);
-@@ -433,7 +462,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+@@ -433,7 +467,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
* We already checked v86 mode above, so we can check for kernel mode
* by just checking the CPL of CS.
*/
@@ -23984,7 +23996,7 @@ index 20061b9..55ec769 100644
tsk->thread.debugreg6 &= ~DR_STEP;
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->flags &= ~X86_EFLAGS_TF;
-@@ -463,7 +492,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+@@ -463,7 +497,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
return;
conditional_sti(regs);
@@ -23993,7 +24005,7 @@ index 20061b9..55ec769 100644
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
-@@ -576,8 +605,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+@@ -576,8 +610,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
void __math_state_restore(struct task_struct *tsk)
{
/* We need a safe address that is cheap to find and that is already
@@ -49427,7 +49439,7 @@ index 643a0a0..4da1c03 100644
return NULL;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index bac83d8..37d177a 100644
+index bac83d8..0b87bf6 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1639,6 +1639,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
@@ -49458,36 +49470,7 @@ index bac83d8..37d177a 100644
spin_unlock_irqrestore(&tty->read_lock, flags);
*b += n;
*nr -= n;
-@@ -1996,12 +1997,19 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
- if (tty->ops->flush_chars)
- tty->ops->flush_chars(tty);
- } else {
-+ bool lock;
-+
-+ lock = L_ECHO(tty) || (tty->icanon & L_ECHONL(tty));
-+ if (lock)
-+ mutex_lock(&tty->output_lock);
- while (nr > 0) {
- mutex_lock(&tty->output_lock);
- c = tty->ops->write(tty, b, nr);
- mutex_unlock(&tty->output_lock);
- if (c < 0) {
- retval = c;
-+ if (lock)
-+ mutex_unlock(&tty->output_lock);
- goto break_out;
- }
- if (!c)
-@@ -2009,6 +2017,8 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
- b += c;
- nr -= c;
- }
-+ if (lock)
-+ mutex_unlock(&tty->output_lock);
- }
- if (!nr)
- break;
-@@ -2134,6 +2144,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+@@ -2134,6 +2135,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
*ops = tty_ldisc_N_TTY;
ops->owner = NULL;
@@ -56724,7 +56707,7 @@ index 451b9b8..12e5a03 100644
out_free_fd:
diff --git a/fs/exec.c b/fs/exec.c
-index 78199eb..125722f 100644
+index 78199eb..7ff0dd8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -55,12 +55,35 @@
@@ -57050,7 +57033,15 @@ index 78199eb..125722f 100644
set_fs(old_fs);
return result;
}
-@@ -1070,6 +1148,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
+@@ -841,6 +919,7 @@ static int exec_mmap(struct mm_struct *mm)
+ tsk->mm = mm;
+ tsk->active_mm = mm;
+ activate_mm(active_mm, mm);
++ populate_stack();
+ task_unlock(tsk);
+ arch_pick_mmap_layout(mm);
+ if (old_mm) {
+@@ -1070,6 +1149,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
perf_event_comm(tsk);
}
@@ -57072,7 +57063,7 @@ index 78199eb..125722f 100644
int flush_old_exec(struct linux_binprm * bprm)
{
int retval;
-@@ -1084,6 +1177,7 @@ int flush_old_exec(struct linux_binprm * bprm)
+@@ -1084,6 +1178,7 @@ int flush_old_exec(struct linux_binprm * bprm)
set_mm_exe_file(bprm->mm, bprm->file);
@@ -57080,7 +57071,7 @@ index 78199eb..125722f 100644
/*
* Release all of the old mmap stuff
*/
-@@ -1116,10 +1210,6 @@ EXPORT_SYMBOL(would_dump);
+@@ -1116,10 +1211,6 @@ EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
@@ -57091,7 +57082,7 @@ index 78199eb..125722f 100644
arch_pick_mmap_layout(current->mm);
/* This is the point of no return */
-@@ -1130,18 +1220,7 @@ void setup_new_exec(struct linux_binprm * bprm)
+@@ -1130,18 +1221,7 @@ void setup_new_exec(struct linux_binprm * bprm)
else
set_dumpable(current->mm, suid_dumpable);
@@ -57111,7 +57102,7 @@ index 78199eb..125722f 100644
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
-@@ -1229,7 +1308,7 @@ void install_exec_creds(struct linux_binprm *bprm)
+@@ -1229,7 +1309,7 @@ void install_exec_creds(struct linux_binprm *bprm)
* wait until new credentials are committed
* by commit_creds() above
*/
@@ -57120,7 +57111,7 @@ index 78199eb..125722f 100644
perf_event_exit_task(current);
/*
* cred_guard_mutex must be held at least to this point to prevent
-@@ -1259,6 +1338,13 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1259,6 +1339,13 @@ int check_unsafe_exec(struct linux_binprm *bprm)
bprm->unsafe |= LSM_UNSAFE_PTRACE;
}
@@ -57134,7 +57125,7 @@ index 78199eb..125722f 100644
n_fs = 1;
spin_lock(&p->fs->lock);
rcu_read_lock();
-@@ -1268,7 +1354,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1268,7 +1355,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
@@ -57143,7 +57134,7 @@ index 78199eb..125722f 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
-@@ -1302,7 +1388,8 @@ int prepare_binprm(struct linux_binprm *bprm)
+@@ -1302,7 +1389,8 @@ int prepare_binprm(struct linux_binprm *bprm)
bprm->cred->euid = current_euid();
bprm->cred->egid = current_egid();
@@ -57153,7 +57144,7 @@ index 78199eb..125722f 100644
/* Set-uid? */
if (mode & S_ISUID) {
bprm->per_clear |= PER_CLEAR_ON_SETID;
-@@ -1463,6 +1550,31 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+@@ -1463,6 +1551,31 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
EXPORT_SYMBOL(search_binary_handler);
@@ -57185,7 +57176,7 @@ index 78199eb..125722f 100644
/*
* sys_execve() executes a new program.
*/
-@@ -1471,6 +1583,11 @@ static int do_execve_common(const char *filename,
+@@ -1471,6 +1584,11 @@ static int do_execve_common(const char *filename,
struct user_arg_ptr envp,
struct pt_regs *regs)
{
@@ -57197,7 +57188,7 @@ index 78199eb..125722f 100644
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
-@@ -1478,6 +1595,8 @@ static int do_execve_common(const char *filename,
+@@ -1478,6 +1596,8 @@ static int do_execve_common(const char *filename,
int retval;
const struct cred *cred = current_cred();
@@ -57206,7 +57197,7 @@ index 78199eb..125722f 100644
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
-@@ -1518,12 +1637,22 @@ static int do_execve_common(const char *filename,
+@@ -1518,12 +1638,22 @@ static int do_execve_common(const char *filename,
if (IS_ERR(file))
goto out_unmark;
@@ -57229,7 +57220,7 @@ index 78199eb..125722f 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
-@@ -1540,24 +1669,70 @@ static int do_execve_common(const char *filename,
+@@ -1540,24 +1670,70 @@ static int do_execve_common(const char *filename,
if (retval < 0)
goto out;
@@ -57304,7 +57295,7 @@ index 78199eb..125722f 100644
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
-@@ -1566,6 +1741,14 @@ static int do_execve_common(const char *filename,
+@@ -1566,6 +1742,14 @@ static int do_execve_common(const char *filename,
put_files_struct(displaced);
return retval;
@@ -57319,7 +57310,7 @@ index 78199eb..125722f 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1639,7 +1822,7 @@ static int expand_corename(struct core_name *cn)
+@@ -1639,7 +1823,7 @@ static int expand_corename(struct core_name *cn)
{
char *old_corename = cn->corename;
@@ -57328,7 +57319,7 @@ index 78199eb..125722f 100644
cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
if (!cn->corename) {
-@@ -1736,7 +1919,7 @@ static int format_corename(struct core_name *cn, long signr)
+@@ -1736,7 +1920,7 @@ static int format_corename(struct core_name *cn, long signr)
int pid_in_pattern = 0;
int err = 0;
@@ -57337,7 +57328,7 @@ index 78199eb..125722f 100644
cn->corename = kmalloc(cn->size, GFP_KERNEL);
cn->used = 0;
-@@ -1833,6 +2016,295 @@ out:
+@@ -1833,6 +2017,308 @@ out:
return ispipe;
}
@@ -57578,12 +57569,25 @@ index 78199eb..125722f 100644
+}
+#endif
+
-+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
++void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
+{
-+
+#ifdef CONFIG_PAX_USERCOPY
+ const char *type;
++#endif
++
++#ifndef CONFIG_STACK_GROWSUP
++ const void * stackstart = task_stack_page(current);
++ if (unlikely(current_stack_pointer < stackstart + 512 ||
++ current_stack_pointer >= stackstart + THREAD_SIZE))
++ BUG();
++#endif
+
++#ifndef CONFIG_PAX_USERCOPY_DEBUG
++ if (const_size)
++ return;
++#endif
++
++#ifdef CONFIG_PAX_USERCOPY
+ if (!n)
+ return;
+
@@ -57633,7 +57637,7 @@ index 78199eb..125722f 100644
static int zap_process(struct task_struct *start, int exit_code)
{
struct task_struct *t;
-@@ -2006,17 +2478,17 @@ static void coredump_finish(struct mm_struct *mm)
+@@ -2006,17 +2492,17 @@ static void coredump_finish(struct mm_struct *mm)
void set_dumpable(struct mm_struct *mm, int value)
{
switch (value) {
@@ -57654,7 +57658,7 @@ index 78199eb..125722f 100644
set_bit(MMF_DUMP_SECURELY, &mm->flags);
smp_wmb();
set_bit(MMF_DUMPABLE, &mm->flags);
-@@ -2029,7 +2501,7 @@ static int __get_dumpable(unsigned long mm_flags)
+@@ -2029,7 +2515,7 @@ static int __get_dumpable(unsigned long mm_flags)
int ret;
ret = mm_flags & MMF_DUMPABLE_MASK;
@@ -57663,7 +57667,7 @@ index 78199eb..125722f 100644
}
/*
-@@ -2050,17 +2522,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -2050,17 +2536,17 @@ static void wait_for_dump_helpers(struct file *file)
pipe = file->f_path.dentry->d_inode->i_pipe;
pipe_lock(pipe);
@@ -57686,7 +57690,7 @@ index 78199eb..125722f 100644
pipe_unlock(pipe);
}
-@@ -2121,7 +2593,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2121,7 +2607,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
int retval = 0;
int flag = 0;
int ispipe;
@@ -57696,7 +57700,7 @@ index 78199eb..125722f 100644
struct coredump_params cprm = {
.signr = signr,
.regs = regs,
-@@ -2136,6 +2609,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2136,6 +2623,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
audit_core_dumps(signr);
@@ -57706,7 +57710,7 @@ index 78199eb..125722f 100644
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
-@@ -2146,14 +2622,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2146,14 +2636,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
if (!cred)
goto fail;
/*
@@ -57727,7 +57731,7 @@ index 78199eb..125722f 100644
}
retval = coredump_wait(exit_code, &core_state);
-@@ -2203,7 +2681,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2203,7 +2695,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
}
cprm.limit = RLIM_INFINITY;
@@ -57736,7 +57740,7 @@ index 78199eb..125722f 100644
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
task_tgid_vnr(current), current->comm);
-@@ -2230,9 +2708,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2230,9 +2722,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
} else {
struct inode *inode;
@@ -57756,7 +57760,7 @@ index 78199eb..125722f 100644
cprm.file = filp_open(cn.corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
-@@ -2273,7 +2761,7 @@ close_fail:
+@@ -2273,7 +2775,7 @@ close_fail:
filp_close(cprm.file, NULL);
fail_dropcount:
if (ispipe)
@@ -57765,7 +57769,7 @@ index 78199eb..125722f 100644
fail_unlock:
kfree(cn.corename);
fail_corename:
-@@ -2292,7 +2780,7 @@ fail:
+@@ -2292,7 +2794,7 @@ fail:
*/
int dump_write(struct file *file, const void *addr, int nr)
{
@@ -64866,10 +64870,10 @@ index 8a89949..6776861 100644
xfs_init_zones(void)
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..802b13c
+index 0000000..2255157
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1147 @@
+@@ -0,0 +1,1160 @@
+#
+# grecurity configuration
+#
@@ -65008,6 +65012,19 @@ index 0000000..802b13c
+ If you use PaX it is essential that you say Y here as it closes up
+ several holes that make full ASLR useless locally.
+
++
++config GRKERNSEC_KSTACKOVERFLOW
++ bool "Prevent kernel stack overflows"
++ default y if GRKERNSEC_CONFIG_AUTO
++ depends on !IA64 && 64BIT && BROKEN
++ help
++ If you say Y here, the kernel's process stacks will be allocated
++ with vmalloc instead of the kernel's default allocator. This
++ introduces guard pages that in combination with the alloca checking
++ of the STACKLEAK feature prevents all forms of kernel process stack
++ overflow abuse. Note that this is different from kernel stack
++ buffer overflows.
++
+config GRKERNSEC_BRUTE
+ bool "Deter exploit bruteforcing"
+ default y if GRKERNSEC_CONFIG_AUTO
@@ -77456,7 +77473,7 @@ index 4f7a632..b9e6f95 100644
/**
diff --git a/include/linux/cred.h b/include/linux/cred.h
-index 4030896..65aefc8 100644
+index 4030896..4d2c309 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -34,7 +34,7 @@ struct group_info {
@@ -77487,6 +77504,14 @@ index 4030896..65aefc8 100644
#endif
/**
+@@ -339,6 +342,7 @@ static inline void put_cred(const struct cred *_cred)
+
+ #define task_uid(task) (task_cred_xxx((task), uid))
+ #define task_euid(task) (task_cred_xxx((task), euid))
++#define task_securebits(task) (task_cred_xxx((task), securebits))
+
+ #define current_cred_xxx(xxx) \
+ ({ \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 8a94217..15d49e3 100644
--- a/include/linux/crypto.h
@@ -81375,7 +81400,7 @@ index 2148b12..519b820 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index cb34ff4..38255ee 100644
+index cb34ff4..1d75f44 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio_list;
@@ -81691,7 +81716,33 @@ index cb34ff4..38255ee 100644
* is_global_init - check if a task structure is init
* @tsk: Task structure to be checked.
*
-@@ -2116,7 +2231,9 @@ void yield(void);
+@@ -1953,6 +2068,25 @@ extern u64 sched_clock_cpu(int cpu);
+
+ extern void sched_clock_init(void);
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static inline void populate_stack(void)
++{
++ struct task_struct *curtask = current;
++ int c;
++ int *ptr = curtask->stack;
++ int *end = curtask->stack + THREAD_SIZE;
++
++ while (ptr < end) {
++ c = *(volatile int *)ptr;
++ ptr += PAGE_SIZE/sizeof(int);
++ }
++}
++#else
++static inline void populate_stack(void)
++{
++}
++#endif
++
+ #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+ static inline void sched_clock_tick(void)
+ {
+@@ -2116,7 +2250,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -81701,7 +81752,7 @@ index cb34ff4..38255ee 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2149,6 +2266,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2149,6 +2285,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -81709,7 +81760,7 @@ index cb34ff4..38255ee 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2270,6 +2388,12 @@ static inline void mmdrop(struct mm_struct * mm)
+@@ -2270,6 +2407,12 @@ static inline void mmdrop(struct mm_struct * mm)
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -81722,7 +81773,7 @@ index cb34ff4..38255ee 100644
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
-@@ -2286,9 +2410,8 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2286,9 +2429,8 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -81733,7 +81784,7 @@ index cb34ff4..38255ee 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2451,9 +2574,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2451,9 +2593,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -82633,28 +82684,18 @@ index 7faf933..9b85a0c 100644
#ifdef CONFIG_MAGIC_SYSRQ
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
-index 8d03f07..66b3cf6 100644
+index 8d03f07..995ab36 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -123,6 +123,23 @@ static inline void set_restore_sigmask(void)
+@@ -123,6 +123,13 @@ static inline void set_restore_sigmask(void)
}
#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
-+extern void __check_object_size(const void *ptr, unsigned long n, bool to);
++extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
+
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_USERCOPY)
-+extern void pax_check_alloca(unsigned long size);
-+#endif
-+
-+static inline void check_object_size(const void *ptr, unsigned long n, bool to)
++static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
-+#if defined(CONFIG_X86) && defined(CONFIG_PAX_USERCOPY)
-+ /* always check if we've overflowed the stack in a copy*user */
-+ pax_check_alloca(sizeof(unsigned long));
-+#endif
-+
-+ if (!__builtin_constant_p(n))
-+ __check_object_size(ptr, n, to);
++ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
+}
+
#endif /* __KERNEL__ */
@@ -83157,7 +83198,7 @@ index 0000000..d6b4440
+
+#endif /* _LINUX_VIRTIO_SCSI_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 4bde182..943f335 100644
+index 4bde182..d19c720 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -83172,7 +83213,15 @@ index 4bde182..943f335 100644
/* bits [20..32] reserved for arch specific ioremap internals */
/*
-@@ -124,7 +129,7 @@ extern void free_vm_area(struct vm_struct *area);
+@@ -59,6 +64,7 @@ extern void *vzalloc_node(unsigned long size, int node);
+ extern void *vmalloc_exec(unsigned long size);
+ extern void *vmalloc_32(unsigned long size);
+ extern void *vmalloc_32_user(unsigned long size);
++extern void *vmalloc_stack(int node);
+ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+@@ -124,7 +130,7 @@ extern void free_vm_area(struct vm_struct *area);
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
@@ -85867,7 +85916,7 @@ index 42e8fa0..9e7406b 100644
return -ENOMEM;
diff --git a/kernel/cred.c b/kernel/cred.c
-index 48c6fd3..8398912 100644
+index 48c6fd3..cb63d13 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
@@ -85904,7 +85953,7 @@ index 48c6fd3..8398912 100644
/* dumpability changes */
if (old->euid != new->euid ||
old->egid != new->egid ||
-@@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
+@@ -540,6 +551,107 @@ int commit_creds(struct cred *new)
put_cred(old);
return 0;
}
@@ -85973,6 +86022,7 @@ index 48c6fd3..8398912 100644
+ int ret;
+ int schedule_it = 0;
+ struct task_struct *t;
++ unsigned oldsecurebits = current_cred()->securebits;
+
+ /* we won't get called with tasklist_lock held for writing
+ and interrupts disabled as the cred struct in that case is
@@ -85988,7 +86038,11 @@ index 48c6fd3..8398912 100644
+ read_lock(&tasklist_lock);
+ for (t = next_thread(current); t != current;
+ t = next_thread(t)) {
-+ if (t->delayed_cred == NULL) {
++ /* we'll check if the thread has uid 0 in
++ * the delayed worker routine
++ */
++ if (task_securebits(t) == oldsecurebits &&
++ t->delayed_cred == NULL) {
+ t->delayed_cred = get_cred(new);
+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
+ set_tsk_need_resched(t);
@@ -85997,6 +86051,7 @@ index 48c6fd3..8398912 100644
+ read_unlock(&tasklist_lock);
+ rcu_read_unlock();
+ }
++
+ return ret;
+#else
+ return __commit_creds(new);
@@ -86351,7 +86406,7 @@ index fde15f9..99f1b97 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index ce0c182..c6ec99a 100644
+index ce0c182..b8e5b18 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -34,6 +34,7 @@
@@ -86362,15 +86417,68 @@ index ce0c182..c6ec99a 100644
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
-@@ -168,6 +169,7 @@ void free_task(struct task_struct *tsk)
- free_thread_info(tsk->stack);
+@@ -137,6 +138,30 @@ static inline void free_thread_info(struct thread_info *ti)
+ }
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
++ int node)
++{
++ return vmalloc_stack(node);
++}
++
++static inline void gr_free_thread_info(struct thread_info *ti)
++{
++ vfree(ti);
++}
++#else
++static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
++ int node)
++{
++ return alloc_thread_info_node(tsk, node);
++}
++
++static inline void gr_free_thread_info(struct thread_info *ti)
++{
++ free_thread_info(ti);
++}
++#endif
++
+ /* SLAB cache for signal_struct structures (tsk->signal) */
+ static struct kmem_cache *signal_cachep;
+
+@@ -157,17 +182,20 @@ static struct kmem_cache *mm_cachep;
+
+ static void account_kernel_stack(struct thread_info *ti, int account)
+ {
++#ifndef CONFIG_GRKERNSEC_KSTACKOVERFLOW
+ struct zone *zone = page_zone(virt_to_page(ti));
+
+ mod_zone_page_state(zone, NR_KERNEL_STACK, account);
++#endif
+ }
+
+ void free_task(struct task_struct *tsk)
+ {
+ account_kernel_stack(tsk->stack, -1);
+- free_thread_info(tsk->stack);
++ gr_free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
+ put_seccomp_filter(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
-@@ -270,19 +272,24 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+@@ -263,26 +291,31 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ if (!tsk)
+ return NULL;
+
+- ti = alloc_thread_info_node(tsk, node);
++ ti = gr_alloc_thread_info_node(tsk, node);
+ if (!ti) {
+ free_task_struct(tsk);
+ return NULL;
}
err = arch_dup_task_struct(tsk, orig);
@@ -86399,7 +86507,14 @@ index ce0c182..c6ec99a 100644
#endif
/*
-@@ -306,13 +313,78 @@ out:
+@@ -300,19 +333,84 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ return tsk;
+
+ out:
+- free_thread_info(ti);
++ gr_free_thread_info(ti);
+ free_task_struct(tsk);
+ return NULL;
}
#ifdef CONFIG_MMU
@@ -86482,7 +86597,7 @@ index ce0c182..c6ec99a 100644
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
-@@ -324,8 +396,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -324,8 +422,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
@@ -86493,7 +86608,7 @@ index ce0c182..c6ec99a 100644
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
-@@ -341,63 +413,16 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -341,63 +439,16 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -86562,7 +86677,7 @@ index ce0c182..c6ec99a 100644
/*
* Link in the new vma and copy the page table entries.
-@@ -420,6 +445,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -420,6 +471,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
}
@@ -86594,7 +86709,7 @@ index ce0c182..c6ec99a 100644
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
-@@ -428,14 +478,6 @@ out:
+@@ -428,14 +504,6 @@ out:
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
@@ -86609,7 +86724,7 @@ index ce0c182..c6ec99a 100644
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
-@@ -647,6 +689,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
+@@ -647,6 +715,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(get_task_mm);
@@ -86636,7 +86751,7 @@ index ce0c182..c6ec99a 100644
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error success whatever.
-@@ -832,13 +894,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
+@@ -832,13 +920,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
spin_unlock(&fs->lock);
return -EAGAIN;
}
@@ -86658,7 +86773,7 @@ index ce0c182..c6ec99a 100644
return 0;
}
-@@ -1047,7 +1116,7 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
+@@ -1047,7 +1142,7 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
@@ -86667,7 +86782,7 @@ index ce0c182..c6ec99a 100644
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
-@@ -1096,6 +1165,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1096,6 +1191,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto fork_out;
ftrace_graph_init_task(p);
@@ -86675,7 +86790,7 @@ index ce0c182..c6ec99a 100644
rt_mutex_init_task(p);
-@@ -1104,10 +1174,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1104,10 +1200,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
@@ -86691,7 +86806,7 @@ index ce0c182..c6ec99a 100644
goto bad_fork_free;
}
current->flags &= ~PF_NPROC_EXCEEDED;
-@@ -1341,6 +1414,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1341,6 +1440,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
}
@@ -86703,7 +86818,7 @@ index ce0c182..c6ec99a 100644
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
-@@ -1421,6 +1499,8 @@ bad_fork_cleanup_count:
+@@ -1421,6 +1525,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -86712,7 +86827,7 @@ index ce0c182..c6ec99a 100644
return ERR_PTR(retval);
}
-@@ -1507,6 +1587,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1507,6 +1613,7 @@ long do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
@@ -86720,7 +86835,7 @@ index ce0c182..c6ec99a 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1521,6 +1602,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1521,6 +1628,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -86729,7 +86844,7 @@ index ce0c182..c6ec99a 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1591,7 +1674,7 @@ void __init proc_caches_init(void)
+@@ -1591,7 +1700,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -86738,7 +86853,7 @@ index ce0c182..c6ec99a 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1630,7 +1713,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1630,7 +1739,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -86747,7 +86862,7 @@ index ce0c182..c6ec99a 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1719,7 +1802,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1719,7 +1828,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -89516,10 +89631,22 @@ index 3d9f31c..7fefc9e 100644
default:
diff --git a/kernel/sched.c b/kernel/sched.c
-index ea85b0d..e0b6326 100644
+index ea85b0d..633af22 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -5050,7 +5050,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
+@@ -3307,8 +3307,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ next->active_mm = oldmm;
+ atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next);
+- } else
++ } else {
+ switch_mm(oldmm, mm, next);
++ populate_stack();
++ }
+
+ if (!prev->mm) {
+ prev->active_mm = NULL;
+@@ -5050,7 +5052,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
* The return value is -ERESTARTSYS if interrupted, 0 if timed out,
* positive (at least 1, or number of jiffies left till timeout) if completed.
*/
@@ -89528,7 +89655,7 @@ index ea85b0d..e0b6326 100644
wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
-@@ -5067,7 +5067,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+@@ -5067,7 +5069,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
*
* The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
@@ -89537,7 +89664,7 @@ index ea85b0d..e0b6326 100644
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
if (t == -ERESTARTSYS)
-@@ -5088,7 +5088,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
+@@ -5088,7 +5090,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* The return value is -ERESTARTSYS if interrupted, 0 if timed out,
* positive (at least 1, or number of jiffies left till timeout) if completed.
*/
@@ -89546,7 +89673,7 @@ index ea85b0d..e0b6326 100644
wait_for_completion_killable_timeout(struct completion *x,
unsigned long timeout)
{
-@@ -5297,6 +5297,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -5297,6 +5299,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -89555,7 +89682,7 @@ index ea85b0d..e0b6326 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -5330,7 +5332,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -5330,7 +5334,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -89565,7 +89692,7 @@ index ea85b0d..e0b6326 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -5487,6 +5490,7 @@ recheck:
+@@ -5487,6 +5492,7 @@ recheck:
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -89573,7 +89700,19 @@ index ea85b0d..e0b6326 100644
/* can't set/change the rt policy */
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
-@@ -6629,7 +6633,7 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+@@ -6521,8 +6527,10 @@ void idle_task_exit(void)
+
+ BUG_ON(cpu_online(smp_processor_id()));
+
+- if (mm != &init_mm)
++ if (mm != &init_mm) {
+ switch_mm(mm, &init_mm, current);
++ populate_stack();
++ }
+ mmdrop(mm);
+ }
+
+@@ -6629,7 +6637,7 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -89582,7 +89721,7 @@ index ea85b0d..e0b6326 100644
{
.procname = "sched_domain",
.mode = 0555,
-@@ -6646,17 +6650,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -6646,17 +6654,17 @@ static struct ctl_table sd_ctl_root[] = {
{}
};
@@ -89604,7 +89743,7 @@ index ea85b0d..e0b6326 100644
/*
* In the intermediate directories, both the child directory and
-@@ -6664,22 +6668,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -6664,22 +6672,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
@@ -89636,7 +89775,7 @@ index ea85b0d..e0b6326 100644
const char *procname, void *data, int maxlen,
mode_t mode, proc_handler *proc_handler,
bool load_idx)
-@@ -6699,7 +6706,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -6699,7 +6710,7 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
@@ -89645,7 +89784,7 @@ index ea85b0d..e0b6326 100644
if (table == NULL)
return NULL;
-@@ -6734,9 +6741,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -6734,9 +6745,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
@@ -89657,7 +89796,7 @@ index ea85b0d..e0b6326 100644
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
-@@ -6763,11 +6770,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -6763,11 +6774,13 @@ static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
@@ -89672,7 +89811,7 @@ index ea85b0d..e0b6326 100644
if (entry == NULL)
return;
-@@ -6790,8 +6799,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -6790,8 +6803,12 @@ static void unregister_sched_domain_sysctl(void)
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
@@ -89687,7 +89826,7 @@ index ea85b0d..e0b6326 100644
}
#else
static void register_sched_domain_sysctl(void)
-@@ -6889,7 +6902,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+@@ -6889,7 +6906,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* happens before everything else. This has to be lower priority than
* the notifier in the perf_event subsystem, though.
*/
@@ -96193,6 +96332,18 @@ index 6182c8a..7d532cf 100644
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index cf332bc..add7e3a 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -33,6 +33,7 @@ void use_mm(struct mm_struct *mm)
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
++ populate_stack();
+ task_unlock(tsk);
+
+ if (active_mm != mm)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5a688a2..fffb9f6 100644
--- a/mm/mprotect.c
@@ -98142,7 +98293,7 @@ index 136ac4f..f917fa9 100644
mm->unmap_area = arch_unmap_area;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index eeba3bb..a22618a 100644
+index eeba3bb..5fc3323 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -98288,7 +98439,26 @@ index eeba3bb..a22618a 100644
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
start, end, node, gfp_mask, caller);
if (!area)
-@@ -1801,10 +1859,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1694,6 +1752,18 @@ static inline void *__vmalloc_node_flags(unsigned long size,
+ node, __builtin_return_address(0));
+ }
+
++void *vmalloc_stack(int node)
++{
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ gfp_t mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
++#else
++ gfp_t mask = GFP_KERNEL | __GFP_NOTRACK;
++#endif
++
++ return __vmalloc_node(THREAD_SIZE, THREAD_SIZE, mask, PAGE_KERNEL,
++ node, __builtin_return_address(0));
++}
++
+ /**
+ * vmalloc - allocate virtually contiguous memory
+ * @size: allocation size
+@@ -1801,10 +1871,9 @@ EXPORT_SYMBOL(vzalloc_node);
* For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
@@ -98300,7 +98470,7 @@ index eeba3bb..a22618a 100644
-1, __builtin_return_address(0));
}
-@@ -2099,6 +2156,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -2099,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
@@ -98309,7 +98479,7 @@ index eeba3bb..a22618a 100644
if ((PAGE_SIZE-1) & (unsigned long)addr)
return -EINVAL;
-@@ -2351,8 +2410,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+@@ -2351,8 +2422,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}
@@ -98320,7 +98490,7 @@ index eeba3bb..a22618a 100644
if (!vas || !vms)
goto err_free;
-@@ -2536,11 +2595,15 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2536,11 +2607,15 @@ static int s_show(struct seq_file *m, void *p)
{
struct vm_struct *v = p;
@@ -110194,19 +110364,17 @@ index 0a7ca6c..f4b948c 100644
};
diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
new file mode 100644
-index 0000000..1f0214f
+index 0000000..de92ed9
--- /dev/null
+++ b/tools/gcc/.gitignore
-@@ -0,0 +1,3 @@
+@@ -0,0 +1 @@
+randomize_layout_seed.h
-+size_overflow_hash.h
-+size_overflow_hash_aux.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..d25d472
+index 0000000..7b8921f
--- /dev/null
+++ b/tools/gcc/Makefile
-@@ -0,0 +1,60 @@
+@@ -0,0 +1,52 @@
+#CC := gcc
+#PLUGIN_SOURCE_FILES := pax_plugin.c
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
@@ -110215,23 +110383,29 @@ index 0000000..d25d472
+
+ifeq ($(PLUGINCC),$(HOSTCC))
+HOSTLIBS := hostlibs
-+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb
++export HOST_EXTRACFLAGS
+else
+HOSTLIBS := hostcxxlibs
-+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing -Wno-unused-variable
++export HOST_EXTRACXXFLAGS
+endif
+
++export GCCPLUGINS_DIR HOSTLIBS
++
+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
+$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
+$(HOSTLIBS)-y += colorize_plugin.so
-+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
+$(HOSTLIBS)-$(CONFIG_GRKERNSEC_RANDSTRUCT) += randomize_layout_plugin.so
+
++subdir-$(CONFIG_PAX_SIZE_OVERFLOW) := size_overflow_plugin
++subdir- += size_overflow_plugin
++
+always := $($(HOSTLIBS)-y)
+
+constify_plugin-objs := constify_plugin.o
@@ -110240,33 +110414,19 @@ index 0000000..d25d472
+kernexec_plugin-objs := kernexec_plugin.o
+checker_plugin-objs := checker_plugin.o
+colorize_plugin-objs := colorize_plugin.o
-+size_overflow_plugin-objs := size_overflow_plugin.o
+latent_entropy_plugin-objs := latent_entropy_plugin.o
+structleak_plugin-objs := structleak_plugin.o
+randomize_layout_plugin-objs := randomize_layout_plugin.o
+
-+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
+$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
+
-+quiet_cmd_build_size_overflow_hash = GENHASH $@
-+ cmd_build_size_overflow_hash = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
-+$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
-+ $(call if_changed,build_size_overflow_hash)
-+
-+quiet_cmd_build_size_overflow_hash_aux = GENHASH $@
-+ cmd_build_size_overflow_hash_aux = \
-+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@
-+$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE
-+ $(call if_changed,build_size_overflow_hash_aux)
-+
+quiet_cmd_create_randomize_layout_seed = GENSEED $@
+ cmd_create_randomize_layout_seed = \
+ $(CONFIG_SHELL) $(srctree)/$(src)/gen-random-seed.sh $@ $(objtree)/include/generated/randomize_layout_hash.h
+$(objtree)/$(obj)/randomize_layout_seed.h: FORCE
+ $(call if_changed,create_randomize_layout_seed)
+
-+targets += size_overflow_hash.h size_overflow_hash_aux.h randomize_layout_seed.h randomize_layout_hash.h
++targets += randomize_layout_seed.h randomize_layout_hash.h
diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
new file mode 100644
index 0000000..5452feea
@@ -111204,10 +111364,10 @@ index 0000000..82bc5a8
+}
diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
new file mode 100644
-index 0000000..8af3693
+index 0000000..e90c205
--- /dev/null
+++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,287 @@
+@@ -0,0 +1,295 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
@@ -111278,7 +111438,6 @@ index 0000000..8af3693
+#include "tree-pass.h"
+//#include "df.h"
+#include "predict.h"
-+//#include "lto-streamer.h"
+#include "ipa-utils.h"
+
+#if BUILDING_GCC_VERSION >= 4009
@@ -111293,6 +111452,7 @@ index 0000000..8af3693
+#include "tree-ssanames.h"
+#include "print-tree.h"
+#include "tree-eh.h"
++#include "stmt.h"
+#endif
+
+#include "gimple.h"
@@ -111306,6 +111466,10 @@ index 0000000..8af3693
+#include "ssa-iterators.h"
+#endif
+
++//#include "lto/lto.h"
++//#include "data-streamer.h"
++//#include "lto-compress.h"
++
+//#include "expr.h" where are you...
+extern rtx emit_move_insn(rtx x, rtx y);
+
@@ -111317,6 +111481,8 @@ index 0000000..8af3693
+
+#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
+#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node))
++#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node))
+
+#if BUILDING_GCC_VERSION == 4005
+#define FOR_EACH_LOCAL_DECL(FUN, I, D) for (tree vars = (FUN)->local_decls; vars && (D = TREE_VALUE(vars)); vars = TREE_CHAIN(vars), I)
@@ -111455,6 +111621,8 @@ index 0000000..8af3693
+#if BUILDING_GCC_VERSION >= 4007
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
+ cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
++#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
++ cgraph_create_edge_including_clones((caller), (callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
+#endif
+
+#if BUILDING_GCC_VERSION <= 4008
@@ -111509,109 +111677,6 @@ index 0000000..7514850
+ HASH=`echo -n "$SEED" | sha256sum | cut -d" " -f1 | tr -d ' \n'`
+ echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2"
+fi
-diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
-new file mode 100644
-index 0000000..791ca76
---- /dev/null
-+++ b/tools/gcc/generate_size_overflow_hash.sh
-@@ -0,0 +1,97 @@
-+#!/bin/bash
-+
-+# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
-+
-+header1="size_overflow_hash.h"
-+database="size_overflow_hash.data"
-+n=65536
-+hashtable_name="size_overflow_hash"
-+
-+usage() {
-+cat <<EOF
-+usage: $0 options
-+OPTIONS:
-+ -h|--help help
-+ -o header file
-+ -d database file
-+ -n hash array size
-+ -s name of the hash table
-+EOF
-+ return 0
-+}
-+
-+while true
-+do
-+ case "$1" in
-+ -h|--help) usage && exit 0;;
-+ -n) n=$2; shift 2;;
-+ -o) header1="$2"; shift 2;;
-+ -d) database="$2"; shift 2;;
-+ -s) hashtable_name="$2"; shift 2;;
-+ --) shift 1; break ;;
-+ *) break ;;
-+ esac
-+done
-+
-+create_defines() {
-+ for i in `seq 0 31`
-+ do
-+ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
-+ done
-+ echo >> "$header1"
-+}
-+
-+create_structs() {
-+ rm -f "$header1"
-+
-+ create_defines
-+
-+ cat "$database" | while read data
-+ do
-+ data_array=($data)
-+ struct_hash_name="${data_array[0]}"
-+ funcn="${data_array[1]}"
-+ params="${data_array[2]}"
-+ next="${data_array[4]}"
-+
-+ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
-+
-+ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
-+ echo -en "\t.param\t= " >> "$header1"
-+ line=
-+ for param_num in ${params//-/ };
-+ do
-+ line="${line}PARAM"$param_num"|"
-+ done
-+
-+ echo -e "${line%?},\n};\n" >> "$header1"
-+ done
-+}
-+
-+create_headers() {
-+ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1"
-+}
-+
-+create_array_elements() {
-+ index=0
-+ grep -v "nohasharray" $database | sort -n -k 4 | while read data
-+ do
-+ data_array=($data)
-+ i="${data_array[3]}"
-+ hash="${data_array[0]}"
-+ while [[ $index -lt $i ]]
-+ do
-+ echo -e "\t["$index"]\t= NULL," >> "$header1"
-+ index=$(($index + 1))
-+ done
-+ index=$(($index + 1))
-+ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
-+ done
-+ echo '};' >> $header1
-+}
-+
-+create_structs
-+create_headers
-+create_array_elements
-+
-+exit 0
diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
new file mode 100644
index 0000000..d81c094
@@ -113713,12 +113778,4344 @@ index 0000000..8dafb22
+
+ return 0;
+}
-diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
+diff --git a/tools/gcc/size_overflow_plugin/.gitignore b/tools/gcc/size_overflow_plugin/.gitignore
+new file mode 100644
+index 0000000..92d3b0c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/.gitignore
+@@ -0,0 +1,2 @@
++size_overflow_hash.h
++size_overflow_hash_aux.h
+diff --git a/tools/gcc/size_overflow_plugin/Makefile b/tools/gcc/size_overflow_plugin/Makefile
+new file mode 100644
+index 0000000..1ae2ed5
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/Makefile
+@@ -0,0 +1,20 @@
++$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
++always := $($(HOSTLIBS)-y)
++
++size_overflow_plugin-objs := $(patsubst $(srctree)/$(src)/%.c,%.o,$(wildcard $(srctree)/$(src)/*.c))
++
++$(patsubst $(srctree)/$(src)/%.c,$(obj)/%.o,$(wildcard $(srctree)/$(src)/*.c)): $(objtree)/$(obj)/size_overflow_hash.h $(objtree)/$(obj)/size_overflow_hash_aux.h
++
++quiet_cmd_build_size_overflow_hash = GENHASH $@
++ cmd_build_size_overflow_hash = \
++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
++ $(call if_changed,build_size_overflow_hash)
++
++quiet_cmd_build_size_overflow_hash_aux = GENHASH $@
++ cmd_build_size_overflow_hash_aux = \
++ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -s size_overflow_hash_aux -d $< -o $@
++$(objtree)/$(obj)/size_overflow_hash_aux.h: $(src)/size_overflow_hash_aux.data FORCE
++ $(call if_changed,build_size_overflow_hash_aux)
++
++targets += size_overflow_hash.h size_overflow_hash_aux.h
+diff --git a/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
+new file mode 100644
+index 0000000..12b1e3b
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/generate_size_overflow_hash.sh
+@@ -0,0 +1,102 @@
++#!/bin/bash
++
++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
++
++header1="size_overflow_hash.h"
++database="size_overflow_hash.data"
++n=65536
++hashtable_name="size_overflow_hash"
++
++usage() {
++cat <<EOF
++usage: $0 options
++OPTIONS:
++ -h|--help help
++ -o header file
++ -d database file
++ -n hash array size
++ -s name of the hash table
++EOF
++ return 0
++}
++
++while true
++do
++ case "$1" in
++ -h|--help) usage && exit 0;;
++ -n) n=$2; shift 2;;
++ -o) header1="$2"; shift 2;;
++ -d) database="$2"; shift 2;;
++ -s) hashtable_name="$2"; shift 2;;
++ --) shift 1; break ;;
++ *) break ;;
++ esac
++done
++
++create_defines() {
++ for i in `seq 0 31`
++ do
++ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
++ done
++ echo >> "$header1"
++}
++
++create_structs() {
++ rm -f "$header1"
++
++ create_defines
++
++ cat "$database" | while read data
++ do
++ data_array=($data)
++ struct_hash_name="${data_array[0]}"
++ funcn="${data_array[1]}"
++ params="${data_array[2]}"
++ next="${data_array[4]}"
++
++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
++
++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
++ echo -en "\t.param\t= " >> "$header1"
++ line=
++ for param_num in ${params//-/ };
++ do
++ line="${line}PARAM"$param_num"|"
++ done
++
++ echo -e "${line%?},\n};\n" >> "$header1"
++ done
++}
++
++create_headers() {
++ echo "const struct size_overflow_hash * const $hashtable_name[$n] = {" >> "$header1"
++}
++
++create_array_elements() {
++ index=0
++ grep -v "nohasharray" $database | sort -n -k 4 | while read data
++ do
++ data_array=($data)
++ i="${data_array[3]}"
++ hash="${data_array[0]}"
++ while [[ $index -lt $i ]]
++ do
++ echo -e "\t["$index"]\t= NULL," >> "$header1"
++ index=$(($index + 1))
++ done
++ index=$(($index + 1))
++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
++ done
++ echo '};' >> $header1
++}
++
++size_overflow_plugin_dir=`dirname $header1`
++if [ "$size_overflow_plugin_dir" != '.' ]; then
++ mkdir -p "$size_overflow_plugin_dir" 2> /dev/null
++fi
++
++create_structs
++create_headers
++create_array_elements
++
++exit 0
+diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
+new file mode 100644
+index 0000000..3e8148c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
+@@ -0,0 +1,790 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
++
++// data for the size_overflow asm stmt
++struct asm_data {
++ gimple def_stmt;
++ tree input;
++ tree output;
++};
++
++#if BUILDING_GCC_VERSION <= 4007
++static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
++#else
++static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
++#endif
++{
++ tree list;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *vec_list = NULL;
++#else
++ vec<tree, va_gc> *vec_list = NULL;
++#endif
++
++ list = build_tree_list(NULL_TREE, string);
++ list = chainon(NULL_TREE, build_tree_list(list, io));
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, gc, vec_list, list);
++#else
++ vec_safe_push(vec_list, list);
++#endif
++ return vec_list;
++}
++
++static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
++{
++ gimple asm_stmt;
++ gimple_stmt_iterator gsi;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *input, *output = NULL;
++#else
++ vec<tree, va_gc> *input, *output = NULL;
++#endif
++
++ input = create_asm_io_list(str_input, asm_data->input);
++
++ if (asm_data->output)
++ output = create_asm_io_list(str_output, asm_data->output);
++
++ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
++ gsi = gsi_for_stmt(asm_data->def_stmt);
++ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
++
++ if (asm_data->output)
++ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
++}
++
++static void replace_call_lhs(const struct asm_data *asm_data)
++{
++ gimple_set_lhs(asm_data->def_stmt, asm_data->input);
++ update_stmt(asm_data->def_stmt);
++ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
++}
++
++static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
++{
++ enum mark cur_fndecl_attr;
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ cur_fndecl_attr = search_intentional(visited, arg);
++ if (cur_fndecl_attr != MARK_NO)
++ return cur_fndecl_attr;
++ }
++ return MARK_NO;
++}
++
++static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
++{
++ enum mark cur_fndecl_attr;
++ const_tree rhs1, rhs2;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ cur_fndecl_attr = search_intentional(visited, rhs1);
++ if (cur_fndecl_attr != MARK_NO)
++ return cur_fndecl_attr;
++ return search_intentional(visited, rhs2);
++}
++
++// Look up the intentional_overflow attribute on the caller and the callee functions.
++static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) != SSA_NAME)
++ return get_intentional_attr_type(lhs);
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return MARK_NO;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return MARK_NO;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return search_intentional(visited, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ return MARK_TURN_OFF;
++ return MARK_NO;
++ case GIMPLE_CALL:
++ return MARK_NO;
++ case GIMPLE_PHI:
++ return search_intentional_phi(visited, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return search_intentional_binary(visited, lhs);
++ }
++ case GIMPLE_RETURN:
++ return MARK_NO;
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
++static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
++{
++ const_tree fndecl;
++ struct pointer_set_t *visited;
++ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
++
++ fndecl = get_interesting_orig_fndecl(stmt, argnum);
++ if (is_end_intentional_intentional_attr(fndecl, argnum))
++ decl_attr = MARK_NOT_INTENTIONAL;
++ else if (is_yes_intentional_attr(fndecl, argnum))
++ decl_attr = MARK_YES;
++ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
++ return MARK_TURN_OFF;
++ }
++
++ visited = pointer_set_create();
++ cur_fndecl_attr = search_intentional(visited, arg);
++ pointer_set_destroy(visited);
++
++ switch (cur_fndecl_attr) {
++ case MARK_NO:
++ case MARK_TURN_OFF:
++ return cur_fndecl_attr;
++ default:
++ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
++ return MARK_YES;
++ }
++}
++
++static void check_missing_size_overflow_attribute(tree var)
++{
++ tree orig_fndecl;
++ unsigned int num;
++
++ if (is_a_return_check(var))
++ orig_fndecl = DECL_ORIGIN(var);
++ else
++ orig_fndecl = DECL_ORIGIN(current_function_decl);
++
++ num = get_function_num(var, orig_fndecl);
++ if (num == CANNOT_FIND_ARG)
++ return;
++
++ is_missing_function(orig_fndecl, num);
++}
++
++static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ search_size_overflow_attribute(visited, arg);
++ }
++}
++
++static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
++{
++ const_gimple def_stmt = get_def_stmt(lhs);
++ tree rhs1, rhs2;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ search_size_overflow_attribute(visited, rhs1);
++ search_size_overflow_attribute(visited, rhs2);
++}
++
++static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) == PARM_DECL) {
++ check_missing_size_overflow_attribute(lhs);
++ return;
++ }
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ return;
++ case GIMPLE_CALL: {
++ tree fndecl = gimple_call_fndecl(def_stmt);
++
++ if (fndecl == NULL_TREE)
++ return;
++ check_missing_size_overflow_attribute(fndecl);
++ return;
++ }
++ case GIMPLE_PHI:
++ return search_size_overflow_attribute_phi(visited, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return search_size_overflow_attribute_binary(visited, lhs);
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Search missing entries in the hash table (invoked from the gimple pass)
++static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
++{
++ tree fndecl = NULL_TREE;
++ tree lhs;
++ struct pointer_set_t *visited;
++
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
++ return;
++
++ if (num == 0) {
++ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
++ lhs = gimple_return_retval(stmt);
++ } else {
++ gcc_assert(is_gimple_call(stmt));
++ lhs = gimple_call_arg(stmt, num - 1);
++ fndecl = gimple_call_fndecl(stmt);
++ }
++
++ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
++ return;
++
++ visited = pointer_set_create();
++ search_size_overflow_attribute(visited, lhs);
++ pointer_set_destroy(visited);
++}
++
++static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign;
++
++ assign = gimple_build_assign(asm_data->input, asm_data->output);
++ gsi = gsi_for_stmt(stmt);
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ asm_data->def_stmt = assign;
++
++ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
++ asm_data->output = make_ssa_name(asm_data->output, stmt);
++ if (gimple_code(stmt) == GIMPLE_RETURN)
++ gimple_return_set_retval(stmt, asm_data->output);
++ else
++ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
++ update_stmt(stmt);
++}
++
++static char *create_asm_comment(unsigned int argnum, const_gimple stmt , const char *mark_str)
++{
++ const char *fn_name;
++ char *asm_comment;
++ unsigned int len;
++
++ if (argnum == 0)
++ fn_name = DECL_NAME_POINTER(current_function_decl);
++ else
++ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
++
++ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
++ gcc_assert(len > 0);
++
++ return asm_comment;
++}
++
++static const char *convert_mark_to_str(enum mark mark)
++{
++ switch (mark) {
++ case MARK_NO:
++ return OK_ASM_STR;
++ case MARK_YES:
++ case MARK_NOT_INTENTIONAL:
++ return YES_ASM_STR;
++ case MARK_TURN_OFF:
++ return TURN_OFF_ASM_STR;
++ }
++
++ gcc_unreachable();
++}
++
++/* Create the input of the size_overflow asm stmt.
++ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
++ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
++ * otherwise create the input (for a phi stmt the output too) of the asm stmt.
++ */
++static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
++{
++ if (!asm_data->def_stmt) {
++ asm_data->input = NULL_TREE;
++ return;
++ }
++
++ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
++ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
++
++ switch (gimple_code(asm_data->def_stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ replace_call_lhs(asm_data);
++ break;
++ case GIMPLE_PHI:
++ create_output_from_phi(stmt, argnum, asm_data);
++ break;
++ case GIMPLE_NOP: {
++ enum mark mark;
++ const char *mark_str;
++ char *asm_comment;
++
++ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
++
++ asm_data->input = asm_data->output;
++ asm_data->output = NULL;
++ asm_data->def_stmt = stmt;
++
++ mark_str = convert_mark_to_str(mark);
++ asm_comment = create_asm_comment(argnum, stmt, mark_str);
++
++ create_asm_stmt(asm_comment, build_string(3, "rm"), NULL, asm_data);
++ free(asm_comment);
++ asm_data->input = NULL_TREE;
++ break;
++ }
++ case GIMPLE_ASM:
++ if (is_size_overflow_asm(asm_data->def_stmt)) {
++ asm_data->input = NULL_TREE;
++ break;
++ }
++ default:
++ debug_gimple_stmt(asm_data->def_stmt);
++ gcc_unreachable();
++ }
++}
++
++/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
++ * is of the right kind create the appropriate size_overflow asm stmts:
++ * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
++ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
++ */
++static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
++{
++ struct asm_data asm_data;
++ const char *mark_str;
++ char *asm_comment;
++ enum mark mark;
++
++ if (is_gimple_constant(output_node))
++ return;
++
++ asm_data.output = output_node;
++ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
++ if (mark != MARK_TURN_OFF)
++ search_missing_size_overflow_attribute_gimple(stmt, argnum);
++
++ asm_data.def_stmt = get_def_stmt(asm_data.output);
++ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
++ return;
++
++ create_asm_input(stmt, argnum, &asm_data);
++ if (asm_data.input == NULL_TREE)
++ return;
++
++ mark_str = convert_mark_to_str(mark);
++ asm_comment = create_asm_comment(argnum, stmt, mark_str);
++ create_asm_stmt(asm_comment, build_string(2, "0"), build_string(4, "=rm"), &asm_data);
++ free(asm_comment);
++}
++
++// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
++static bool create_mark_asm(gimple stmt, enum mark mark)
++{
++ struct asm_data asm_data;
++ const char *asm_str;
++
++ switch (mark) {
++ case MARK_TURN_OFF:
++ asm_str = TURN_OFF_ASM_STR;
++ break;
++ case MARK_NOT_INTENTIONAL:
++ case MARK_YES:
++ asm_str = YES_ASM_STR;
++ break;
++ default:
++ gcc_unreachable();
++ }
++
++ asm_data.def_stmt = stmt;
++ asm_data.output = gimple_call_lhs(stmt);
++
++ if (asm_data.output == NULL_TREE) {
++ asm_data.input = gimple_call_arg(stmt, 0);
++ if (is_gimple_constant(asm_data.input))
++ return false;
++ asm_data.output = NULL;
++ create_asm_stmt(asm_str, build_string(3, "rm"), NULL, &asm_data);
++ return true;
++ }
++
++ create_asm_input(stmt, 0, &asm_data);
++ gcc_assert(asm_data.input != NULL_TREE);
++
++ create_asm_stmt(asm_str, build_string(2, "0"), build_string(4, "=rm"), &asm_data);
++ return true;
++}
++
++static bool is_from_cast(const_tree node)
++{
++ gimple def_stmt = get_def_stmt(node);
++
++ if (!def_stmt)
++ return false;
++
++ if (gimple_assign_cast_p(def_stmt))
++ return true;
++
++ return false;
++}
++
++// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
++static bool skip_ptr_minus(gimple stmt)
++{
++ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ if (!is_from_cast(rhs1))
++ return false;
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (!is_from_cast(rhs2))
++ return false;
++
++ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
++ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
++
++ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
++ return false;
++
++ create_mark_asm(stmt, MARK_YES);
++ return true;
++}
++
++static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
++{
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ case GIMPLE_CALL:
++ break;
++ case GIMPLE_PHI: {
++ unsigned int i, n = gimple_phi_num_args(def_stmt);
++
++ pointer_set_insert(visited, def_stmt);
++
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(def_stmt, i);
++
++ walk_use_def_ptr(visited, arg);
++ }
++ }
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
++ return;
++ case 3:
++ if (skip_ptr_minus(def_stmt))
++ return;
++
++ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
++ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
++ return;
++ default:
++ return;
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
++static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
++{
++ struct pointer_set_t *visited;
++
++ visited = pointer_set_create();
++ walk_use_def_ptr(visited, arg);
++ pointer_set_destroy(visited);
++}
++
++// Determine the return value and insert the asm stmt to mark the return stmt.
++static void insert_asm_ret(gimple stmt)
++{
++ tree ret;
++
++ ret = gimple_return_retval(stmt);
++ create_size_overflow_asm(stmt, ret, 0);
++}
++
++// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
++static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
++{
++ tree arg;
++ unsigned int argnum;
++
++ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
++ gcc_assert(argnum != 0);
++ if (argnum == CANNOT_FIND_ARG)
++ return;
++
++ arg = gimple_call_arg(stmt, argnum - 1);
++ gcc_assert(arg != NULL_TREE);
++
++ // skip all ptr - ptr expressions
++ insert_mark_not_intentional_asm_at_ptr(arg);
++
++ create_size_overflow_asm(stmt, arg, argnum);
++}
++
++// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
++static void set_argnum_attribute(const_tree attr, bool *argnums)
++{
++ unsigned int argnum;
++ tree attr_value;
++
++ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
++ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
++ argnums[argnum] = true;
++ }
++}
++
++// If a function arg or the return value is in the hash table then set its index in the array.
++static void set_argnum_hash(tree fndecl, bool *argnums)
++{
++ unsigned int num;
++ const struct size_overflow_hash *hash;
++
++ hash = get_function_hash(DECL_ORIGIN(fndecl));
++ if (!hash)
++ return;
++
++ for (num = 0; num <= MAX_PARAM; num++) {
++ if (!(hash->param & (1U << num)))
++ continue;
++
++ argnums[num] = true;
++ }
++}
++
++static bool is_all_the_argnums_empty(bool *argnums)
++{
++ unsigned int i;
++
++ for (i = 0; i <= MAX_PARAM; i++)
++ if (argnums[i])
++ return false;
++ return true;
++}
++
++// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
++static void search_interesting_args(tree fndecl, bool *argnums)
++{
++ const_tree attr;
++
++ set_argnum_hash(fndecl, argnums);
++ if (!is_all_the_argnums_empty(argnums))
++ return;
++
++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
++ if (attr && TREE_VALUE(attr))
++ set_argnum_attribute(attr, argnums);
++}
++
++/*
++ * Look up the intentional_overflow attribute that turns off ipa based duplication
++ * on the callee function.
++ */
++static bool is_mark_turn_off_attribute(gimple stmt)
++{
++ enum mark mark;
++ const_tree fndecl = gimple_call_fndecl(stmt);
++
++ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
++ if (mark == MARK_TURN_OFF)
++ return true;
++ return false;
++}
++
++// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
++static void handle_interesting_function(gimple stmt)
++{
++ unsigned int argnum;
++ tree fndecl;
++ bool orig_argnums[MAX_PARAM + 1] = {false};
++
++ if (gimple_call_num_args(stmt) == 0)
++ return;
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ return;
++ fndecl = DECL_ORIGIN(fndecl);
++
++ if (is_mark_turn_off_attribute(stmt)) {
++ create_mark_asm(stmt, MARK_TURN_OFF);
++ return;
++ }
++
++ search_interesting_args(fndecl, orig_argnums);
++
++ for (argnum = 1; argnum < MAX_PARAM; argnum++)
++ if (orig_argnums[argnum])
++ insert_asm_arg(stmt, argnum);
++}
++
++// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
++static void handle_interesting_ret(gimple stmt)
++{
++ bool orig_argnums[MAX_PARAM + 1] = {false};
++
++ search_interesting_args(current_function_decl, orig_argnums);
++
++ if (orig_argnums[0])
++ insert_asm_ret(stmt);
++}
++
++// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
++static unsigned int search_interesting_functions(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ gimple stmt = gsi_stmt(gsi);
++
++ if (is_size_overflow_asm(stmt))
++ continue;
++
++ if (is_gimple_call(stmt))
++ handle_interesting_function(stmt);
++ else if (gimple_code(stmt) == GIMPLE_RETURN)
++ handle_interesting_ret(stmt);
++ }
++ }
++ return 0;
++}
++
++/*
++ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
++ * this pass inserts asm stmts to mark the interesting args
++ * that the ipa pass will detect and insert the size overflow checks for.
++ */
++#if BUILDING_GCC_VERSION >= 4009
++static const struct pass_data insert_size_overflow_asm_pass_data = {
++#else
++static struct gimple_opt_pass insert_size_overflow_asm_pass = {
++ .pass = {
++#endif
++ .type = GIMPLE_PASS,
++ .name = "insert_size_overflow_asm",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++#if BUILDING_GCC_VERSION >= 4009
++ .has_gate = false,
++ .has_execute = true,
++#else
++ .gate = NULL,
++ .execute = search_interesting_functions,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++#endif
++ .tv_id = TV_NONE,
++ .properties_required = PROP_cfg,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
++#if BUILDING_GCC_VERSION < 4009
++ }
++#endif
++};
++
++#if BUILDING_GCC_VERSION >= 4009
++namespace {
++class insert_size_overflow_asm_pass : public gimple_opt_pass {
++public:
++ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
++ unsigned int execute() { return search_interesting_functions(); }
++};
++}
++#endif
++
++struct opt_pass *make_insert_size_overflow_asm_pass(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new insert_size_overflow_asm_pass();
++#else
++ return &insert_size_overflow_asm_pass.pass;
++#endif
++}
+diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
+new file mode 100644
+index 0000000..88469e9
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
+@@ -0,0 +1,902 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++#define MIN_CHECK true
++#define MAX_CHECK false
++
++static tree get_size_overflow_type(struct visited *visited, const_gimple stmt, const_tree node)
++{
++ const_tree type;
++ tree new_type;
++
++ gcc_assert(node != NULL_TREE);
++
++ type = TREE_TYPE(node);
++
++ if (pointer_set_contains(visited->my_stmts, stmt))
++ return TREE_TYPE(node);
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ new_type = size_overflow_type_HI;
++ break;
++ case HImode:
++ new_type = size_overflow_type_SI;
++ break;
++ case SImode:
++ new_type = size_overflow_type_DI;
++ break;
++ case DImode:
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
++ else
++ new_type = size_overflow_type_TI;
++ break;
++ case TImode:
++ gcc_assert(!TYPE_UNSIGNED(type));
++ new_type = size_overflow_type_TI;
++ break;
++ default:
++ debug_tree((tree)node);
++ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
++ gcc_unreachable();
++ }
++
++ if (TYPE_QUALS(type) != 0)
++ return build_qualified_type(new_type, TYPE_QUALS(type));
++ return new_type;
++}
++
++static tree get_lhs(const_gimple stmt)
++{
++ switch (gimple_code(stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ return gimple_get_lhs(stmt);
++ case GIMPLE_PHI:
++ return gimple_phi_result(stmt);
++ default:
++ return NULL_TREE;
++ }
++}
++
++static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before)
++{
++ gimple_stmt_iterator gsi;
++ tree lhs;
++ gimple new_stmt;
++
++ if (rhs == NULL_TREE)
++ return NULL_TREE;
++
++ gsi = gsi_for_stmt(stmt);
++ new_stmt = build_cast_stmt(visited, size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
++ pointer_set_insert(visited->my_stmts, new_stmt);
++
++ lhs = get_lhs(new_stmt);
++ gcc_assert(lhs != NULL_TREE);
++ return lhs;
++}
++
++tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before)
++{
++ tree lhs, dst_type;
++ gimple_stmt_iterator gsi;
++
++ if (rhs1 == NULL_TREE) {
++ debug_gimple_stmt(oldstmt);
++ error("%s: rhs1 is NULL_TREE", __func__);
++ gcc_unreachable();
++ }
++
++ switch (gimple_code(oldstmt)) {
++ case GIMPLE_ASM:
++ lhs = rhs1;
++ break;
++ case GIMPLE_CALL:
++ case GIMPLE_ASSIGN:
++ lhs = gimple_get_lhs(oldstmt);
++ break;
++ default:
++ debug_gimple_stmt(oldstmt);
++ gcc_unreachable();
++ }
++
++ gsi = gsi_for_stmt(oldstmt);
++ pointer_set_insert(visited->stmts, oldstmt);
++ if (lookup_stmt_eh_lp(oldstmt) != 0) {
++ basic_block next_bb, cur_bb;
++ const_edge e;
++
++ gcc_assert(before == false);
++ gcc_assert(stmt_can_throw_internal(oldstmt));
++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
++ gcc_assert(!gsi_end_p(gsi));
++
++ cur_bb = gimple_bb(oldstmt);
++ next_bb = cur_bb->next_bb;
++ e = find_edge(cur_bb, next_bb);
++ gcc_assert(e != NULL);
++ gcc_assert(e->flags & EDGE_FALLTHRU);
++
++ gsi = gsi_after_labels(next_bb);
++ gcc_assert(!gsi_end_p(gsi));
++
++ before = true;
++ oldstmt = gsi_stmt(gsi);
++ }
++
++ dst_type = get_size_overflow_type(visited, oldstmt, lhs);
++
++ if (is_gimple_constant(rhs1))
++ return cast_a_tree(dst_type, rhs1);
++ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before);
++}
++
++tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
++{
++ gimple stmt;
++ gimple_stmt_iterator gsi;
++ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
++
++ if (pointer_set_contains(visited->my_stmts, oldstmt))
++ return lhs;
++
++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
++ rhs1 = gimple_assign_rhs1(oldstmt);
++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
++ }
++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
++ rhs2 = gimple_assign_rhs2(oldstmt);
++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
++ }
++
++ stmt = gimple_copy(oldstmt);
++ gimple_set_location(stmt, gimple_location(oldstmt));
++ pointer_set_insert(visited->my_stmts, stmt);
++
++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
++ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
++
++ size_overflow_type = get_size_overflow_type(visited, oldstmt, node);
++
++ new_var = create_new_var(size_overflow_type);
++ new_var = make_ssa_name(new_var, stmt);
++ gimple_assign_set_lhs(stmt, new_var);
++
++ if (rhs1 != NULL_TREE)
++ gimple_assign_set_rhs1(stmt, rhs1);
++
++ if (rhs2 != NULL_TREE)
++ gimple_assign_set_rhs2(stmt, rhs2);
++#if BUILDING_GCC_VERSION >= 4006
++ if (rhs3 != NULL_TREE)
++ gimple_assign_set_rhs3(stmt, rhs3);
++#endif
++ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
++ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
++ update_stmt(stmt);
++ pointer_set_insert(visited->stmts, oldstmt);
++ return gimple_assign_lhs(stmt);
++}
++
++static tree cast_parm_decl(struct visited *visited, tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++ basic_block first_bb;
++
++ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
++
++ if (bb->index == 0) {
++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
++ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
++ bb = first_bb;
++ }
++
++ gsi = gsi_after_labels(bb);
++ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++
++ return gimple_assign_lhs(assign);
++}
++
++static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg)
++{
++ gimple_stmt_iterator gsi;
++ gimple assign, def_stmt = get_def_stmt(new_arg);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI) {
++ gsi = gsi_after_labels(gimple_bb(def_stmt));
++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
++ } else {
++ gsi = gsi_for_stmt(def_stmt);
++ assign = build_cast_stmt(visited, TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
++ }
++
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++}
++
++static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type)
++{
++ basic_block bb;
++ gimple_stmt_iterator gsi;
++ const_gimple def_stmt;
++ gimple assign;
++
++ def_stmt = get_def_stmt(arg);
++ bb = gimple_bb(def_stmt);
++ gcc_assert(bb->index != 0);
++ gsi = gsi_after_labels(bb);
++
++ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++}
++
++static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
++{
++ tree size_overflow_type;
++ tree arg;
++ const_gimple def_stmt;
++
++ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
++ return new_arg;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ def_stmt = get_def_stmt(arg);
++ gcc_assert(def_stmt != NULL);
++ size_overflow_type = get_size_overflow_type(visited, oldstmt, arg);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_PHI:
++ return cast_visited_phi_arg(visited, ssa_name_var, arg, size_overflow_type);
++ case GIMPLE_NOP: {
++ basic_block bb;
++
++ bb = gimple_phi_arg_edge(oldstmt, i)->src;
++ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb);
++ }
++ case GIMPLE_ASM: {
++ gimple_stmt_iterator gsi;
++ gimple assign, stmt = get_def_stmt(arg);
++
++ gsi = gsi_for_stmt(stmt);
++ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++ }
++ default:
++ gcc_assert(new_arg != NULL_TREE);
++ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
++ return use_phi_ssa_name(visited, ssa_name_var, new_arg);
++ }
++}
++
++static gimple overflow_create_phi_node(struct visited *visited, gimple oldstmt, tree result)
++{
++ basic_block bb;
++ gimple phi;
++ gimple_seq seq;
++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
++
++ bb = gsi_bb(gsi);
++
++ if (result == NULL_TREE) {
++ tree old_result = gimple_phi_result(oldstmt);
++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, old_result);
++
++ result = create_new_var(size_overflow_type);
++ }
++
++ phi = create_phi_node(result, bb);
++ gimple_phi_set_result(phi, make_ssa_name(result, phi));
++ seq = phi_nodes(bb);
++ gsi = gsi_last(seq);
++ gsi_remove(&gsi, false);
++
++ gsi = gsi_for_stmt(oldstmt);
++ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
++ gimple_set_bb(phi, bb);
++ return phi;
++}
++
++#if BUILDING_GCC_VERSION <= 4007
++static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
++#else
++static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
++#endif
++{
++ gimple new_phi;
++ unsigned int i;
++ tree arg, result;
++ location_t loc = gimple_location(oldstmt);
++
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(!VEC_empty(tree, *args));
++#else
++ gcc_assert(!args->is_empty());
++#endif
++
++ new_phi = overflow_create_phi_node(visited, oldstmt, ssa_name_var);
++ result = gimple_phi_result(new_phi);
++ ssa_name_var = SSA_NAME_VAR(result);
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, *args, i, arg) {
++#else
++ FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
++#endif
++ arg = create_new_phi_arg(visited, ssa_name_var, arg, oldstmt, i);
++ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, heap, *args);
++#else
++ vec_free(args);
++#endif
++ update_stmt(new_phi);
++ pointer_set_insert(visited->my_stmts, new_phi);
++ return result;
++}
++
++static tree handle_phi(struct visited *visited, struct cgraph_node *caller_node, tree orig_result)
++{
++ tree ssa_name_var = NULL_TREE;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, heap) *args = NULL;
++#else
++ vec<tree, va_heap, vl_embed> *args = NULL;
++#endif
++ gimple oldstmt = get_def_stmt(orig_result);
++ unsigned int i, len = gimple_phi_num_args(oldstmt);
++
++ pointer_set_insert(visited->stmts, oldstmt);
++ for (i = 0; i < len; i++) {
++ tree arg, new_arg;
++
++ arg = gimple_phi_arg_def(oldstmt, i);
++ new_arg = expand(visited, caller_node, arg);
++
++ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
++ ssa_name_var = SSA_NAME_VAR(new_arg);
++
++ if (is_gimple_constant(arg)) {
++ tree size_overflow_type = get_size_overflow_type(visited, oldstmt, arg);
++
++ new_arg = cast_a_tree(size_overflow_type, arg);
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_safe_push(tree, heap, args, new_arg);
++#else
++ vec_safe_push(args, new_arg);
++#endif
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ return create_new_phi_node(visited, &args, ssa_name_var, oldstmt);
++#else
++ return create_new_phi_node(visited, args, ssa_name_var, oldstmt);
++#endif
++}
++
++static tree create_cast_assign(struct visited *visited, gimple stmt)
++{
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree lhs = gimple_assign_lhs(stmt);
++ const_tree rhs1_type = TREE_TYPE(rhs1);
++ const_tree lhs_type = TREE_TYPE(lhs);
++
++ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ return create_assign(visited, stmt, rhs1, AFTER_STMT);
++}
++
++static bool skip_lhs_cast_check(const_gimple stmt)
++{
++ const_tree rhs = gimple_assign_rhs1(stmt);
++ const_gimple def_stmt = get_def_stmt(rhs);
++
++ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
++ if (gimple_code(def_stmt) == GIMPLE_ASM)
++ return true;
++
++ if (is_const_plus_unsigned_signed_truncation(rhs))
++ return true;
++
++ return false;
++}
++
++static tree create_string_param(tree string)
++{
++ tree i_type, a_type;
++ const int length = TREE_STRING_LENGTH(string);
++
++ gcc_assert(length > 0);
++
++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
++ a_type = build_array_type(char_type_node, i_type);
++
++ TREE_TYPE(string) = a_type;
++ TREE_CONSTANT(string) = 1;
++ TREE_READONLY(string) = 1;
++
++ return build1(ADDR_EXPR, ptr_type_node, string);
++}
++
++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
++{
++ gimple cond_stmt;
++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
++
++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
++ update_stmt(cond_stmt);
++}
++
++static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
++{
++ gimple func_stmt;
++ const_gimple def_stmt;
++ const_tree loc_line;
++ tree loc_file, ssa_name, current_func;
++ expanded_location xloc;
++ char *ssa_name_buf;
++ int len;
++ struct cgraph_edge *edge;
++ struct cgraph_node *callee_node;
++ int frequency;
++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
++
++ def_stmt = get_def_stmt(arg);
++ xloc = expand_location(gimple_location(def_stmt));
++
++ if (!gimple_has_location(def_stmt)) {
++ xloc = expand_location(gimple_location(stmt));
++ if (!gimple_has_location(stmt))
++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
++ }
++
++ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
++
++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
++ loc_file = create_string_param(loc_file);
++
++ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
++ current_func = create_string_param(current_func);
++
++ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
++ call_count++;
++ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
++ gcc_assert(len > 0);
++ ssa_name = build_string(len + 1, ssa_name_buf);
++ free(ssa_name_buf);
++ ssa_name = create_string_param(ssa_name);
++
++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
++
++ callee_node = cgraph_get_create_node(report_size_overflow_decl);
++ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
++
++ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
++ gcc_assert(edge != NULL);
++}
++
++static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
++{
++ basic_block cond_bb, join_bb, bb_true;
++ edge e;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ cond_bb = gimple_bb(stmt);
++ if (before)
++ gsi_prev(&gsi);
++ if (gsi_end_p(gsi))
++ e = split_block_after_labels(cond_bb);
++ else
++ e = split_block(cond_bb, gsi_stmt(gsi));
++ cond_bb = e->src;
++ join_bb = e->dest;
++ e->flags = EDGE_FALSE_VALUE;
++ e->probability = REG_BR_PROB_BASE;
++
++ bb_true = create_empty_bb(cond_bb);
++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
++ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
++
++ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
++
++ if (current_loops != NULL) {
++ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
++ add_bb_to_loop(bb_true, cond_bb->loop_father);
++ }
++
++ insert_cond(cond_bb, arg, cond_code, type_value);
++ insert_cond_result(caller_node, bb_true, stmt, arg, min);
++
++// print_the_code_insertions(stmt);
++}
++
++void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
++{
++ const_tree rhs_type = TREE_TYPE(rhs);
++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
++
++ gcc_assert(rhs_type != NULL_TREE);
++ if (TREE_CODE(rhs_type) == POINTER_TYPE)
++ return;
++
++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
++
++ if (is_const_plus_unsigned_signed_truncation(rhs))
++ return;
++
++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
++ // typemax (-1) < typemin (0)
++ if (TREE_OVERFLOW(type_max))
++ return;
++
++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++
++ cast_rhs_type = TREE_TYPE(cast_rhs);
++ type_max_type = TREE_TYPE(type_max);
++ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
++
++ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
++
++ // special case: get_size_overflow_type(), 32, u64->s
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
++ return;
++
++ type_min_type = TREE_TYPE(type_min);
++ gcc_assert(types_compatible_p(type_max_type, type_min_type));
++ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
++}
++
++static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
++{
++ bool cast_lhs, cast_rhs;
++ tree lhs = gimple_assign_lhs(stmt);
++ tree rhs = gimple_assign_rhs1(stmt);
++ const_tree lhs_type = TREE_TYPE(lhs);
++ const_tree rhs_type = TREE_TYPE(rhs);
++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
++ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
++ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
++
++ static bool check_lhs[3][4] = {
++ // ss su us uu
++ { false, true, true, false }, // lhs > rhs
++ { false, false, false, false }, // lhs = rhs
++ { true, true, true, true }, // lhs < rhs
++ };
++
++ static bool check_rhs[3][4] = {
++ // ss su us uu
++ { true, false, true, true }, // lhs > rhs
++ { true, false, true, true }, // lhs = rhs
++ { true, false, true, true }, // lhs < rhs
++ };
++
++ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
++ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (lhs_size > rhs_size) {
++ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ } else if (lhs_size == rhs_size) {
++ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ } else {
++ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
++ }
++
++ if (!cast_lhs && !cast_rhs)
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ if (cast_lhs && !skip_lhs_cast_check(stmt))
++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
++
++ if (cast_rhs)
++ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++}
++
++static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++{
++ enum tree_code rhs_code;
++ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
++
++ if (pointer_set_contains(visited->my_stmts, stmt))
++ return lhs;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++
++ if (new_rhs1 == NULL_TREE)
++ return create_cast_assign(visited, stmt);
++
++ if (pointer_set_contains(visited->no_cast_check, stmt))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ rhs_code = gimple_assign_rhs_code(stmt);
++ if (rhs_code == BIT_NOT_EXPR || rhs_code == NEGATE_EXPR) {
++ tree size_overflow_type = get_size_overflow_type(visited, stmt, rhs1);
++
++ new_rhs1 = cast_to_new_size_overflow_type(visited, stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
++ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++ }
++
++ if (!gimple_assign_cast_p(stmt))
++ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
++ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
++}
++
++static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++{
++ tree rhs1, lhs = gimple_assign_lhs(stmt);
++ gimple def_stmt = get_def_stmt(lhs);
++
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
++ rhs1 = gimple_assign_rhs1(def_stmt);
++
++ if (is_gimple_constant(rhs1))
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++
++ switch (TREE_CODE(rhs1)) {
++ case SSA_NAME: {
++ tree ret = handle_unary_rhs(visited, caller_node, def_stmt);
++
++ if (gimple_assign_cast_p(stmt))
++ unsigned_signed_cast_intentional_overflow(visited, stmt);
++ return ret;
++ }
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case ADDR_EXPR:
++ case COMPONENT_REF:
++ case INDIRECT_REF:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case TARGET_MEM_REF:
++ case VIEW_CONVERT_EXPR:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ case PARM_DECL:
++ case VAR_DECL:
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ default:
++ debug_gimple_stmt(def_stmt);
++ debug_tree(rhs1);
++ gcc_unreachable();
++ }
++}
++
++static void __unused print_the_code_insertions(const_gimple stmt)
++{
++ location_t loc = gimple_location(stmt);
++
++ inform(loc, "Integer size_overflow check applied here.");
++}
++
++static tree handle_binary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ enum intentional_overflow_type res;
++ tree rhs1, rhs2, new_lhs;
++ gimple def_stmt = get_def_stmt(lhs);
++ tree new_rhs1 = NULL_TREE;
++ tree new_rhs2 = NULL_TREE;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ /* no DImode/TImode division in the 32/64 bit kernel */
++ switch (gimple_assign_rhs_code(def_stmt)) {
++ case RDIV_EXPR:
++ case TRUNC_DIV_EXPR:
++ case CEIL_DIV_EXPR:
++ case FLOOR_DIV_EXPR:
++ case ROUND_DIV_EXPR:
++ case TRUNC_MOD_EXPR:
++ case CEIL_MOD_EXPR:
++ case FLOOR_MOD_EXPR:
++ case ROUND_MOD_EXPR:
++ case EXACT_DIV_EXPR:
++ case POINTER_PLUS_EXPR:
++ case BIT_AND_EXPR:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ default:
++ break;
++ }
++
++ new_lhs = handle_integer_truncation(visited, caller_node, lhs);
++ if (new_lhs != NULL_TREE)
++ return new_lhs;
++
++ if (TREE_CODE(rhs1) == SSA_NAME)
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ if (TREE_CODE(rhs2) == SSA_NAME)
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ res = add_mul_intentional_overflow(def_stmt);
++ if (res != NO_INTENTIONAL_OVERFLOW) {
++ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++ insert_cast_expr(visited, get_def_stmt(new_lhs), res);
++ return new_lhs;
++ }
++
++ if (skip_expr_on_double_type(def_stmt)) {
++ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++ insert_cast_expr(visited, get_def_stmt(new_lhs), NO_INTENTIONAL_OVERFLOW);
++ return new_lhs;
++ }
++
++ if (is_a_neg_overflow(def_stmt, rhs2))
++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
++ if (is_a_neg_overflow(def_stmt, rhs1))
++ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
++
++
++ if (is_a_constant_overflow(def_stmt, rhs2))
++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
++ if (is_a_constant_overflow(def_stmt, rhs1))
++ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
++
++ // the const is between 0 and (signed) MAX
++ if (is_gimple_constant(rhs1))
++ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT);
++ if (is_gimple_constant(rhs2))
++ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT);
++
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++#if BUILDING_GCC_VERSION >= 4006
++static tree get_new_rhs(struct visited *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
++{
++ if (is_gimple_constant(rhs))
++ return cast_a_tree(size_overflow_type, rhs);
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return NULL_TREE;
++ return expand(visited, caller_node, rhs);
++}
++
++static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs);
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs3 = gimple_assign_rhs3(def_stmt);
++ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
++ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
++ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
++
++ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
++}
++#endif
++
++static tree get_my_stmt_lhs(struct visited *visited, gimple stmt)
++{
++ gimple_stmt_iterator gsi;
++ gimple next_stmt = NULL;
++
++ gsi = gsi_for_stmt(stmt);
++
++ do {
++ gsi_next(&gsi);
++ next_stmt = gsi_stmt(gsi);
++
++ if (gimple_code(stmt) == GIMPLE_PHI && !pointer_set_contains(visited->my_stmts, next_stmt))
++ return NULL_TREE;
++
++ if (pointer_set_contains(visited->my_stmts, next_stmt) && !pointer_set_contains(visited->skip_expr_casts, next_stmt))
++ break;
++
++ gcc_assert(pointer_set_contains(visited->my_stmts, next_stmt));
++ } while (!gsi_end_p(gsi));
++
++ gcc_assert(next_stmt);
++ return get_lhs(next_stmt);
++}
++
++static tree expand_visited(struct visited *visited, gimple def_stmt)
++{
++ gimple_stmt_iterator gsi;
++ enum gimple_code code = gimple_code(def_stmt);
++
++ if (code == GIMPLE_ASM)
++ return NULL_TREE;
++
++ gsi = gsi_for_stmt(def_stmt);
++ gsi_next(&gsi);
++
++ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
++ return NULL_TREE;
++ return get_my_stmt_lhs(visited, def_stmt);
++}
++
++tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
++{
++ gimple def_stmt;
++
++ def_stmt = get_def_stmt(lhs);
++
++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++ return NULL_TREE;
++
++ if (pointer_set_contains(visited->my_stmts, def_stmt))
++ return lhs;
++
++ if (pointer_set_contains(visited->stmts, def_stmt))
++ return expand_visited(visited, def_stmt);
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_PHI:
++ return handle_phi(visited, caller_node, lhs);
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return handle_unary_ops(visited, caller_node, def_stmt);
++ case 3:
++ return handle_binary_ops(visited, caller_node, lhs);
++#if BUILDING_GCC_VERSION >= 4006
++ case 4:
++ return handle_ternary_ops(visited, caller_node, lhs);
++#endif
++ }
++ default:
++ debug_gimple_stmt(def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
+new file mode 100644
+index 0000000..f8f5dd5
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
+@@ -0,0 +1,1133 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++#define VEC_LEN 128
++#define RET_CHECK NULL_TREE
++#define WRONG_NODE 32
++#define NOT_INTENTIONAL_ASM NULL
++
++unsigned int call_count;
++
++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
++
++struct visited_fns {
++ struct visited_fns *next;
++ const_tree fndecl;
++ unsigned int num;
++ const_gimple first_stmt;
++};
++
++struct next_cgraph_node {
++ struct next_cgraph_node *next;
++ struct cgraph_node *current_function;
++ tree callee_fndecl;
++ unsigned int num;
++};
++
++// Don't want to duplicate entries in next_cgraph_node
++static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
++{
++ const_tree new_callee_fndecl;
++ struct next_cgraph_node *cur_node;
++
++ if (fndecl == RET_CHECK)
++ new_callee_fndecl = NODE_DECL(node);
++ else
++ new_callee_fndecl = fndecl;
++
++ for (cur_node = head; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
++ continue;
++ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
++ continue;
++ if (num == cur_node->num)
++ return true;
++ }
++ return false;
++}
++
++/* Add a next_cgraph_node into the list for handle_function().
++ * handle_function() iterates over all the next cgraph nodes and
++ * starts the overflow check insertion process.
++ */
++static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
++{
++ struct next_cgraph_node *new_node;
++
++ if (is_in_next_cgraph_node(head, node, fndecl, num))
++ return head;
++
++ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
++ new_node->current_function = node;
++ new_node->next = NULL;
++ new_node->num = num;
++ if (fndecl == RET_CHECK)
++ new_node->callee_fndecl = NODE_DECL(node);
++ else
++ new_node->callee_fndecl = fndecl;
++
++ if (!head)
++ return new_node;
++
++ new_node->next = head;
++ return new_node;
++}
++
++static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
++{
++ struct cgraph_edge *e;
++
++ if (num == 0)
++ return create_new_next_cgraph_node(head, node, RET_CHECK, num);
++
++ for (e = node->callers; e; e = e->next_caller) {
++ tree fndecl = gimple_call_fndecl(e->call_stmt);
++
++ gcc_assert(fndecl != NULL_TREE);
++ head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
++ }
++
++ return head;
++}
++
++struct missing_functions {
++ struct missing_functions *next;
++ const_tree node;
++ tree fndecl;
++};
++
++static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
++{
++ struct missing_functions *new_function;
++
++ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
++ new_function->node = node;
++ new_function->next = NULL;
++
++ if (TREE_CODE(node) == FUNCTION_DECL)
++ new_function->fndecl = node;
++ else
++ new_function->fndecl = current_function_decl;
++ gcc_assert(new_function->fndecl);
++
++ if (!missing_fn_head)
++ return new_function;
++
++ new_function->next = missing_fn_head;
++ return new_function;
++}
++
++/* If the function is missing from the hash table and it is a static function
++ * then create a next_cgraph_node from it for handle_function()
++ */
++static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
++{
++ unsigned int num;
++ const_tree orig_fndecl;
++ struct cgraph_node *next_node = NULL;
++
++ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
++
++ num = get_function_num(missing_fn_head->node, orig_fndecl);
++ if (num == CANNOT_FIND_ARG)
++ return cnodes;
++
++ if (!is_missing_function(orig_fndecl, num))
++ return cnodes;
++
++ next_node = cgraph_get_node(missing_fn_head->fndecl);
++ if (next_node && next_node->local.local)
++ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
++ return cnodes;
++}
++
++/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
++ * into the next_cgraph_node list. They will be the next interesting returns or callees.
++ */
++static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
++{
++ unsigned int i;
++ tree node;
++ struct missing_functions *cur, *missing_fn_head = NULL;
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
++#endif
++ switch (TREE_CODE(node)) {
++ case PARM_DECL:
++ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
++ break;
++ case FUNCTION_DECL:
++ missing_fn_head = create_new_missing_function(missing_fn_head, node);
++ break;
++ default:
++ break;
++ }
++ }
++
++ while (missing_fn_head) {
++ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
++
++ cur = missing_fn_head->next;
++ free(missing_fn_head);
++ missing_fn_head = cur;
++ }
++
++ return cnodes;
++}
++
++static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ const_tree arg = gimple_phi_arg_def(phi, i);
++
++ set_conditions(visited, interesting_conditions, arg);
++ }
++}
++
++enum conditions {
++ FROM_CONST, NOT_UNARY, CAST
++};
++
++// Search for constants, cast assignments and binary/ternary assignments
++static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
++{
++ gimple def_stmt = get_def_stmt(lhs);
++
++ if (is_gimple_constant(lhs)) {
++ interesting_conditions[FROM_CONST] = true;
++ return;
++ }
++
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_contains(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ case GIMPLE_CALL:
++ case GIMPLE_ASM:
++ return;
++ case GIMPLE_PHI:
++ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
++ case GIMPLE_ASSIGN:
++ if (gimple_num_ops(def_stmt) == 2) {
++ const_tree rhs = gimple_assign_rhs1(def_stmt);
++
++ if (gimple_assign_cast_p(def_stmt))
++ interesting_conditions[CAST] = true;
++
++ return set_conditions(visited, interesting_conditions, rhs);
++ } else {
++ interesting_conditions[NOT_UNARY] = true;
++ return;
++ }
++ default:
++ debug_gimple_stmt(def_stmt);
++ gcc_unreachable();
++ }
++}
++
++// determine whether duplication will be necessary or not.
++static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
++{
++ struct pointer_set_t *visited;
++
++ if (gimple_assign_cast_p(cur_node->first_stmt))
++ interesting_conditions[CAST] = true;
++ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
++ interesting_conditions[NOT_UNARY] = true;
++
++ visited = pointer_set_create();
++ set_conditions(visited, interesting_conditions, cur_node->node);
++ pointer_set_destroy(visited);
++}
++
++// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
++static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi;
++
++ // already removed
++ if (gimple_bb(asm_stmt) == NULL)
++ return;
++ gsi = gsi_for_stmt(asm_stmt);
++
++ assign = gimple_build_assign(lhs, rhs);
++ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
++ SSA_NAME_DEF_STMT(lhs) = assign;
++
++ gsi_remove(&gsi, true);
++}
++
++/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting
++ * stmt is a return otherwise it is the callee function.
++ */
++const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
++{
++ const_tree fndecl;
++
++ if (argnum == 0)
++ fndecl = current_function_decl;
++ else
++ fndecl = gimple_call_fndecl(stmt);
++
++ if (fndecl == NULL_TREE)
++ return NULL_TREE;
++
++ return DECL_ORIGIN(fndecl);
++}
++
++// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
++static bool skip_asm(const_tree arg)
++{
++ gimple def_stmt = get_def_stmt(arg);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
++ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
++}
++
++static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
++{
++ gimple phi = get_def_stmt(result);
++ unsigned int i, n = gimple_phi_num_args(phi);
++
++ pointer_set_insert(visited, phi);
++ for (i = 0; i < n; i++) {
++ tree arg = gimple_phi_arg_def(phi, i);
++
++ walk_use_def(visited, cur_node, arg);
++ }
++}
++
++static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++ gimple def_stmt = get_def_stmt(lhs);
++ tree rhs1, rhs2;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++
++ walk_use_def(visited, cur_node, rhs1);
++ walk_use_def(visited, cur_node, rhs2);
++}
++
++static void insert_last_node(struct interesting_node *cur_node, tree node)
++{
++ unsigned int i;
++ tree element;
++ enum tree_code code;
++
++ gcc_assert(node != NULL_TREE);
++
++ if (is_gimple_constant(node))
++ return;
++
++ code = TREE_CODE(node);
++ if (code == VAR_DECL) {
++ node = DECL_ORIGIN(node);
++ code = TREE_CODE(node);
++ }
++
++ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
++ return;
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
++#endif
++ if (operand_equal_p(node, element, 0))
++ return;
++ }
++
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
++ VEC_safe_push(tree, gc, cur_node->last_nodes, node);
++#else
++ gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
++ vec_safe_push(cur_node->last_nodes, node);
++#endif
++}
++
++// a size_overflow asm stmt in the control flow doesn't stop the recursion
++static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++{
++ if (!is_size_overflow_asm(stmt))
++ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++}
++
++/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
++ * and component refs (for checking the intentional_overflow attribute).
++ */
++static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(lhs) != SSA_NAME) {
++ insert_last_node(cur_node, lhs);
++ return;
++ }
++
++ def_stmt = get_def_stmt(lhs);
++ if (!def_stmt)
++ return;
++
++ if (pointer_set_insert(visited, def_stmt))
++ return;
++
++ switch (gimple_code(def_stmt)) {
++ case GIMPLE_NOP:
++ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
++ case GIMPLE_ASM:
++ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++ case GIMPLE_CALL: {
++ tree fndecl = gimple_call_fndecl(def_stmt);
++
++ if (fndecl == NULL_TREE)
++ return;
++ insert_last_node(cur_node, fndecl);
++ return;
++ }
++ case GIMPLE_PHI:
++ return walk_use_def_phi(visited, cur_node, lhs);
++ case GIMPLE_ASSIGN:
++ switch (gimple_num_ops(def_stmt)) {
++ case 2:
++ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++ case 3:
++ return walk_use_def_binary(visited, cur_node, lhs);
++ }
++ default:
++ debug_gimple_stmt((gimple)def_stmt);
++ error("%s: unknown gimple code", __func__);
++ gcc_unreachable();
++ }
++}
++
++// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
++static void set_last_nodes(struct interesting_node *cur_node)
++{
++ struct pointer_set_t *visited;
++
++ visited = pointer_set_create();
++ walk_use_def(visited, cur_node, cur_node->node);
++ pointer_set_destroy(visited);
++}
++
++enum precond {
++ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
++};
++
++/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
++ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
++ * Stmt duplication is unnecessary if there are no binary/ternary assignments or if the unary assignment isn't a cast.
++ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assignments then we assume that it is some kind of error code.
++ */
++static enum precond check_preconditions(struct interesting_node *cur_node)
++{
++ bool interesting_conditions[3] = {false, false, false};
++
++ set_last_nodes(cur_node);
++
++ check_intentional_attribute_ipa(cur_node);
++ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
++ return NO_ATTRIBUTE_SEARCH;
++
++ search_interesting_conditions(cur_node, interesting_conditions);
++
++ // error code
++ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
++ return NO_ATTRIBUTE_SEARCH;
++
++ // unnecessary overflow check
++ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
++ return NO_CHECK_INSERT;
++
++ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
++ return NO_CHECK_INSERT;
++
++ return NONE;
++}
++
++static tree cast_to_orig_type(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node)
++{
++ const_gimple assign;
++ tree orig_type = TREE_TYPE(orig_node);
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ return gimple_assign_lhs(assign);
++}
++
++static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node)
++{
++ void (*set_rhs)(gimple, tree);
++ gimple stmt = cur_node->first_stmt;
++ const_tree orig_node = cur_node->node;
++
++ switch (gimple_code(stmt)) {
++ case GIMPLE_RETURN:
++ gimple_return_set_retval(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ break;
++ case GIMPLE_CALL:
++ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ break;
++ case GIMPLE_ASSIGN:
++ switch (cur_node->num) {
++ case 1:
++ set_rhs = &gimple_assign_set_rhs1;
++ break;
++ case 2:
++ set_rhs = &gimple_assign_set_rhs2;
++ break;
++#if BUILDING_GCC_VERSION >= 4006
++ case 3:
++ set_rhs = &gimple_assign_set_rhs3;
++ break;
++#endif
++ default:
++ gcc_unreachable();
++ }
++
++ set_rhs(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ break;
++ default:
++ debug_gimple_stmt(stmt);
++ gcc_unreachable();
++ }
++
++ update_stmt(stmt);
++}
++
++static struct visited *create_visited(void)
++{
++ struct visited *new_node;
++
++ new_node = (struct visited *)xmalloc(sizeof(*new_node));
++ new_node->stmts = pointer_set_create();
++ new_node->my_stmts = pointer_set_create();
++ new_node->skip_expr_casts = pointer_set_create();
++ new_node->no_cast_check = pointer_set_create();
++ return new_node;
++}
++
++static void free_visited(struct visited *visited)
++{
++ pointer_set_destroy(visited->stmts);
++ pointer_set_destroy(visited->my_stmts);
++ pointer_set_destroy(visited->skip_expr_casts);
++ pointer_set_destroy(visited->no_cast_check);
++
++ free(visited);
++}
++
++/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
++ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
++ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
++ */
++static struct next_cgraph_node *handle_interesting_stmt(struct visited *visited, struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
++{
++ enum precond ret;
++ tree new_node, orig_node = cur_node->node;
++
++ ret = check_preconditions(cur_node);
++ if (ret == NO_ATTRIBUTE_SEARCH)
++ return cnodes;
++
++ cnodes = search_overflow_attribute(cnodes, cur_node);
++
++ if (ret == NO_CHECK_INSERT)
++ return cnodes;
++
++ new_node = expand(visited, caller_node, orig_node);
++ if (new_node == NULL_TREE)
++ return cnodes;
++
++ change_orig_node(visited, cur_node, new_node);
++ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
++
++ return cnodes;
++}
++
++// Check visited_fns interesting nodes.
++static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
++{
++ struct interesting_node *cur;
++
++ for (cur = head; cur; cur = cur->next) {
++ if (!operand_equal_p(node, cur->node, 0))
++ continue;
++ if (num != cur->num)
++ continue;
++ if (first_stmt == cur->first_stmt)
++ return true;
++ }
++ return false;
++}
++
++/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
++ first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this
++   last_nodes: they are the last stmts in the recursion (they do not have a def_stmt). They are useful in the missing size_overflow attribute check and
++ the intentional_overflow attribute check. They are collected by set_last_nodes().
++ num: arg count of a call stmt or 0 when it is a ret
++ node: the recursion starts from here, it is a call arg or a return value
++ fndecl: the fndecl of the interesting node when the node is an arg. it is the fndecl of the callee function otherwise it is the fndecl of the caller (current_function_fndecl) function.
++ intentional_attr_decl: intentional_overflow attribute of the callee function
++ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
++ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
++ */
++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
++{
++ struct interesting_node *new_node;
++ tree fndecl;
++ enum gimple_code code;
++
++ gcc_assert(node != NULL_TREE);
++ code = gimple_code(first_stmt);
++ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
++
++ if (num == CANNOT_FIND_ARG)
++ return head;
++
++ if (skip_types(node))
++ return head;
++
++ if (skip_asm(node))
++ return head;
++
++ if (is_gimple_call(first_stmt))
++ fndecl = gimple_call_fndecl(first_stmt);
++ else
++ fndecl = current_function_decl;
++
++ if (fndecl == NULL_TREE)
++ return head;
++
++ if (is_in_interesting_node(head, first_stmt, node, num))
++ return head;
++
++ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
++
++ new_node->next = NULL;
++ new_node->first_stmt = first_stmt;
++#if BUILDING_GCC_VERSION <= 4007
++ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
++#else
++ vec_alloc(new_node->last_nodes, VEC_LEN);
++#endif
++ new_node->num = num;
++ new_node->node = node;
++ new_node->fndecl = fndecl;
++ new_node->intentional_attr_decl = MARK_NO;
++ new_node->intentional_attr_cur_fndecl = MARK_NO;
++ new_node->intentional_mark_from_gimple = asm_stmt;
++
++ if (!head)
++ return new_node;
++
++ new_node->next = head;
++ return new_node;
++}
++
++/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
++ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
++ */
++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++{
++ struct next_cgraph_node *cur_node;
++ tree ret = gimple_return_retval(stmt);
++
++ if (ret == NULL_TREE)
++ return head;
++
++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
++ continue;
++ if (cur_node->num == 0)
++ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
++ }
++
++ return head;
++}
++
++/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
++ * If the call stmt is in the next cgraph node list then it's an interesting call.
++ */
++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++{
++ unsigned int argnum;
++ tree arg;
++ const_tree fndecl;
++ struct next_cgraph_node *cur_node;
++
++ fndecl = gimple_call_fndecl(stmt);
++ if (fndecl == NULL_TREE)
++ return head;
++
++ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
++ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
++ continue;
++ argnum = get_correct_arg_count(cur_node->num, fndecl);
++ gcc_assert(argnum != CANNOT_FIND_ARG);
++ if (argnum == 0)
++ continue;
++
++ arg = gimple_call_arg(stmt, argnum - 1);
++ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
++ }
++
++ return head;
++}
++
++static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
++{
++ if (!operand_equal_p(orig_node, node, 0))
++ return WRONG_NODE;
++ if (skip_types(node))
++ return WRONG_NODE;
++ return ret_count;
++}
++
++// Get the index of the rhs node in an assignment
++static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
++{
++ const_tree rhs1, rhs2;
++ unsigned int ret;
++
++ gcc_assert(stmt);
++ gcc_assert(is_gimple_assign(stmt));
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ gcc_assert(rhs1 != NULL_TREE);
++
++ switch (gimple_num_ops(stmt)) {
++ case 2:
++ return check_ops(node, rhs1, 1);
++ case 3:
++ ret = check_ops(node, rhs1, 1);
++ if (ret != WRONG_NODE)
++ return ret;
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ gcc_assert(rhs2 != NULL_TREE);
++ return check_ops(node, rhs2, 2);
++ default:
++ gcc_unreachable();
++ }
++}
++
++// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
++static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
++{
++ unsigned int i;
++
++ if (gimple_call_fndecl(stmt) == NULL_TREE)
++ return CANNOT_FIND_ARG;
++
++ for (i = 0; i < gimple_call_num_args(stmt); i++) {
++ tree node;
++
++ node = gimple_call_arg(stmt, i);
++ if (!operand_equal_p(arg, node, 0))
++ continue;
++ if (!skip_types(node))
++ return i + 1;
++ }
++
++ return CANNOT_FIND_ARG;
++}
++
++/* starting from the size_overflow asm stmt collect interesting stmts. They can be
++ * any of return, call or assignment stmts (because of inlining).
++ */
++static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
++{
++ use_operand_p use_p;
++ imm_use_iterator imm_iter;
++ unsigned int argnum;
++
++ gcc_assert(TREE_CODE(node) == SSA_NAME);
++
++ if (pointer_set_insert(visited, node))
++ return head;
++
++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++ gimple stmt = USE_STMT(use_p);
++
++ if (stmt == NULL)
++ return head;
++ if (is_gimple_debug(stmt))
++ continue;
++
++ switch (gimple_code(stmt)) {
++ case GIMPLE_CALL:
++ argnum = find_arg_number_gimple(node, stmt);
++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
++ break;
++ case GIMPLE_RETURN:
++ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
++ break;
++ case GIMPLE_ASSIGN:
++ argnum = get_assign_ops_count(stmt, node);
++ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
++ break;
++ case GIMPLE_PHI: {
++ tree result = gimple_phi_result(stmt);
++ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
++ break;
++ }
++ case GIMPLE_ASM:
++ if (gimple_asm_noutputs(stmt) != 0)
++ break;
++ if (!is_size_overflow_asm(stmt))
++ break;
++ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
++ break;
++ case GIMPLE_COND:
++ case GIMPLE_SWITCH:
++ break;
++ default:
++ debug_gimple_stmt(stmt);
++ gcc_unreachable();
++ break;
++ }
++ }
++ return head;
++}
++
++static void remove_size_overflow_asm(gimple stmt)
++{
++ gimple_stmt_iterator gsi;
++ tree input, output;
++
++ if (!is_size_overflow_asm(stmt))
++ return;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ gsi = gsi_for_stmt(stmt);
++ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
++ gsi_remove(&gsi, true);
++ return;
++ }
++
++ input = gimple_asm_input_op(stmt, 0);
++ output = gimple_asm_output_op(stmt, 0);
++ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
++}
++
++/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
++ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
++ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
++ */
++static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
++{
++ const_tree output;
++ struct pointer_set_t *visited;
++ gimple intentional_asm = NOT_INTENTIONAL_ASM;
++
++ if (!is_size_overflow_asm(stmt))
++ return head;
++
++ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
++ intentional_asm = stmt;
++
++ gcc_assert(gimple_asm_ninputs(stmt) == 1);
++
++ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ if (gimple_asm_noutputs(stmt) == 0) {
++ const_tree input;
++
++ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ return head;
++
++ input = gimple_asm_input_op(stmt, 0);
++ remove_size_overflow_asm(stmt);
++ if (is_gimple_constant(TREE_VALUE(input)))
++ return head;
++ visited = pointer_set_create();
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++ }
++
++ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
++ remove_size_overflow_asm(stmt);
++
++ visited = pointer_set_create();
++ output = gimple_asm_output_op(stmt, 0);
++ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
++ pointer_set_destroy(visited);
++ return head;
++}
++
++/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
++ * or a call stmt or a return stmt and store them in the interesting_node list
++ */
++static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
++{
++ basic_block bb;
++ struct interesting_node *head = NULL;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator gsi;
++
++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++ enum gimple_code code;
++ gimple stmt = gsi_stmt(gsi);
++
++ code = gimple_code(stmt);
++
++ if (code == GIMPLE_ASM)
++ head = handle_stmt_by_size_overflow_asm(stmt, head);
++
++ if (!next_node)
++ continue;
++ if (code == GIMPLE_CALL)
++ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
++ if (code == GIMPLE_RETURN)
++ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
++ }
++ }
++ return head;
++}
++
++static void free_interesting_node(struct interesting_node *head)
++{
++ struct interesting_node *cur;
++
++ while (head) {
++ cur = head->next;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC_free(tree, gc, head->last_nodes);
++#else
++ vec_free(head->last_nodes);
++#endif
++ free(head);
++ head = cur;
++ }
++}
++
++static struct visited_fns *insert_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
++{
++ struct visited_fns *new_visited_fns;
++
++ new_visited_fns = (struct visited_fns *)xmalloc(sizeof(*new_visited_fns));
++ new_visited_fns->fndecl = cur_node->fndecl;
++ new_visited_fns->num = cur_node->num;
++ new_visited_fns->first_stmt = cur_node->first_stmt;
++ new_visited_fns->next = NULL;
++
++ if (!head)
++ return new_visited_fns;
++
++ new_visited_fns->next = head;
++ return new_visited_fns;
++}
++
++/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are the same then
++ * it is a visited_fns function.
++ */
++static bool is_visited_fns_function(struct visited_fns *head, struct interesting_node *cur_node)
++{
++ struct visited_fns *cur;
++
++ if (!head)
++ return false;
++
++ for (cur = head; cur; cur = cur->next) {
++ if (cur_node->first_stmt != cur->first_stmt)
++ continue;
++ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
++ continue;
++ if (cur_node->num == cur->num)
++ return true;
++ }
++ return false;
++}
++
++static void free_next_cgraph_node(struct next_cgraph_node *head)
++{
++ struct next_cgraph_node *cur;
++
++ while (head) {
++ cur = head->next;
++ free(head);
++ head = cur;
++ }
++}
++
++static void remove_all_size_overflow_asm(void)
++{
++ basic_block bb;
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator si;
++
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ remove_size_overflow_asm(gsi_stmt(si));
++ }
++}
++
++/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
++ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
++ * the newly collected interesting functions (they are interesting if there is control flow between
++ * the interesting stmts and them).
++ */
++static struct visited_fns *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited_fns *visited_fns)
++{
++ struct visited *visited;
++ struct interesting_node *head, *cur_node;
++ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
++
++ set_current_function_decl(NODE_DECL(node));
++ call_count = 0;
++
++ head = collect_interesting_stmts(next_node);
++
++ visited = create_visited();
++ for (cur_node = head; cur_node; cur_node = cur_node->next) {
++ if (is_visited_fns_function(visited_fns, cur_node))
++ continue;
++ cnodes_head = handle_interesting_stmt(visited, cnodes_head, cur_node, node);
++ visited_fns = insert_visited_fns_function(visited_fns, cur_node);
++ }
++
++ free_visited(visited);
++ free_interesting_node(head);
++ remove_all_size_overflow_asm();
++ unset_current_function_decl();
++
++ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
++ visited_fns = handle_function(cur_cnodes->current_function, cur_cnodes, visited_fns);
++
++ free_next_cgraph_node(cnodes_head);
++ return visited_fns;
++}
++
++static void free_visited_fns(struct visited_fns *head)
++{
++ struct visited_fns *cur;
++
++ while (head) {
++ cur = head->next;
++ free(head);
++ head = cur;
++ }
++}
++
++// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
++unsigned int search_function(void)
++{
++ struct cgraph_node *node;
++ struct visited_fns *visited_fns = NULL;
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++ gcc_assert(cgraph_function_flags_ready);
++#if BUILDING_GCC_VERSION <= 4007
++ gcc_assert(node->reachable);
++#endif
++
++ visited_fns = handle_function(node, NULL, visited_fns);
++ }
++
++ free_visited_fns(visited_fns);
++ return 0;
++}
++
++#if BUILDING_GCC_VERSION >= 4009
++static const struct pass_data insert_size_overflow_check_data = {
++#else
++static struct ipa_opt_pass_d insert_size_overflow_check = {
++ .pass = {
++#endif
++ .type = SIMPLE_IPA_PASS,
++ .name = "size_overflow",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++#if BUILDING_GCC_VERSION >= 4009
++ .has_gate = false,
++ .has_execute = true,
++#else
++ .gate = NULL,
++ .execute = search_function,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++#endif
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
++#if BUILDING_GCC_VERSION < 4009
++ },
++ .generate_summary = NULL,
++ .write_summary = NULL,
++ .read_summary = NULL,
++#if BUILDING_GCC_VERSION >= 4006
++ .write_optimization_summary = NULL,
++ .read_optimization_summary = NULL,
++#endif
++ .stmt_fixup = NULL,
++ .function_transform_todo_flags_start = 0,
++ .function_transform = NULL,
++ .variable_transform = NULL,
++#endif
++};
++
++#if BUILDING_GCC_VERSION >= 4009
++namespace {
++class insert_size_overflow_check : public ipa_opt_pass_d {
++public:
++ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++ unsigned int execute() { return search_function(); }
++};
++}
++#endif
++
++struct opt_pass *make_insert_size_overflow_check(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new insert_size_overflow_check();
++#else
++ return &insert_size_overflow_check.pass;
++#endif
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
+new file mode 100644
+index 0000000..38904bc
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
+@@ -0,0 +1,733 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++/* Get the param of the intentional_overflow attribute.
++ * * 0: MARK_NOT_INTENTIONAL
++ * * 1..MAX_PARAM: MARK_YES
++ * * -1: MARK_TURN_OFF
++ */
++static tree get_attribute_param(const_tree decl)
++{
++ const_tree attr;
++
++ if (decl == NULL_TREE)
++ return NULL_TREE;
++
++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
++ if (!attr || !TREE_VALUE(attr))
++ return NULL_TREE;
++
++ return TREE_VALUE(attr);
++}
++
++// MARK_TURN_OFF
++bool is_turn_off_intentional_attr(const_tree decl)
++{
++ const_tree param_head;
++
++ param_head = get_attribute_param(decl);
++ if (param_head == NULL_TREE)
++ return false;
++
++ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
++ return true;
++ return false;
++}
++
++// MARK_NOT_INTENTIONAL
++bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
++{
++ const_tree param_head;
++
++ if (argnum == 0)
++ return false;
++
++ param_head = get_attribute_param(decl);
++ if (param_head == NULL_TREE)
++ return false;
++
++ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
++ return true;
++ return false;
++}
++
++// MARK_YES
++bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
++{
++ tree param, param_head;
++
++ if (argnum == 0)
++ return false;
++
++ param_head = get_attribute_param(decl);
++ for (param = param_head; param; param = TREE_CHAIN(param))
++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
++ return true;
++ return false;
++}
++
++void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
++{
++ location_t loc;
++
++ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
++ return;
++
++ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
++ return;
++
++ loc = DECL_SOURCE_LOCATION(decl);
++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
++}
++
++// Get the field decl of a component ref for intentional_overflow checking
++static const_tree search_field_decl(const_tree comp_ref)
++{
++ const_tree field = NULL_TREE;
++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
++
++ for (i = 0; i < len; i++) {
++ field = TREE_OPERAND(comp_ref, i);
++ if (TREE_CODE(field) == FIELD_DECL)
++ break;
++ }
++ gcc_assert(TREE_CODE(field) == FIELD_DECL);
++ return field;
++}
++
++/* Get the type of the intentional_overflow attribute of a node
++ * * MARK_TURN_OFF
++ * * MARK_YES
++ * * MARK_NO
++ * * MARK_NOT_INTENTIONAL
++ */
++enum mark get_intentional_attr_type(const_tree node)
++{
++ const_tree cur_decl;
++
++ if (node == NULL_TREE)
++ return MARK_NO;
++
++ switch (TREE_CODE(node)) {
++ case COMPONENT_REF:
++ cur_decl = search_field_decl(node);
++ if (is_turn_off_intentional_attr(cur_decl))
++ return MARK_TURN_OFF;
++ if (is_end_intentional_intentional_attr(cur_decl, 1))
++ return MARK_YES;
++ break;
++ case PARM_DECL: {
++ unsigned int argnum;
++
++ cur_decl = DECL_ORIGIN(current_function_decl);
++ argnum = find_arg_number_tree(node, cur_decl);
++ if (argnum == CANNOT_FIND_ARG)
++ return MARK_NO;
++ if (is_yes_intentional_attr(cur_decl, argnum))
++ return MARK_YES;
++ if (is_end_intentional_intentional_attr(cur_decl, argnum))
++ return MARK_NOT_INTENTIONAL;
++ break;
++ }
++ case FUNCTION_DECL:
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
++ return MARK_TURN_OFF;
++ break;
++ default:
++ break;
++ }
++ return MARK_NO;
++}
++
++// Search for the intentional_overflow attribute on the last nodes
++static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
++{
++ unsigned int i;
++ tree last_node;
++ enum mark mark = MARK_NO;
++
++#if BUILDING_GCC_VERSION <= 4007
++ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
++#else
++ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
++#endif
++ mark = get_intentional_attr_type(last_node);
++ if (mark != MARK_NO)
++ break;
++ }
++ return mark;
++}
++
++/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
++ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
++ */
++static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
++{
++ if (!cur_node->intentional_mark_from_gimple)
++ return false;
++
++ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
++ cur_node->intentional_attr_cur_fndecl = MARK_YES;
++ else
++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
++
++ // skip param decls
++ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
++ return true;
++ return true;
++}
++
++/* Search intentional_overflow attribute on caller and on callee too.
++ * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes
++ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
++ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
++*/
++void check_intentional_attribute_ipa(struct interesting_node *cur_node)
++{
++ const_tree fndecl;
++
++ if (is_intentional_attribute_from_gimple(cur_node))
++ return;
++
++ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
++ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
++ return;
++ }
++
++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
++ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
++ return;
++ }
++
++ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
++ return;
++
++ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
++ if (is_turn_off_intentional_attr(fndecl)) {
++ cur_node->intentional_attr_decl = MARK_TURN_OFF;
++ return;
++ }
++
++ if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
++ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
++ else if (is_yes_intentional_attr(fndecl, cur_node->num))
++ cur_node->intentional_attr_decl = MARK_YES;
++
++ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
++ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
++}
++
++bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
++{
++ const_tree rhs1, lhs, rhs1_type, lhs_type;
++ enum machine_mode lhs_mode, rhs_mode;
++ gimple def_stmt = get_def_stmt(no_const_rhs);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ lhs = gimple_assign_lhs(def_stmt);
++ rhs1_type = TREE_TYPE(rhs1);
++ lhs_type = TREE_TYPE(lhs);
++ rhs_mode = TYPE_MODE(rhs1_type);
++ lhs_mode = TYPE_MODE(lhs_type);
++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
++ return false;
++
++ return true;
++}
++
++static unsigned int uses_num(tree node)
++{
++ imm_use_iterator imm_iter;
++ use_operand_p use_p;
++ unsigned int num = 0;
++
++ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
++ gimple use_stmt = USE_STMT(use_p);
++
++ if (use_stmt == NULL)
++ return num;
++ if (is_gimple_debug(use_stmt))
++ continue;
++ if (gimple_assign_cast_p(use_stmt) && is_size_overflow_type(gimple_assign_lhs(use_stmt)))
++ continue;
++ num++;
++ }
++ return num;
++}
++
++static bool no_uses(tree node)
++{
++ return !uses_num(node);
++}
++
++// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
++bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
++{
++ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
++ gimple def_stmt = get_def_stmt(lhs);
++
++ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs_type = TREE_TYPE(rhs1);
++ lhs_type = TREE_TYPE(lhs);
++ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
++ return false;
++ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
++ return false;
++
++ def_stmt = get_def_stmt(rhs1);
++ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++ return false;
++
++ if (is_gimple_constant(rhs2))
++ not_const_rhs = rhs1;
++ else
++ not_const_rhs = rhs2;
++
++ return no_uses(not_const_rhs);
++}
++
++static bool is_lt_signed_type_max(const_tree rhs)
++{
++ const_tree new_type, type_max, type = TREE_TYPE(rhs);
++
++ if (!TYPE_UNSIGNED(type))
++ return true;
++
++ switch (TYPE_MODE(type)) {
++ case QImode:
++ new_type = intQI_type_node;
++ break;
++ case HImode:
++ new_type = intHI_type_node;
++ break;
++ case SImode:
++ new_type = intSI_type_node;
++ break;
++ case DImode:
++ new_type = intDI_type_node;
++ break;
++ default:
++ debug_tree((tree)type);
++ gcc_unreachable();
++ }
++
++ type_max = TYPE_MAX_VALUE(new_type);
++ if (!tree_int_cst_lt(type_max, rhs))
++ return true;
++
++ return false;
++}
++
++static bool is_gt_zero(const_tree rhs)
++{
++ const_tree type = TREE_TYPE(rhs);
++
++ if (TYPE_UNSIGNED(type))
++ return true;
++
++ if (!tree_int_cst_lt(rhs, integer_zero_node))
++ return true;
++
++ return false;
++}
++
++bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
++{
++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
++ return false;
++ if (!is_gimple_constant(rhs))
++ return false;
++
++ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow
++ if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs))
++ return false;
++
++ return true;
++}
++
++static tree change_assign_rhs(struct visited *visited, gimple stmt, const_tree orig_rhs, tree new_rhs)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree origtype = TREE_TYPE(orig_rhs);
++
++ gcc_assert(is_gimple_assign(stmt));
++
++ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, assign);
++ return gimple_assign_lhs(assign);
++}
++
++tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
++{
++ tree new_rhs, orig_rhs;
++ void (*gimple_assign_set_rhs)(gimple, tree);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++ tree lhs = gimple_assign_lhs(stmt);
++
++ if (!check_overflow)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (change_rhs == NULL_TREE)
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++ if (new_rhs2 == NULL_TREE) {
++ orig_rhs = rhs1;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
++ } else {
++ orig_rhs = rhs2;
++ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
++ }
++
++ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
++
++ new_rhs = change_assign_rhs(visited, stmt, orig_rhs, change_rhs);
++ gimple_assign_set_rhs(stmt, new_rhs);
++ update_stmt(stmt);
++
++ return create_assign(visited, stmt, lhs, AFTER_STMT);
++}
++
++static bool is_subtraction_special(struct visited *visited, const_gimple stmt)
++{
++ gimple rhs1_def_stmt, rhs2_def_stmt;
++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
++ const_tree rhs1 = gimple_assign_rhs1(stmt);
++ const_tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
++ return false;
++
++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
++
++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
++ return false;
++
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ rhs2_def_stmt = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ return false;
++
++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
++ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
++ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
++ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
++ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
++ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
++ return false;
++ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
++ return false;
++
++ pointer_set_insert(visited->no_cast_check, rhs1_def_stmt);
++ pointer_set_insert(visited->no_cast_check, rhs2_def_stmt);
++ return true;
++}
++
++static gimple create_binary_assign(struct visited *visited, enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++{
++ gimple assign;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++ tree type = TREE_TYPE(rhs1);
++ tree lhs = create_new_var(type);
++
++ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ pointer_set_insert(visited->my_stmts, assign);
++ return assign;
++}
++
++static tree cast_to_TI_type(struct visited *visited, gimple stmt, tree node)
++{
++ gimple_stmt_iterator gsi;
++ gimple cast_stmt;
++ tree type = TREE_TYPE(node);
++
++ if (types_compatible_p(type, intTI_type_node))
++ return node;
++
++ gsi = gsi_for_stmt(stmt);
++ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ pointer_set_insert(visited->my_stmts, cast_stmt);
++ return gimple_assign_lhs(cast_stmt);
++}
++
++static tree get_def_stmt_rhs(struct visited *visited, const_tree var)
++{
++ tree rhs1, def_stmt_rhs1;
++ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
++
++ def_stmt = get_def_stmt(var);
++ if (!gimple_assign_cast_p(def_stmt))
++ return NULL_TREE;
++ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && pointer_set_contains(visited->my_stmts, def_stmt) && gimple_assign_cast_p(def_stmt));
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1_def_stmt = get_def_stmt(rhs1);
++ if (!gimple_assign_cast_p(rhs1_def_stmt))
++ return rhs1;
++
++ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
++
++ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
++ case GIMPLE_CALL:
++ case GIMPLE_NOP:
++ case GIMPLE_ASM:
++ case GIMPLE_PHI:
++ return def_stmt_rhs1;
++ case GIMPLE_ASSIGN:
++ return rhs1;
++ default:
++ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
++ gcc_unreachable();
++ }
++}
++
++tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs)
++{
++ tree new_rhs1, new_rhs2;
++ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
++ gimple assign, stmt = get_def_stmt(lhs);
++ tree rhs1 = gimple_assign_rhs1(stmt);
++ tree rhs2 = gimple_assign_rhs2(stmt);
++
++ if (!is_subtraction_special(visited, stmt))
++ return NULL_TREE;
++
++ new_rhs1 = expand(visited, caller_node, rhs1);
++ new_rhs2 = expand(visited, caller_node, rhs2);
++
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs2);
++
++ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
++ return NULL_TREE;
++
++ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
++ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs1_def_stmt_rhs1);
++ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(visited, stmt, new_rhs2_def_stmt_rhs1);
++ }
++
++ assign = create_binary_assign(visited, MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
++ new_lhs = gimple_assign_lhs(assign);
++ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
++
++ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
++}
++
++bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
++{
++ const_gimple def_stmt;
++
++ if (TREE_CODE(rhs) != SSA_NAME)
++ return false;
++
++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
++ return false;
++
++ def_stmt = get_def_stmt(rhs);
++ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
++ return false;
++
++ return true;
++}
++
++/* e.g., drivers/acpi/acpica/utids.c acpi_ut_execute_CID()
++ * ((count - 1) * sizeof(struct acpi_pnp_dee_id_list) -> (count + fffffff) * 16
++ * fffffff * 16 > signed max -> truncate
++ */
++static bool look_for_mult_and_add(const_gimple stmt)
++{
++ const_tree res;
++ tree rhs1, rhs2, def_rhs1, def_rhs2, const_rhs, def_const_rhs;
++ const_gimple def_stmt;
++
++ if (!stmt || gimple_code(stmt) == GIMPLE_NOP)
++ return false;
++ if (!is_gimple_assign(stmt))
++ return false;
++ if (gimple_assign_rhs_code(stmt) != MULT_EXPR)
++ return false;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ rhs2 = gimple_assign_rhs2(stmt);
++ if (is_gimple_constant(rhs1)) {
++ const_rhs = rhs1;
++ def_stmt = get_def_stmt(rhs2);
++ } else if (is_gimple_constant(rhs2)) {
++ const_rhs = rhs2;
++ def_stmt = get_def_stmt(rhs1);
++ } else
++ return false;
++
++ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
++ return false;
++
++ def_rhs1 = gimple_assign_rhs1(def_stmt);
++ def_rhs2 = gimple_assign_rhs2(def_stmt);
++ if (is_gimple_constant(def_rhs1))
++ def_const_rhs = def_rhs1;
++ else if (is_gimple_constant(def_rhs2))
++ def_const_rhs = def_rhs2;
++ else
++ return false;
++
++ res = fold_binary_loc(gimple_location(def_stmt), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
++ if (is_lt_signed_type_max(res) && is_gt_zero(res))
++ return false;
++ return true;
++}
++
++enum intentional_overflow_type add_mul_intentional_overflow(const_gimple stmt)
++{
++ const_gimple def_stmt_1, def_stmt_2;
++ const_tree rhs1, rhs2;
++ bool add_mul_rhs1, add_mul_rhs2;
++
++ rhs1 = gimple_assign_rhs1(stmt);
++ def_stmt_1 = get_def_stmt(rhs1);
++ add_mul_rhs1 = look_for_mult_and_add(def_stmt_1);
++
++ rhs2 = gimple_assign_rhs2(stmt);
++ def_stmt_2 = get_def_stmt(rhs2);
++ add_mul_rhs2 = look_for_mult_and_add(def_stmt_2);
++
++ if (add_mul_rhs1)
++ return RHS1_INTENTIONAL_OVERFLOW;
++ if (add_mul_rhs2)
++ return RHS2_INTENTIONAL_OVERFLOW;
++ return NO_INTENTIONAL_OVERFLOW;
++}
++
++static gimple get_dup_stmt(struct visited *visited, gimple stmt)
++{
++ gimple my_stmt;
++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
++
++ gsi_next(&gsi);
++ my_stmt = gsi_stmt(gsi);
++
++ gcc_assert(pointer_set_contains(visited->my_stmts, my_stmt));
++ gcc_assert(gimple_assign_rhs_code(stmt) == gimple_assign_rhs_code(my_stmt));
++
++ return my_stmt;
++}
++
++/* unsigned type -> unary or binary assign (rhs1 or rhs2 is constant)
++ * unsigned type cast to signed type, unsigned type: no more uses
++ * e.g., lib/vsprintf.c:simple_strtol()
++ * _10 = (unsigned long int) _9
++ * _11 = -_10;
++ * _12 = (long int) _11; (_11_ no more uses)
++ */
++static bool is_call_or_cast(gimple stmt)
++{
++ return gimple_assign_cast_p(stmt) || is_gimple_call(stmt);
++}
++
++static bool is_unsigned_cast_or_call_def_stmt(const_tree node)
++{
++ const_tree rhs;
++ gimple def_stmt;
++
++ if (node == NULL_TREE)
++ return true;
++ if (is_gimple_constant(node))
++ return true;
++
++ def_stmt = get_def_stmt(node);
++ if (!def_stmt)
++ return false;
++
++ if (is_call_or_cast(def_stmt))
++ return true;
++
++ if (!is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 2)
++ return false;
++ rhs = gimple_assign_rhs1(def_stmt);
++ def_stmt = get_def_stmt(rhs);
++ if (!def_stmt)
++ return false;
++ return is_call_or_cast(def_stmt);
++}
++
++void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt)
++{
++ unsigned int use_num;
++ gimple so_stmt;
++ const_gimple def_stmt;
++ const_tree rhs1, rhs2;
++ tree rhs = gimple_assign_rhs1(stmt);
++ tree lhs_type = TREE_TYPE(gimple_assign_lhs(stmt));
++ const_tree rhs_type = TREE_TYPE(rhs);
++
++ if (!(TYPE_UNSIGNED(rhs_type) && !TYPE_UNSIGNED(lhs_type)))
++ return;
++ if (GET_MODE_BITSIZE(TYPE_MODE(rhs_type)) != GET_MODE_BITSIZE(TYPE_MODE(lhs_type)))
++ return;
++ use_num = uses_num(rhs);
++ if (use_num != 1)
++ return;
++
++ def_stmt = get_def_stmt(rhs);
++ if (!def_stmt)
++ return;
++ if (!is_gimple_assign(def_stmt))
++ return;
++
++ rhs1 = gimple_assign_rhs1(def_stmt);
++ if (!is_unsigned_cast_or_call_def_stmt(rhs1))
++ return;
++
++ rhs2 = gimple_assign_rhs2(def_stmt);
++ if (!is_unsigned_cast_or_call_def_stmt(rhs2))
++ return;
++ if (gimple_num_ops(def_stmt) == 3 && !is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++ return;
++
++ so_stmt = get_dup_stmt(visited, stmt);
++ create_up_and_down_cast(visited, so_stmt, lhs_type, gimple_assign_rhs1(so_stmt));
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c
+new file mode 100644
+index 0000000..4bddad2
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/misc.c
+@@ -0,0 +1,203 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++void set_current_function_decl(tree fndecl)
++{
++ gcc_assert(fndecl != NULL_TREE);
++
++ push_cfun(DECL_STRUCT_FUNCTION(fndecl));
++ calculate_dominance_info(CDI_DOMINATORS);
++ current_function_decl = fndecl;
++}
++
++void unset_current_function_decl(void)
++{
++ free_dominance_info(CDI_DOMINATORS);
++ pop_cfun();
++ current_function_decl = NULL_TREE;
++}
++
++static bool is_bool(const_tree node)
++{
++ const_tree type;
++
++ if (node == NULL_TREE)
++ return false;
++
++ type = TREE_TYPE(node);
++ if (!INTEGRAL_TYPE_P(type))
++ return false;
++ if (TREE_CODE(type) == BOOLEAN_TYPE)
++ return true;
++ if (TYPE_PRECISION(type) == 1)
++ return true;
++ return false;
++}
++
++bool skip_types(const_tree var)
++{
++ tree type;
++ enum tree_code code;
++
++ if (is_gimple_constant(var))
++ return true;
++
++ switch (TREE_CODE(var)) {
++ case ADDR_EXPR:
++#if BUILDING_GCC_VERSION >= 4006
++ case MEM_REF:
++#endif
++ case ARRAY_REF:
++ case BIT_FIELD_REF:
++ case INDIRECT_REF:
++ case TARGET_MEM_REF:
++ case COMPONENT_REF:
++ case VAR_DECL:
++ case VIEW_CONVERT_EXPR:
++ return true;
++ default:
++ break;
++ }
++
++ code = TREE_CODE(var);
++ gcc_assert(code == SSA_NAME || code == PARM_DECL);
++
++ type = TREE_TYPE(var);
++ switch (TREE_CODE(type)) {
++ case INTEGER_TYPE:
++ case ENUMERAL_TYPE:
++ return false;
++ case BOOLEAN_TYPE:
++ return is_bool(var);
++ default:
++ return true;
++ }
++}
++
++gimple get_def_stmt(const_tree node)
++{
++ gcc_assert(node != NULL_TREE);
++
++ if (skip_types(node))
++ return NULL;
++
++ if (TREE_CODE(node) != SSA_NAME)
++ return NULL;
++ return SSA_NAME_DEF_STMT(node);
++}
++
++tree create_new_var(tree type)
++{
++ tree new_var = create_tmp_var(type, "cicus");
++
++ add_referenced_var(new_var);
++ return new_var;
++}
++
++static bool skip_cast(tree dst_type, const_tree rhs, bool force)
++{
++ const_gimple def_stmt = get_def_stmt(rhs);
++
++ if (force)
++ return false;
++
++ if (is_gimple_constant(rhs))
++ return false;
++
++ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
++ return false;
++
++ if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
++ return false;
++
++ // DI type can be on 32 bit (from create_assign) but overflow type stays DI
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
++ return false;
++
++ return true;
++}
++
++tree cast_a_tree(tree type, tree var)
++{
++ gcc_assert(type != NULL_TREE);
++ gcc_assert(var != NULL_TREE);
++ gcc_assert(fold_convertible_p(type, var));
++
++ return fold_convert(type, var);
++}
++
++gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
++{
++ gimple assign, def_stmt;
++
++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
++ gcc_assert(!is_gimple_constant(rhs));
++ if (gsi_end_p(*gsi) && before == AFTER_STMT)
++ gcc_unreachable();
++
++ def_stmt = get_def_stmt(rhs);
++ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && pointer_set_contains(visited->my_stmts, def_stmt))
++ return def_stmt;
++
++ if (lhs == CREATE_NEW_VAR)
++ lhs = create_new_var(dst_type);
++
++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
++
++ if (!gsi_end_p(*gsi)) {
++ location_t loc = gimple_location(gsi_stmt(*gsi));
++ gimple_set_location(assign, loc);
++ }
++
++ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
++
++ if (before)
++ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
++ else
++ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
++ update_stmt(assign);
++ return assign;
++}
++
++bool is_size_overflow_type(const_tree var)
++{
++ const char *name;
++ const_tree type_name, type;
++
++ if (var == NULL_TREE)
++ return false;
++
++ type = TREE_TYPE(var);
++ type_name = TYPE_NAME(type);
++ if (type_name == NULL_TREE)
++ return false;
++
++ if (DECL_P(type_name))
++ name = DECL_NAME_POINTER(type_name);
++ else
++ name = IDENTIFIER_POINTER(type_name);
++
++ if (!strncmp(name, "size_overflow_type", 18))
++ return true;
++ return false;
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
+new file mode 100644
+index 0000000..7c9e6d1
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
+@@ -0,0 +1,138 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++#include "size_overflow.h"
++
++bool skip_expr_on_double_type(const_gimple stmt)
++{
++ enum tree_code code = gimple_assign_rhs_code(stmt);
++
++ switch (code) {
++ case RSHIFT_EXPR:
++ case TRUNC_DIV_EXPR:
++ case CEIL_DIV_EXPR:
++ case FLOOR_DIV_EXPR:
++ case ROUND_DIV_EXPR:
++ case EXACT_DIV_EXPR:
++ case RDIV_EXPR:
++ case TRUNC_MOD_EXPR:
++ case CEIL_MOD_EXPR:
++ case FLOOR_MOD_EXPR:
++ case ROUND_MOD_EXPR:
++ return true;
++ default:
++ return false;
++ }
++}
++
++void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs)
++{
++ const_tree orig_rhs1;
++ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs);
++ gimple down_cast, up_cast;
++ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt);
++
++ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ down_lhs = gimple_assign_lhs(down_cast);
++
++ gsi = gsi_for_stmt(use_stmt);
++ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
++ new_lhs = gimple_assign_lhs(up_cast);
++
++ orig_rhs1 = gimple_assign_rhs1(use_stmt);
++ if (operand_equal_p(orig_rhs1, rhs, 0))
++ gimple_assign_set_rhs1(use_stmt, new_lhs);
++ else
++ gimple_assign_set_rhs2(use_stmt, new_lhs);
++ update_stmt(use_stmt);
++
++ pointer_set_insert(visited->my_stmts, up_cast);
++ pointer_set_insert(visited->my_stmts, down_cast);
++ pointer_set_insert(visited->skip_expr_casts, up_cast);
++ pointer_set_insert(visited->skip_expr_casts, down_cast);
++}
++
++static tree get_proper_unsigned_half_type(const_tree node)
++{
++ tree new_type, type;
++
++ gcc_assert(is_size_overflow_type(node));
++
++ type = TREE_TYPE(node);
++ switch (TYPE_MODE(type)) {
++ case HImode:
++ new_type = unsigned_intQI_type_node;
++ break;
++ case SImode:
++ new_type = unsigned_intHI_type_node;
++ break;
++ case DImode:
++ new_type = unsigned_intSI_type_node;
++ break;
++ case TImode:
++ new_type = unsigned_intDI_type_node;
++ break;
++ default:
++ gcc_unreachable();
++ }
++
++ if (TYPE_QUALS(type) != 0)
++ return build_qualified_type(new_type, TYPE_QUALS(type));
++ return new_type;
++}
++
++static void insert_cast_rhs(struct visited *visited, gimple stmt, tree rhs)
++{
++ tree type;
++
++ if (rhs == NULL_TREE)
++ return;
++ if (!is_size_overflow_type(rhs))
++ return;
++
++ type = get_proper_unsigned_half_type(rhs);
++ if (is_gimple_constant(rhs))
++ return;
++ create_up_and_down_cast(visited, stmt, type, rhs);
++}
++
++static void insert_cast(struct visited *visited, gimple stmt, tree rhs)
++{
++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && !is_size_overflow_type(rhs))
++ return;
++ gcc_assert(is_size_overflow_type(rhs));
++ insert_cast_rhs(visited, stmt, rhs);
++}
++
++void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type)
++{
++ tree rhs1, rhs2;
++
++ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS1_INTENTIONAL_OVERFLOW) {
++ rhs1 = gimple_assign_rhs1(stmt);
++ insert_cast(visited, stmt, rhs1);
++ }
++
++ if (type == NO_INTENTIONAL_OVERFLOW || type == RHS2_INTENTIONAL_OVERFLOW) {
++ rhs2 = gimple_assign_rhs2(stmt);
++ insert_cast(visited, stmt, rhs2);
++ }
++}
++
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h
+new file mode 100644
+index 0000000..e5b4e50
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/size_overflow.h
+@@ -0,0 +1,127 @@
++#ifndef SIZE_OVERFLOW_H
++#define SIZE_OVERFLOW_H
++
++#define CREATE_NEW_VAR NULL_TREE
++#define CANNOT_FIND_ARG 32
++#define MAX_PARAM 31
++#define BEFORE_STMT true
++#define AFTER_STMT false
++
++#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
++#define YES_ASM_STR "# size_overflow MARK_YES "
++#define OK_ASM_STR "# size_overflow "
++
++enum mark {
++ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
++};
++
++enum intentional_overflow_type {
++ NO_INTENTIONAL_OVERFLOW, RHS1_INTENTIONAL_OVERFLOW, RHS2_INTENTIONAL_OVERFLOW
++};
++
++struct visited {
++ struct pointer_set_t *stmts;
++ struct pointer_set_t *my_stmts;
++ struct pointer_set_t *skip_expr_casts;
++ struct pointer_set_t *no_cast_check;
++};
++
++// size_overflow_plugin.c
++extern tree report_size_overflow_decl;
++extern tree size_overflow_type_HI;
++extern tree size_overflow_type_SI;
++extern tree size_overflow_type_DI;
++extern tree size_overflow_type_TI;
++
++
++// size_overflow_plugin_hash.c
++struct size_overflow_hash {
++ const struct size_overflow_hash * const next;
++ const char * const name;
++ const unsigned int param;
++};
++
++struct interesting_node {
++ struct interesting_node *next;
++ gimple first_stmt;
++ const_tree fndecl;
++ tree node;
++#if BUILDING_GCC_VERSION <= 4007
++ VEC(tree, gc) *last_nodes;
++#else
++ vec<tree, va_gc> *last_nodes;
++#endif
++ unsigned int num;
++ enum mark intentional_attr_decl;
++ enum mark intentional_attr_cur_fndecl;
++ gimple intentional_mark_from_gimple;
++};
++
++extern bool is_size_overflow_asm(const_gimple stmt);
++extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl);
++extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl);
++extern bool is_missing_function(const_tree orig_fndecl, unsigned int num);
++extern bool is_a_return_check(const_tree node);
++extern const struct size_overflow_hash *get_function_hash(const_tree fndecl);
++extern unsigned int find_arg_number_tree(const_tree arg, const_tree func);
++
++
++// size_overflow_debug.c
++extern struct opt_pass *make_dump_pass(void);
++
++
++// intentional_overflow.c
++extern enum mark get_intentional_attr_type(const_tree node);
++extern bool is_size_overflow_intentional_asm_yes(const_gimple stmt);
++extern bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt);
++extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum);
++extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum);
++extern bool is_turn_off_intentional_attr(const_tree decl);
++extern void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum);
++extern void check_intentional_attribute_ipa(struct interesting_node *cur_node);
++extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs);
++extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs);
++extern bool is_a_constant_overflow(const_gimple stmt, const_tree rhs);
++extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2);
++extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs);
++extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs);
++extern enum intentional_overflow_type add_mul_intentional_overflow(const_gimple def_stmt);
++extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt);
++
++
++// insert_size_overflow_check_ipa.c
++extern unsigned int search_function(void);
++extern unsigned int call_count;
++extern struct opt_pass *make_insert_size_overflow_check(void);
++extern const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum);
++
++
++// insert_size_overflow_asm.c
++extern struct opt_pass *make_insert_size_overflow_asm_pass(void);
++
++
++// misc.c
++extern void set_current_function_decl(tree fndecl);
++extern void unset_current_function_decl(void);
++extern gimple get_def_stmt(const_tree node);
++extern tree create_new_var(tree type);
++extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force);
++extern bool skip_types(const_tree var);
++extern tree cast_a_tree(tree type, tree var);
++extern bool is_size_overflow_type(const_tree var);
++
++
++// insert_size_overflow_check_core.c
++extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs);
++extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
++extern tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
++extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before);
++
++
++// remove_unnecessary_dup.c
++extern struct opt_pass *make_remove_unnecessary_dup_pass(void);
++extern void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type);
++extern bool skip_expr_on_double_type(const_gimple stmt);
++extern void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs);
++
++#endif
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
+new file mode 100644
+index 0000000..4378111
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
+@@ -0,0 +1,116 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
++
++#include "gcc-common.h"
++
++static unsigned int dump_functions(void)
++{
++ struct cgraph_node *node;
++
++ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
++ basic_block bb;
++
++ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
++ current_function_decl = NODE_DECL(node);
++
++ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
++
++ FOR_ALL_BB_FN(bb, cfun) {
++ gimple_stmt_iterator si;
++
++ fprintf(stderr, "<bb %u>:\n", bb->index);
++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
++ debug_gimple_stmt(gsi_stmt(si));
++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
++ debug_gimple_stmt(gsi_stmt(si));
++ fprintf(stderr, "\n");
++ }
++
++ fprintf(stderr, "-------------------------------------------------------------------------\n");
++
++ pop_cfun();
++ current_function_decl = NULL_TREE;
++ }
++
++ fprintf(stderr, "###############################################################################\n");
++
++ return 0;
++}
++
++#if BUILDING_GCC_VERSION >= 4009
++static const struct pass_data dump_pass_data = {
++#else
++static struct ipa_opt_pass_d dump_pass = {
++ .pass = {
++#endif
++ .type = SIMPLE_IPA_PASS,
++ .name = "dump",
++#if BUILDING_GCC_VERSION >= 4008
++ .optinfo_flags = OPTGROUP_NONE,
++#endif
++#if BUILDING_GCC_VERSION >= 4009
++ .has_gate = false,
++ .has_execute = true,
++#else
++ .gate = NULL,
++ .execute = dump_functions,
++ .sub = NULL,
++ .next = NULL,
++ .static_pass_number = 0,
++#endif
++ .tv_id = TV_NONE,
++ .properties_required = 0,
++ .properties_provided = 0,
++ .properties_destroyed = 0,
++ .todo_flags_start = 0,
++ .todo_flags_finish = 0,
++#if BUILDING_GCC_VERSION < 4009
++ },
++ .generate_summary = NULL,
++ .write_summary = NULL,
++ .read_summary = NULL,
++#if BUILDING_GCC_VERSION >= 4006
++ .write_optimization_summary = NULL,
++ .read_optimization_summary = NULL,
++#endif
++ .stmt_fixup = NULL,
++ .function_transform_todo_flags_start = 0,
++ .function_transform = NULL,
++ .variable_transform = NULL,
++#endif
++};
++
++#if BUILDING_GCC_VERSION >= 4009
++namespace {
++class dump_pass : public ipa_opt_pass_d {
++public:
++ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++ unsigned int execute() { return dump_functions(); }
++};
++}
++#endif
++
++struct opt_pass *make_dump_pass(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return new dump_pass();
++#else
++ return &dump_pass.pass;
++#endif
++}
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..7435554
+index 0000000..a75d300
--- /dev/null
-+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,5105 @@
++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
+@@ -0,0 +1,5106 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
@@ -116139,7 +120536,7 @@ index 0000000..7435554
+find_mergeable_31093 find_mergeable 2 31093 NULL
+compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
+depth_read_31112 depth_read 3 31112 NULL
-+kvm_mmu_pte_write_31120 kvm_mmu_pte_write 2 31120 NULL
++kvm_mmu_pte_write_31120 kvm_mmu_pte_write 2-4 31120 NULL
+ssb_read16_31139 ssb_read16 0 31139 NULL
+kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
+size_inside_page_31141 size_inside_page 0 31141 NULL
@@ -118755,6 +123152,7 @@ index 0000000..7435554
+ip_vs_create_timeout_table_64478 ip_vs_create_timeout_table 2 64478 NULL
+alloc_large_system_hash_64490 alloc_large_system_hash 2 64490 NULL
+p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
++emulator_cmpxchg_emulated_64501 emulator_cmpxchg_emulated 5 64501 NULL
+msg_data_sz_64503 msg_data_sz 0 64503 NULL
+crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
+opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
@@ -118824,11 +123222,11 @@ index 0000000..7435554
+ath_rx_edma_init_65483 ath_rx_edma_init 2 65483 NULL
+alloc_dr_65495 alloc_dr 2 65495 NULL
+selnl_msglen_65499 selnl_msglen 0 65499 NULL
-diff --git a/tools/gcc/size_overflow_hash_aux.data b/tools/gcc/size_overflow_hash_aux.data
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data
new file mode 100644
index 0000000..4ad4525
--- /dev/null
-+++ b/tools/gcc/size_overflow_hash_aux.data
++++ b/tools/gcc/size_overflow_plugin/size_overflow_hash_aux.data
@@ -0,0 +1,91 @@
+spa_set_aux_vdevs_746 spa_set_aux_vdevs 3 746 NULL
+zfs_lookup_2144 zfs_lookup 0 2144 NULL
@@ -118921,12 +123319,12 @@ index 0000000..4ad4525
+proc_copyin_string_62019 proc_copyin_string 4 62019 NULL
+random_get_pseudo_bytes_64611 random_get_pseudo_bytes 2 64611 NULL
+zpios_read_64734 zpios_read 3 64734 NULL
-diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..948ec25
+index 0000000..e6fe17b
--- /dev/null
-+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,4169 @@
++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
+@@ -0,0 +1,259 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -118942,100 +123340,27 @@ index 0000000..948ec25
+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
+ *
+ * Usage:
-+ * $ # for 4.5/4.6/C based 4.7
-+ * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu99 -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
-+ * $ # for C++ based 4.7/4.8+
-+ * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -std=gnu++98 -fno-rtti -ggdb -o size_overflow_plugin.so size_overflow_plugin.c
-+ *
-+ * $ gcc -fplugin=./size_overflow_plugin.so test.c -O2
++ * $ make
++ * $ make run
+ */
+
+#include "gcc-common.h"
++#include "size_overflow.h"
+
+int plugin_is_GPL_compatible;
+
-+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140407",
-+ .help = "no-size-overflow\tturn off size overflow checking\n",
-+};
-+
-+#define BEFORE_STMT true
-+#define AFTER_STMT false
-+#define CREATE_NEW_VAR NULL_TREE
-+#define CODES_LIMIT 32
-+#define MAX_PARAM 31
-+#define VEC_LEN 128
-+#define RET_CHECK NULL_TREE
-+#define CANNOT_FIND_ARG 32
-+#define WRONG_NODE 32
-+#define NOT_INTENTIONAL_ASM NULL
-+#define MIN_CHECK true
-+#define MAX_CHECK false
-+
-+#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF "
-+#define YES_ASM_STR "# size_overflow MARK_YES "
-+#define OK_ASM_STR "# size_overflow "
++tree report_size_overflow_decl;
+
-+struct size_overflow_hash {
-+ const struct size_overflow_hash * const next;
-+ const char * const name;
-+ const unsigned int param;
-+};
-+
-+#include "size_overflow_hash.h"
-+#include "size_overflow_hash_aux.h"
++tree size_overflow_type_HI;
++tree size_overflow_type_SI;
++tree size_overflow_type_DI;
++tree size_overflow_type_TI;
+
-+enum mark {
-+ MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
-+};
-+
-+static unsigned int call_count;
-+
-+enum stmt_flags {
-+ MY_STMT, NO_CAST_CHECK, VISITED_STMT, NO_FLAGS
-+};
-+
-+struct visited {
-+ struct visited *next;
-+ const_tree fndecl;
-+ unsigned int num;
-+};
-+
-+struct next_cgraph_node {
-+ struct next_cgraph_node *next;
-+ struct cgraph_node *current_function;
-+ tree callee_fndecl;
-+ unsigned int num;
-+};
-+
-+struct interesting_node {
-+ struct interesting_node *next;
-+ gimple first_stmt;
-+ const_tree fndecl;
-+ tree node;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *last_nodes;
-+#else
-+ vec<tree, va_gc> *last_nodes;
-+#endif
-+ unsigned int num;
-+ enum mark intentional_attr_decl;
-+ enum mark intentional_attr_cur_fndecl;
-+ gimple intentional_mark_from_gimple;
++static struct plugin_info size_overflow_plugin_info = {
++ .version = "20140517",
++ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
-+static tree report_size_overflow_decl;
-+
-+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
-+
-+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
-+static tree get_size_overflow_type(gimple stmt, const_tree node);
-+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
-+
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
+{
+ unsigned int arg_count;
@@ -119131,117 +123456,166 @@ index 0000000..948ec25
+ register_attribute(&intentional_overflow_attr);
+}
+
-+static enum stmt_flags get_stmt_flag(gimple stmt)
++static tree create_typedef(tree type, const char* ident)
+{
-+ bool bit_1, bit_2;
++ tree new_type, decl;
+
-+ bit_1 = gimple_plf(stmt, GF_PLF_1);
-+ bit_2 = gimple_plf(stmt, GF_PLF_2);
-+
-+ if (!bit_1 && !bit_2)
-+ return NO_FLAGS;
-+ if (bit_1 && bit_2)
-+ return MY_STMT;
-+ if (!bit_1 && bit_2)
-+ return VISITED_STMT;
-+ return NO_CAST_CHECK;
++ new_type = build_variant_type_copy(type);
++ decl = build_decl(BUILTINS_LOCATION, TYPE_DECL, get_identifier(ident), new_type);
++ DECL_ORIGINAL_TYPE(decl) = type;
++ TYPE_NAME(new_type) = decl;
++ return new_type;
+}
+
-+static void set_stmt_flag(gimple stmt, enum stmt_flags new_flag)
++// Create the noreturn report_size_overflow() function decl.
++static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
+{
-+ bool bit_1, bit_2;
++ tree const_char_ptr_type_node;
++ tree fntype;
+
-+ switch (new_flag) {
-+ case NO_FLAGS:
-+ bit_1 = bit_2 = false;
-+ break;
-+ case MY_STMT:
-+ bit_1 = bit_2 = true;
-+ break;
-+ case VISITED_STMT:
-+ bit_1 = false;
-+ bit_2 = true;
-+ break;
-+ case NO_CAST_CHECK:
-+ bit_1 = true;
-+ bit_2 = false;
-+ break;
-+ default:
-+ gcc_unreachable();
-+ }
++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
++
++ size_overflow_type_HI = create_typedef(intHI_type_node, "size_overflow_type_HI");
++ size_overflow_type_SI = create_typedef(intSI_type_node, "size_overflow_type_SI");
++ size_overflow_type_DI = create_typedef(intDI_type_node, "size_overflow_type_DI");
++ size_overflow_type_TI = create_typedef(intTI_type_node, "size_overflow_type_TI");
+
-+ gimple_set_plf(stmt, GF_PLF_1, bit_1);
-+ gimple_set_plf(stmt, GF_PLF_2, bit_2);
++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
++ fntype = build_function_type_list(void_type_node,
++ const_char_ptr_type_node,
++ unsigned_type_node,
++ const_char_ptr_type_node,
++ const_char_ptr_type_node,
++ NULL_TREE);
++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
++
++ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
++ TREE_PUBLIC(report_size_overflow_decl) = 1;
++ DECL_EXTERNAL(report_size_overflow_decl) = 1;
++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
+}
+
-+static bool is_bool(const_tree node)
-+{
-+ const_tree type;
+
-+ if (node == NULL_TREE)
-+ return false;
++extern struct gimple_opt_pass pass_dce;
+
-+ type = TREE_TYPE(node);
-+ if (!INTEGRAL_TYPE_P(type))
-+ return false;
-+ if (TREE_CODE(type) == BOOLEAN_TYPE)
-+ return true;
-+ if (TYPE_PRECISION(type) == 1)
-+ return true;
-+ return false;
++static struct opt_pass *make_dce_pass(void)
++{
++#if BUILDING_GCC_VERSION >= 4009
++ return make_pass_dce(g);
++#else
++ return &pass_dce.pass;
++#endif
+}
+
-+static bool skip_types(const_tree var)
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
-+ tree type;
-+ enum tree_code code;
++ int i;
++ const char * const plugin_name = plugin_info->base_name;
++ const int argc = plugin_info->argc;
++ const struct plugin_argument * const argv = plugin_info->argv;
++ bool enable = true;
++ struct register_pass_info insert_size_overflow_asm_pass_info;
++ struct register_pass_info __unused dump_before_pass_info;
++ struct register_pass_info __unused dump_after_pass_info;
++ struct register_pass_info insert_size_overflow_check_info;
++ struct register_pass_info dce_pass_info;
++ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
++ {
++ .base = &report_size_overflow_decl,
++ .nelt = 1,
++ .stride = sizeof(report_size_overflow_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
-+ if (is_gimple_constant(var))
-+ return true;
++ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
++ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
++ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
++ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
+
-+ switch (TREE_CODE(var)) {
-+ case ADDR_EXPR:
-+#if BUILDING_GCC_VERSION >= 4006
-+ case MEM_REF:
-+#endif
-+ case ARRAY_REF:
-+ case BIT_FIELD_REF:
-+ case INDIRECT_REF:
-+ case TARGET_MEM_REF:
-+ case COMPONENT_REF:
-+ case VAR_DECL:
-+ case VIEW_CONVERT_EXPR:
-+ return true;
-+ default:
-+ break;
++ dump_before_pass_info.pass = make_dump_pass();
++ dump_before_pass_info.reference_pass_name = "increase_alignment";
++ dump_before_pass_info.ref_pass_instance_number = 1;
++ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
++
++ insert_size_overflow_check_info.pass = make_insert_size_overflow_check();
++ insert_size_overflow_check_info.reference_pass_name = "increase_alignment";
++ insert_size_overflow_check_info.ref_pass_instance_number = 1;
++ insert_size_overflow_check_info.pos_op = PASS_POS_INSERT_BEFORE;
++
++ dump_after_pass_info.pass = make_dump_pass();
++ dump_after_pass_info.reference_pass_name = "increase_alignment";
++ dump_after_pass_info.ref_pass_instance_number = 1;
++ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
++
++ dce_pass_info.pass = make_dce_pass();
++ dce_pass_info.reference_pass_name = "vrp";
++ dce_pass_info.ref_pass_instance_number = 1;
++ dce_pass_info.pos_op = PASS_POS_INSERT_AFTER;
++
++ if (!plugin_default_version_check(version, &gcc_version)) {
++ error(G_("incompatible gcc/plugin versions"));
++ return 1;
+ }
+
-+ code = TREE_CODE(var);
-+ gcc_assert(code == SSA_NAME || code == PARM_DECL);
++ for (i = 0; i < argc; ++i) {
++ if (!strcmp(argv[i].key, "no-size-overflow")) {
++ enable = false;
++ continue;
++ }
++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++ }
+
-+ type = TREE_TYPE(var);
-+ switch (TREE_CODE(type)) {
-+ case INTEGER_TYPE:
-+ case ENUMERAL_TYPE:
-+ return false;
-+ case BOOLEAN_TYPE:
-+ return is_bool(var);
-+ default:
-+ return true;
++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
++ if (enable) {
++ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_check_info);
++// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dce_pass_info);
+ }
++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++ return 0;
+}
+diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
+new file mode 100644
+index 0000000..0888f6c
+--- /dev/null
++++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
+@@ -0,0 +1,364 @@
++/*
++ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * Documentation:
++ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ make
++ * $ make run
++ */
+
-+static inline gimple get_def_stmt(const_tree node)
-+{
-+ gcc_assert(node != NULL_TREE);
++#include "gcc-common.h"
++#include "size_overflow.h"
+
-+ if (skip_types(node))
-+ return NULL;
++#include "size_overflow_hash.h"
++#include "size_overflow_hash_aux.h"
+
-+ if (TREE_CODE(node) != SSA_NAME)
-+ return NULL;
-+ return SSA_NAME_DEF_STMT(node);
-+}
++#define CODES_LIMIT 32
+
+static unsigned char get_tree_code(const_tree type)
+{
@@ -119385,7 +123759,7 @@ index 0000000..948ec25
+ return NULL;
+}
+
-+static const struct size_overflow_hash *get_function_hash(const_tree fndecl)
++const struct size_overflow_hash *get_function_hash(const_tree fndecl)
+{
+ const struct size_overflow_hash *entry;
+ struct function_hash fn_hash_data;
@@ -119430,7 +123804,7 @@ index 0000000..948ec25
+ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
+}
+
-+static unsigned int find_arg_number_tree(const_tree arg, const_tree func)
++unsigned int find_arg_number_tree(const_tree arg, const_tree func)
+{
+ tree var;
+ unsigned int argnum = 1;
@@ -119448,1296 +123822,65 @@ index 0000000..948ec25
+ return CANNOT_FIND_ARG;
+}
+
-+static tree create_new_var(tree type)
-+{
-+ tree new_var = create_tmp_var(type, "cicus");
-+
-+ add_referenced_var(new_var);
-+ return new_var;
-+}
-+
-+static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree type = TREE_TYPE(rhs1);
-+ tree lhs = create_new_var(type);
-+
-+ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
-+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
-+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
-+
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+ set_stmt_flag(assign, MY_STMT);
-+ return assign;
-+}
-+
-+static tree cast_a_tree(tree type, tree var)
-+{
-+ gcc_assert(type != NULL_TREE);
-+ gcc_assert(var != NULL_TREE);
-+ gcc_assert(fold_convertible_p(type, var));
-+
-+ return fold_convert(type, var);
-+}
-+
-+static tree get_lhs(const_gimple stmt)
-+{
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ return gimple_get_lhs(stmt);
-+ case GIMPLE_PHI:
-+ return gimple_phi_result(stmt);
-+ default:
-+ return NULL_TREE;
-+ }
-+}
-+
-+static bool skip_cast(tree dst_type, const_tree rhs, bool force)
-+{
-+ const_gimple def_stmt = get_def_stmt(rhs);
-+
-+ if (force)
-+ return false;
-+
-+ if (is_gimple_constant(rhs))
-+ return false;
-+
-+ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+ return false;
-+
-+ if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
-+ return false;
-+
-+ // DI type can be on 32 bit (from create_assign) but overflow type stays DI
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ return false;
-+
-+ return true;
-+}
-+
-+static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
-+{
-+ gimple assign, def_stmt;
-+
-+ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
-+ if (gsi_end_p(*gsi) && before == AFTER_STMT)
-+ gcc_unreachable();
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && get_stmt_flag(def_stmt) == MY_STMT)
-+ return def_stmt;
-+
-+ if (lhs == CREATE_NEW_VAR)
-+ lhs = create_new_var(dst_type);
-+
-+ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
-+
-+ if (!gsi_end_p(*gsi)) {
-+ location_t loc = gimple_location(gsi_stmt(*gsi));
-+ gimple_set_location(assign, loc);
-+ }
-+
-+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
-+
-+ if (before)
-+ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
-+ else
-+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
-+ update_stmt(assign);
-+ return assign;
-+}
-+
-+static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
-+{
-+ gimple_stmt_iterator gsi;
-+ tree lhs;
-+ gimple new_stmt;
-+
-+ if (rhs == NULL_TREE)
-+ return NULL_TREE;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
-+ set_stmt_flag(new_stmt, MY_STMT);
-+
-+ lhs = get_lhs(new_stmt);
-+ gcc_assert(lhs != NULL_TREE);
-+ return lhs;
-+}
-+
-+static tree cast_to_TI_type(gimple stmt, tree node)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple cast_stmt;
-+ tree type = TREE_TYPE(node);
-+
-+ if (types_compatible_p(type, intTI_type_node))
-+ return node;
-+
-+ gsi = gsi_for_stmt(stmt);
-+ cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(cast_stmt, MY_STMT);
-+ return gimple_assign_lhs(cast_stmt);
-+}
-+
-+static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
-+{
-+ tree lhs, new_lhs;
-+ gimple_stmt_iterator gsi;
-+
-+ if (rhs1 == NULL_TREE) {
-+ debug_gimple_stmt(oldstmt);
-+ error("%s: rhs1 is NULL_TREE", __func__);
-+ gcc_unreachable();
-+ }
-+
-+ switch (gimple_code(oldstmt)) {
-+ case GIMPLE_ASM:
-+ lhs = rhs1;
-+ break;
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASSIGN:
-+ lhs = gimple_get_lhs(oldstmt);
-+ break;
-+ default:
-+ debug_gimple_stmt(oldstmt);
-+ gcc_unreachable();
-+ }
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ pointer_set_insert(visited, oldstmt);
-+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
-+ basic_block next_bb, cur_bb;
-+ const_edge e;
-+
-+ gcc_assert(before == false);
-+ gcc_assert(stmt_can_throw_internal(oldstmt));
-+ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
-+ gcc_assert(!gsi_end_p(gsi));
-+
-+ cur_bb = gimple_bb(oldstmt);
-+ next_bb = cur_bb->next_bb;
-+ e = find_edge(cur_bb, next_bb);
-+ gcc_assert(e != NULL);
-+ gcc_assert(e->flags & EDGE_FALLTHRU);
-+
-+ gsi = gsi_after_labels(next_bb);
-+ gcc_assert(!gsi_end_p(gsi));
-+
-+ before = true;
-+ oldstmt = gsi_stmt(gsi);
-+ }
-+
-+ new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
-+ return new_lhs;
-+}
-+
-+static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
-+{
-+ gimple stmt;
-+ gimple_stmt_iterator gsi;
-+ tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
-+
-+ if (get_stmt_flag(oldstmt) == MY_STMT)
-+ return lhs;
-+
-+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
-+ rhs1 = gimple_assign_rhs1(oldstmt);
-+ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
-+ }
-+ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
-+ rhs2 = gimple_assign_rhs2(oldstmt);
-+ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
-+ }
-+
-+ stmt = gimple_copy(oldstmt);
-+ gimple_set_location(stmt, gimple_location(oldstmt));
-+ set_stmt_flag(stmt, MY_STMT);
-+
-+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
-+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
-+
-+ size_overflow_type = get_size_overflow_type(oldstmt, node);
-+
-+ new_var = create_new_var(size_overflow_type);
-+ new_var = make_ssa_name(new_var, stmt);
-+ gimple_assign_set_lhs(stmt, new_var);
-+
-+ if (rhs1 != NULL_TREE)
-+ gimple_assign_set_rhs1(stmt, rhs1);
-+
-+ if (rhs2 != NULL_TREE)
-+ gimple_assign_set_rhs2(stmt, rhs2);
-+#if BUILDING_GCC_VERSION >= 4006
-+ if (rhs3 != NULL_TREE)
-+ gimple_assign_set_rhs3(stmt, rhs3);
-+#endif
-+ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
-+ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
-+ update_stmt(stmt);
-+ pointer_set_insert(visited, oldstmt);
-+ return gimple_assign_lhs(stmt);
-+}
-+
-+static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi;
-+ basic_block first_bb;
-+
-+ gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
-+
-+ if (bb->index == 0) {
-+ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR_FOR_FN(cfun))->dest;
-+ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
-+ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR_FOR_FN(cfun));
-+ bb = first_bb;
-+ }
-+
-+ gsi = gsi_after_labels(bb);
-+ assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign, def_stmt = get_def_stmt(new_arg);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI) {
-+ gsi = gsi_after_labels(gimple_bb(def_stmt));
-+ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
-+ } else {
-+ gsi = gsi_for_stmt(def_stmt);
-+ assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
-+ }
-+
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type)
-+{
-+ basic_block bb;
-+ gimple_stmt_iterator gsi;
-+ const_gimple def_stmt;
-+ gimple assign;
-+
-+ def_stmt = get_def_stmt(arg);
-+ bb = gimple_bb(def_stmt);
-+ gcc_assert(bb->index != 0);
-+ gsi = gsi_after_labels(bb);
-+
-+ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
-+{
-+ tree size_overflow_type;
-+ tree arg;
-+ const_gimple def_stmt;
-+
-+ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
-+ return new_arg;
-+
-+ arg = gimple_phi_arg_def(oldstmt, i);
-+ def_stmt = get_def_stmt(arg);
-+ gcc_assert(def_stmt != NULL);
-+ size_overflow_type = get_size_overflow_type(oldstmt, arg);
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_PHI:
-+ return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type);
-+ case GIMPLE_NOP: {
-+ basic_block bb;
-+
-+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
-+ return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb);
-+ }
-+ case GIMPLE_ASM: {
-+ gimple_stmt_iterator gsi;
-+ gimple assign, stmt = get_def_stmt(arg);
-+
-+ gsi = gsi_for_stmt(stmt);
-+ assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
-+ }
-+ default:
-+ gcc_assert(new_arg != NULL_TREE);
-+ gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
-+ return use_phi_ssa_name(ssa_name_var, new_arg);
-+ }
-+}
-+
-+static gimple overflow_create_phi_node(gimple oldstmt, tree result)
-+{
-+ basic_block bb;
-+ gimple phi;
-+ gimple_seq seq;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
-+
-+ bb = gsi_bb(gsi);
-+
-+ if (result == NULL_TREE) {
-+ tree old_result = gimple_phi_result(oldstmt);
-+ tree size_overflow_type = get_size_overflow_type(oldstmt, old_result);
-+
-+ result = create_new_var(size_overflow_type);
-+ }
-+
-+ phi = create_phi_node(result, bb);
-+ gimple_phi_set_result(phi, make_ssa_name(result, phi));
-+ seq = phi_nodes(bb);
-+ gsi = gsi_last(seq);
-+ gsi_remove(&gsi, false);
-+
-+ gsi = gsi_for_stmt(oldstmt);
-+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
-+ gimple_set_bb(phi, bb);
-+ set_stmt_flag(phi, MY_STMT);
-+ return phi;
-+}
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+static tree create_new_phi_node(VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
-+#else
-+static tree create_new_phi_node(vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
-+#endif
-+{
-+ gimple new_phi;
-+ unsigned int i;
-+ tree arg, result;
-+ location_t loc = gimple_location(oldstmt);
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(!VEC_empty(tree, *args));
-+#else
-+ gcc_assert(!args->is_empty());
-+#endif
-+
-+ new_phi = overflow_create_phi_node(oldstmt, ssa_name_var);
-+ result = gimple_phi_result(new_phi);
-+ ssa_name_var = SSA_NAME_VAR(result);
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, *args, i, arg) {
-+#else
-+ FOR_EACH_VEC_SAFE_ELT(args, i, arg) {
-+#endif
-+ arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i);
-+ add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_free(tree, heap, *args);
-+#else
-+ vec_free(args);
-+#endif
-+ update_stmt(new_phi);
-+ return result;
-+}
-+
-+static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result)
-+{
-+ tree ssa_name_var = NULL_TREE;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, heap) *args = NULL;
-+#else
-+ vec<tree, va_heap, vl_embed> *args = NULL;
-+#endif
-+ gimple oldstmt = get_def_stmt(orig_result);
-+ unsigned int i, len = gimple_phi_num_args(oldstmt);
-+
-+ pointer_set_insert(visited, oldstmt);
-+ for (i = 0; i < len; i++) {
-+ tree arg, new_arg;
-+
-+ arg = gimple_phi_arg_def(oldstmt, i);
-+ new_arg = expand(visited, caller_node, arg);
-+
-+ if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
-+ ssa_name_var = SSA_NAME_VAR(new_arg);
-+
-+ if (is_gimple_constant(arg)) {
-+ tree size_overflow_type = get_size_overflow_type(oldstmt, arg);
-+
-+ new_arg = cast_a_tree(size_overflow_type, arg);
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, heap, args, new_arg);
-+#else
-+ vec_safe_push(args, new_arg);
-+#endif
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ return create_new_phi_node(&args, ssa_name_var, oldstmt);
-+#else
-+ return create_new_phi_node(args, ssa_name_var, oldstmt);
-+#endif
-+}
-+
-+static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
++static const char *get_asm_string(const_gimple stmt)
+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+ tree origtype = TREE_TYPE(orig_rhs);
-+
-+ gcc_assert(is_gimple_assign(stmt));
++ if (!stmt)
++ return NULL;
++ if (gimple_code(stmt) != GIMPLE_ASM)
++ return NULL;
+
-+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ set_stmt_flag(assign, MY_STMT);
-+ return gimple_assign_lhs(assign);
++ return gimple_asm_string(stmt);
+}
+
-+static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
++bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
+{
-+ const_tree rhs1, lhs, rhs1_type, lhs_type;
-+ enum machine_mode lhs_mode, rhs_mode;
-+ gimple def_stmt = get_def_stmt(no_const_rhs);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
++ const char *str;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ lhs = gimple_assign_lhs(def_stmt);
-+ rhs1_type = TREE_TYPE(rhs1);
-+ lhs_type = TREE_TYPE(lhs);
-+ rhs_mode = TYPE_MODE(rhs1_type);
-+ lhs_mode = TYPE_MODE(lhs_type);
-+ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ return true;
-+}
-+
-+static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
-+{
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree lhs = gimple_assign_lhs(stmt);
-+ const_tree rhs1_type = TREE_TYPE(rhs1);
-+ const_tree lhs_type = TREE_TYPE(lhs);
-+
-+ if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ return create_assign(visited, stmt, rhs1, AFTER_STMT);
++ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
+}
+
-+static bool no_uses(tree node)
++bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
+{
-+ imm_use_iterator imm_iter;
-+ use_operand_p use_p;
-+
-+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
-+ const_gimple use_stmt = USE_STMT(use_p);
++ const char *str;
+
-+ if (use_stmt == NULL)
-+ return true;
-+ if (is_gimple_debug(use_stmt))
-+ continue;
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+ }
-+ return true;
++ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
+}
+
-+// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
-+static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
++bool is_size_overflow_asm(const_gimple stmt)
+{
-+ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs_type = TREE_TYPE(rhs1);
-+ lhs_type = TREE_TYPE(lhs);
-+ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
-+ return false;
-+ if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
-+ return false;
-+
-+ def_stmt = get_def_stmt(rhs1);
-+ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
-+ return false;
-+
-+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
-+ return false;
++ const char *str;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
++ str = get_asm_string(stmt);
++ if (!str)
+ return false;
-+
-+ if (is_gimple_constant(rhs2))
-+ not_const_rhs = rhs1;
-+ else
-+ not_const_rhs = rhs2;
-+
-+ return no_uses(not_const_rhs);
++ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
+}
+
-+static bool skip_lhs_cast_check(const_gimple stmt)
++bool is_a_return_check(const_tree node)
+{
-+ const_tree rhs = gimple_assign_rhs1(stmt);
-+ const_gimple def_stmt = get_def_stmt(rhs);
-+
-+ // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
-+ if (gimple_code(def_stmt) == GIMPLE_ASM)
-+ return true;
-+
-+ if (is_const_plus_unsigned_signed_truncation(rhs))
++ if (TREE_CODE(node) == FUNCTION_DECL)
+ return true;
+
++ gcc_assert(TREE_CODE(node) == PARM_DECL);
+ return false;
+}
+
-+static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
-+{
-+ bool cast_lhs, cast_rhs;
-+ tree lhs = gimple_assign_lhs(stmt);
-+ tree rhs = gimple_assign_rhs1(stmt);
-+ const_tree lhs_type = TREE_TYPE(lhs);
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
-+ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
-+ unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
-+ unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
-+
-+ static bool check_lhs[3][4] = {
-+ // ss su us uu
-+ { false, true, true, false }, // lhs > rhs
-+ { false, false, false, false }, // lhs = rhs
-+ { true, true, true, true }, // lhs < rhs
-+ };
-+
-+ static bool check_rhs[3][4] = {
-+ // ss su us uu
-+ { true, false, true, true }, // lhs > rhs
-+ { true, false, true, true }, // lhs = rhs
-+ { true, false, true, true }, // lhs < rhs
-+ };
-+
-+ // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
-+ if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (lhs_size > rhs_size) {
-+ cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ } else if (lhs_size == rhs_size) {
-+ cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ } else {
-+ cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
-+ }
-+
-+ if (!cast_lhs && !cast_rhs)
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ if (cast_lhs && !skip_lhs_cast_check(stmt))
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
-+
-+ if (cast_rhs)
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
-+
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+}
-+
-+static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
-+{
-+ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
-+
-+ if (get_stmt_flag(stmt) == MY_STMT)
-+ return lhs;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+
-+ if (new_rhs1 == NULL_TREE)
-+ return create_cast_assign(visited, stmt);
-+
-+ if (get_stmt_flag(stmt) == NO_CAST_CHECK)
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
-+ tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
-+
-+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
-+ check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+ }
-+
-+ if (!gimple_assign_cast_p(stmt))
-+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
-+
-+ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
-+}
-+
-+static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
-+{
-+ tree rhs1, lhs = gimple_assign_lhs(stmt);
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+
-+ if (is_gimple_constant(rhs1))
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+
-+ switch (TREE_CODE(rhs1)) {
-+ case SSA_NAME:
-+ return handle_unary_rhs(visited, caller_node, def_stmt);
-+ case ARRAY_REF:
-+ case BIT_FIELD_REF:
-+ case ADDR_EXPR:
-+ case COMPONENT_REF:
-+ case INDIRECT_REF:
-+#if BUILDING_GCC_VERSION >= 4006
-+ case MEM_REF:
-+#endif
-+ case TARGET_MEM_REF:
-+ case VIEW_CONVERT_EXPR:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ case PARM_DECL:
-+ case VAR_DECL:
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ debug_tree(rhs1);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
-+{
-+ gimple cond_stmt;
-+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
-+
-+ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
-+ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
-+ update_stmt(cond_stmt);
-+}
-+
-+static tree create_string_param(tree string)
-+{
-+ tree i_type, a_type;
-+ const int length = TREE_STRING_LENGTH(string);
-+
-+ gcc_assert(length > 0);
-+
-+ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
-+ a_type = build_array_type(char_type_node, i_type);
-+
-+ TREE_TYPE(string) = a_type;
-+ TREE_CONSTANT(string) = 1;
-+ TREE_READONLY(string) = 1;
-+
-+ return build1(ADDR_EXPR, ptr_type_node, string);
-+}
-+
-+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
-+{
-+ gimple func_stmt;
-+ const_gimple def_stmt;
-+ const_tree loc_line;
-+ tree loc_file, ssa_name, current_func;
-+ expanded_location xloc;
-+ char *ssa_name_buf;
-+ int len;
-+ struct cgraph_edge *edge;
-+ struct cgraph_node *callee_node;
-+ int frequency;
-+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
-+
-+ def_stmt = get_def_stmt(arg);
-+ xloc = expand_location(gimple_location(def_stmt));
-+
-+ if (!gimple_has_location(def_stmt)) {
-+ xloc = expand_location(gimple_location(stmt));
-+ if (!gimple_has_location(stmt))
-+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
-+ }
-+
-+ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
-+
-+ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
-+ loc_file = create_string_param(loc_file);
-+
-+ current_func = build_string(DECL_NAME_LENGTH(current_function_decl) + 1, DECL_NAME_POINTER(current_function_decl));
-+ current_func = create_string_param(current_func);
-+
-+ gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
-+ call_count++;
-+ len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", DECL_NAME_POINTER(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
-+ gcc_assert(len > 0);
-+ ssa_name = build_string(len + 1, ssa_name_buf);
-+ free(ssa_name_buf);
-+ ssa_name = create_string_param(ssa_name);
-+
-+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
-+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
-+
-+ callee_node = cgraph_get_create_node(report_size_overflow_decl);
-+ frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
-+
-+ edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
-+ gcc_assert(edge != NULL);
-+}
-+
-+static void __unused print_the_code_insertions(const_gimple stmt)
-+{
-+ location_t loc = gimple_location(stmt);
-+
-+ inform(loc, "Integer size_overflow check applied here.");
-+}
-+
-+static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
++// Get the argnum of a function decl, if node is a return then the argnum is 0
++unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
+{
-+ basic_block cond_bb, join_bb, bb_true;
-+ edge e;
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ cond_bb = gimple_bb(stmt);
-+ if (before)
-+ gsi_prev(&gsi);
-+ if (gsi_end_p(gsi))
-+ e = split_block_after_labels(cond_bb);
++ if (is_a_return_check(node))
++ return 0;
+ else
-+ e = split_block(cond_bb, gsi_stmt(gsi));
-+ cond_bb = e->src;
-+ join_bb = e->dest;
-+ e->flags = EDGE_FALSE_VALUE;
-+ e->probability = REG_BR_PROB_BASE;
-+
-+ bb_true = create_empty_bb(cond_bb);
-+ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
-+ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
-+ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
-+
-+ gcc_assert(dom_info_available_p(CDI_DOMINATORS));
-+ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
-+ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
-+
-+ if (current_loops != NULL) {
-+ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
-+ add_bb_to_loop(bb_true, cond_bb->loop_father);
-+ }
-+
-+ insert_cond(cond_bb, arg, cond_code, type_value);
-+ insert_cond_result(caller_node, bb_true, stmt, arg, min);
-+
-+// print_the_code_insertions(stmt);
-+}
-+
-+static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
-+{
-+ const_tree rhs_type = TREE_TYPE(rhs);
-+ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
-+
-+ gcc_assert(rhs_type != NULL_TREE);
-+ if (TREE_CODE(rhs_type) == POINTER_TYPE)
-+ return;
-+
-+ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
-+
-+ if (is_const_plus_unsigned_signed_truncation(rhs))
-+ return;
-+
-+ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+ // typemax (-1) < typemin (0)
-+ if (TREE_OVERFLOW(type_max))
-+ return;
-+
-+ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
-+
-+ cast_rhs_type = TREE_TYPE(cast_rhs);
-+ type_max_type = TREE_TYPE(type_max);
-+ gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
-+
-+ insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
-+
-+ // special case: get_size_overflow_type(), 32, u64->s
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
-+ return;
-+
-+ type_min_type = TREE_TYPE(type_min);
-+ gcc_assert(types_compatible_p(type_max_type, type_min_type));
-+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
-+}
-+
-+static bool is_lt_signed_type_max(const_tree rhs)
-+{
-+ const_tree new_type, type_max, type = TREE_TYPE(rhs);
-+
-+ if (!TYPE_UNSIGNED(type))
-+ return true;
-+
-+ switch (TYPE_MODE(type)) {
-+ case QImode:
-+ new_type = intQI_type_node;
-+ break;
-+ case HImode:
-+ new_type = intHI_type_node;
-+ break;
-+ case SImode:
-+ new_type = intSI_type_node;
-+ break;
-+ case DImode:
-+ new_type = intDI_type_node;
-+ break;
-+ default:
-+ debug_tree((tree)type);
-+ gcc_unreachable();
-+ }
-+
-+ type_max = TYPE_MAX_VALUE(new_type);
-+ if (!tree_int_cst_lt(type_max, rhs))
-+ return true;
-+
-+ return false;
-+}
-+
-+static bool is_gt_zero(const_tree rhs)
-+{
-+ const_tree type = TREE_TYPE(rhs);
-+
-+ if (TYPE_UNSIGNED(type))
-+ return true;
-+
-+ if (!tree_int_cst_lt(rhs, integer_zero_node))
-+ return true;
-+
-+ return false;
-+}
-+
-+static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
-+{
-+ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
-+ return false;
-+ if (!is_gimple_constant(rhs))
-+ return false;
-+
-+ // If the const is between 0 and the max value of the signed type of the same bitsize then there is no intentional overflow
-+// if (is_lt_signed_type_max(rhs) && is_gt_zero(rhs))
-+// return false;
-+
-+ return true;
-+}
-+
-+static tree get_def_stmt_rhs(const_tree var)
-+{
-+ tree rhs1, def_stmt_rhs1;
-+ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
-+
-+ def_stmt = get_def_stmt(var);
-+ if (!gimple_assign_cast_p(def_stmt))
-+ return NULL_TREE;
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && get_stmt_flag(def_stmt) == MY_STMT && gimple_assign_cast_p(def_stmt));
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt))
-+ return rhs1;
-+
-+ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
-+ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
-+
-+ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
-+ case GIMPLE_CALL:
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ case GIMPLE_PHI:
-+ return def_stmt_rhs1;
-+ case GIMPLE_ASSIGN:
-+ return rhs1;
-+ default:
-+ debug_gimple_stmt(def_stmt_rhs1_def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
-+{
-+ tree new_rhs, orig_rhs;
-+ void (*gimple_assign_set_rhs)(gimple, tree);
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree rhs2 = gimple_assign_rhs2(stmt);
-+ tree lhs = gimple_assign_lhs(stmt);
-+
-+ if (!check_overflow)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (change_rhs == NULL_TREE)
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+
-+ if (new_rhs2 == NULL_TREE) {
-+ orig_rhs = rhs1;
-+ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
-+ } else {
-+ orig_rhs = rhs2;
-+ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
-+ }
-+
-+ check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
-+
-+ new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
-+ gimple_assign_set_rhs(stmt, new_rhs);
-+ update_stmt(stmt);
-+
-+ return create_assign(visited, stmt, lhs, AFTER_STMT);
-+}
-+
-+static bool is_subtraction_special(const_gimple stmt)
-+{
-+ gimple rhs1_def_stmt, rhs2_def_stmt;
-+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
-+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
-+ const_tree rhs1 = gimple_assign_rhs1(stmt);
-+ const_tree rhs2 = gimple_assign_rhs2(stmt);
-+
-+ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
-+ return false;
-+
-+ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ rhs2_def_stmt = get_def_stmt(rhs2);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
-+ return false;
-+
-+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
-+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
-+ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
-+ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
-+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
-+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
-+ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
-+ rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
-+ if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
-+ return false;
-+ if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
-+ return false;
-+
-+ set_stmt_flag(rhs1_def_stmt, NO_CAST_CHECK);
-+ set_stmt_flag(rhs2_def_stmt, NO_CAST_CHECK);
-+ return true;
-+}
-+
-+static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs)
-+{
-+ tree new_rhs1, new_rhs2;
-+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
-+ gimple assign, stmt = get_def_stmt(lhs);
-+ tree rhs1 = gimple_assign_rhs1(stmt);
-+ tree rhs2 = gimple_assign_rhs2(stmt);
-+
-+ if (!is_subtraction_special(stmt))
-+ return NULL_TREE;
-+
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+ new_rhs2 = expand(visited, caller_node, rhs2);
-+
-+ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
-+ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
-+
-+ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
-+ return NULL_TREE;
-+
-+ if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
-+ new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
-+ new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
-+ }
-+
-+ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
-+ new_lhs = gimple_assign_lhs(assign);
-+ check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
-+
-+ return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+}
-+
-+static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(rhs) != SSA_NAME)
-+ return false;
-+
-+ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
-+ return false;
-+
-+ def_stmt = get_def_stmt(rhs);
-+ if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
-+ return false;
-+
-+ return true;
-+}
-+
-+static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ tree rhs1, rhs2, new_lhs;
-+ gimple def_stmt = get_def_stmt(lhs);
-+ tree new_rhs1 = NULL_TREE;
-+ tree new_rhs2 = NULL_TREE;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ /* no DImode/TImode division in the 32/64 bit kernel */
-+ switch (gimple_assign_rhs_code(def_stmt)) {
-+ case RDIV_EXPR:
-+ case TRUNC_DIV_EXPR:
-+ case CEIL_DIV_EXPR:
-+ case FLOOR_DIV_EXPR:
-+ case ROUND_DIV_EXPR:
-+ case TRUNC_MOD_EXPR:
-+ case CEIL_MOD_EXPR:
-+ case FLOOR_MOD_EXPR:
-+ case ROUND_MOD_EXPR:
-+ case EXACT_DIV_EXPR:
-+ case POINTER_PLUS_EXPR:
-+ case BIT_AND_EXPR:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ default:
-+ break;
-+ }
-+
-+ new_lhs = handle_integer_truncation(visited, caller_node, lhs);
-+ if (new_lhs != NULL_TREE)
-+ return new_lhs;
-+
-+ if (TREE_CODE(rhs1) == SSA_NAME)
-+ new_rhs1 = expand(visited, caller_node, rhs1);
-+ if (TREE_CODE(rhs2) == SSA_NAME)
-+ new_rhs2 = expand(visited, caller_node, rhs2);
-+
-+ if (is_a_neg_overflow(def_stmt, rhs2))
-+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
-+ if (is_a_neg_overflow(def_stmt, rhs1))
-+ return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
-+
-+
-+ if (is_a_constant_overflow(def_stmt, rhs2))
-+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
-+ if (is_a_constant_overflow(def_stmt, rhs1))
-+ return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
-+
-+ // the const is between 0 and (signed) MAX
-+ if (is_gimple_constant(rhs1))
-+ new_rhs1 = create_assign(visited, def_stmt, rhs1, BEFORE_STMT);
-+ if (is_gimple_constant(rhs2))
-+ new_rhs2 = create_assign(visited, def_stmt, rhs2, BEFORE_STMT);
-+
-+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4006
-+static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
-+{
-+ if (is_gimple_constant(rhs))
-+ return cast_a_tree(size_overflow_type, rhs);
-+ if (TREE_CODE(rhs) != SSA_NAME)
-+ return NULL_TREE;
-+ return expand(visited, caller_node, rhs);
-+}
-+
-+static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+ rhs3 = gimple_assign_rhs3(def_stmt);
-+ new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
-+ new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
-+ new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
-+
-+ return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
-+}
-+#endif
-+
-+static tree get_size_overflow_type(gimple stmt, const_tree node)
-+{
-+ const_tree type;
-+ tree new_type;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ type = TREE_TYPE(node);
-+
-+ if (get_stmt_flag(stmt) == MY_STMT)
-+ return TREE_TYPE(node);
-+
-+ switch (TYPE_MODE(type)) {
-+ case QImode:
-+ new_type = intHI_type_node;
-+ break;
-+ case HImode:
-+ new_type = intSI_type_node;
-+ break;
-+ case SImode:
-+ new_type = intDI_type_node;
-+ break;
-+ case DImode:
-+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+ new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
-+ else
-+ new_type = intTI_type_node;
-+ break;
-+ case TImode:
-+ gcc_assert(!TYPE_UNSIGNED(type));
-+ new_type = intTI_type_node;
-+ break;
-+ default:
-+ debug_tree((tree)node);
-+ error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
-+ gcc_unreachable();
-+ }
-+
-+ if (TYPE_QUALS(type) != 0)
-+ return build_qualified_type(new_type, TYPE_QUALS(type));
-+ return new_type;
-+}
-+
-+static tree expand_visited(gimple def_stmt)
-+{
-+ const_gimple next_stmt;
-+ gimple_stmt_iterator gsi;
-+ enum gimple_code code = gimple_code(def_stmt);
-+
-+ if (code == GIMPLE_ASM)
-+ return NULL_TREE;
-+
-+ gsi = gsi_for_stmt(def_stmt);
-+ gsi_next(&gsi);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
-+ return NULL_TREE;
-+ gcc_assert(!gsi_end_p(gsi));
-+ next_stmt = gsi_stmt(gsi);
-+
-+ if (gimple_code(def_stmt) == GIMPLE_PHI && get_stmt_flag((gimple)next_stmt) != MY_STMT)
-+ return NULL_TREE;
-+ gcc_assert(get_stmt_flag((gimple)next_stmt) == MY_STMT);
-+
-+ return get_lhs(next_stmt);
-+}
-+
-+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(lhs);
-+
-+ if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
-+ return NULL_TREE;
-+
-+ if (get_stmt_flag(def_stmt) == MY_STMT)
-+ return lhs;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return expand_visited(def_stmt);
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_PHI:
-+ return handle_phi(visited, caller_node, lhs);
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASM:
-+ return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return handle_unary_ops(visited, caller_node, def_stmt);
-+ case 3:
-+ return handle_binary_ops(visited, caller_node, lhs);
-+#if BUILDING_GCC_VERSION >= 4006
-+ case 4:
-+ return handle_ternary_ops(visited, caller_node, lhs);
-+#endif
-+ }
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
-+{
-+ const_gimple assign;
-+ tree orig_type = TREE_TYPE(orig_node);
-+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
-+
-+ assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ return gimple_assign_lhs(assign);
-+}
-+
-+static void change_orig_node(struct interesting_node *cur_node, tree new_node)
-+{
-+ void (*set_rhs)(gimple, tree);
-+ gimple stmt = cur_node->first_stmt;
-+ const_tree orig_node = cur_node->node;
-+
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_RETURN:
-+ gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
-+ break;
-+ case GIMPLE_CALL:
-+ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
-+ break;
-+ case GIMPLE_ASSIGN:
-+ switch (cur_node->num) {
-+ case 1:
-+ set_rhs = &gimple_assign_set_rhs1;
-+ break;
-+ case 2:
-+ set_rhs = &gimple_assign_set_rhs2;
-+ break;
-+#if BUILDING_GCC_VERSION >= 4006
-+ case 3:
-+ set_rhs = &gimple_assign_set_rhs3;
-+ break;
-+#endif
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
-+ break;
-+ default:
-+ debug_gimple_stmt(stmt);
-+ gcc_unreachable();
-+ }
-+
-+ update_stmt(stmt);
++ return find_arg_number_tree(node, orig_fndecl);
+}
+
-+static unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
++unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl)
+{
+ const struct size_overflow_hash *hash;
+ unsigned int new_argnum;
@@ -120770,81 +123913,6 @@ index 0000000..948ec25
+ return CANNOT_FIND_ARG;
+}
+
-+// Don't want to duplicate entries in next_cgraph_node
-+static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
-+{
-+ const_tree new_callee_fndecl;
-+ struct next_cgraph_node *cur_node;
-+
-+ if (fndecl == RET_CHECK)
-+ new_callee_fndecl = NODE_DECL(node);
-+ else
-+ new_callee_fndecl = fndecl;
-+
-+ for (cur_node = head; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(NODE_DECL(cur_node->current_function), NODE_DECL(node), 0))
-+ continue;
-+ if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
-+ continue;
-+ if (num == cur_node->num)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Add a next_cgraph_node into the list for handle_function().
-+ * handle_function() iterates over all the next cgraph nodes and
-+ * starts the overflow check insertion process.
-+ */
-+static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
-+{
-+ struct next_cgraph_node *new_node;
-+
-+ if (is_in_next_cgraph_node(head, node, fndecl, num))
-+ return head;
-+
-+ new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
-+ new_node->current_function = node;
-+ new_node->next = NULL;
-+ new_node->num = num;
-+ if (fndecl == RET_CHECK)
-+ new_node->callee_fndecl = NODE_DECL(node);
-+ else
-+ new_node->callee_fndecl = fndecl;
-+
-+ if (!head)
-+ return new_node;
-+
-+ new_node->next = head;
-+ return new_node;
-+}
-+
-+static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
-+{
-+ struct cgraph_edge *e;
-+
-+ if (num == 0)
-+ return create_new_next_cgraph_node(head, node, RET_CHECK, num);
-+
-+ for (e = node->callers; e; e = e->next_caller) {
-+ tree fndecl = gimple_call_fndecl(e->call_stmt);
-+
-+ gcc_assert(fndecl != NULL_TREE);
-+ head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
-+ }
-+
-+ return head;
-+}
-+
-+static bool is_a_return_check(const_tree node)
-+{
-+ if (TREE_CODE(node) == FUNCTION_DECL)
-+ return true;
-+
-+ gcc_assert(TREE_CODE(node) == PARM_DECL);
-+ return false;
-+}
-+
+static bool is_in_hash_table(const_tree fndecl, unsigned int num)
+{
+ const struct size_overflow_hash *hash;
@@ -120855,37 +123923,10 @@ index 0000000..948ec25
+ return false;
+}
+
-+struct missing_functions {
-+ struct missing_functions *next;
-+ const_tree node;
-+ tree fndecl;
-+};
-+
-+static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
-+{
-+ struct missing_functions *new_function;
-+
-+ new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
-+ new_function->node = node;
-+ new_function->next = NULL;
-+
-+ if (TREE_CODE(node) == FUNCTION_DECL)
-+ new_function->fndecl = node;
-+ else
-+ new_function->fndecl = current_function_decl;
-+ gcc_assert(new_function->fndecl);
-+
-+ if (!missing_fn_head)
-+ return new_function;
-+
-+ new_function->next = missing_fn_head;
-+ return new_function;
-+}
-+
+/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table.
+ * If the function is missing everywhere then print the missing message into stderr.
+ */
-+static bool is_missing_function(const_tree orig_fndecl, unsigned int num)
++bool is_missing_function(const_tree orig_fndecl, unsigned int num)
+{
+ switch (DECL_FUNCTION_CODE(orig_fndecl)) {
+#if BUILDING_GCC_VERSION >= 4008
@@ -120913,2189 +123954,6 @@ index 0000000..948ec25
+ return true;
+}
+
-+// Get the argnum of a function decl, if node is a return then the argnum is 0
-+static unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
-+{
-+ if (is_a_return_check(node))
-+ return 0;
-+ else
-+ return find_arg_number_tree(node, orig_fndecl);
-+}
-+
-+/* If the function is missing from the hash table and it is a static function
-+ * then create a next_cgraph_node from it for handle_function()
-+ */
-+static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
-+{
-+ unsigned int num;
-+ const_tree orig_fndecl;
-+ struct cgraph_node *next_node = NULL;
-+
-+ orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
-+
-+ num = get_function_num(missing_fn_head->node, orig_fndecl);
-+ if (num == CANNOT_FIND_ARG)
-+ return cnodes;
-+
-+ if (!is_missing_function(orig_fndecl, num))
-+ return cnodes;
-+
-+ next_node = cgraph_get_node(missing_fn_head->fndecl);
-+ if (next_node && next_node->local.local)
-+ cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
-+ return cnodes;
-+}
-+
-+/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
-+ * into the next_cgraph_node list. They will be the next interesting returns or callees.
-+ */
-+static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
-+{
-+ unsigned int i;
-+ tree node;
-+ struct missing_functions *cur, *missing_fn_head = NULL;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
-+#endif
-+ switch (TREE_CODE(node)) {
-+ case PARM_DECL:
-+ if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
-+ break;
-+ case FUNCTION_DECL:
-+ missing_fn_head = create_new_missing_function(missing_fn_head, node);
-+ break;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ while (missing_fn_head) {
-+ cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
-+
-+ cur = missing_fn_head->next;
-+ free(missing_fn_head);
-+ missing_fn_head = cur;
-+ }
-+
-+ return cnodes;
-+}
-+
-+static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ const_tree arg = gimple_phi_arg_def(phi, i);
-+
-+ set_conditions(visited, interesting_conditions, arg);
-+ }
-+}
-+
-+enum conditions {
-+ FROM_CONST, NOT_UNARY, CAST
-+};
-+
-+// Search for constants, cast assignments and binary/ternary assignments
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
-+{
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ if (is_gimple_constant(lhs)) {
-+ interesting_conditions[FROM_CONST] = true;
-+ return;
-+ }
-+
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ case GIMPLE_CALL:
-+ case GIMPLE_ASM:
-+ return;
-+ case GIMPLE_PHI:
-+ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
-+ case GIMPLE_ASSIGN:
-+ if (gimple_num_ops(def_stmt) == 2) {
-+ const_tree rhs = gimple_assign_rhs1(def_stmt);
-+
-+ if (gimple_assign_cast_p(def_stmt))
-+ interesting_conditions[CAST] = true;
-+
-+ return set_conditions(visited, interesting_conditions, rhs);
-+ } else {
-+ interesting_conditions[NOT_UNARY] = true;
-+ return;
-+ }
-+ default:
-+ debug_gimple_stmt(def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// determine whether duplication will be necessary or not.
-+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
-+{
-+ struct pointer_set_t *visited;
-+
-+ if (gimple_assign_cast_p(cur_node->first_stmt))
-+ interesting_conditions[CAST] = true;
-+ else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
-+ interesting_conditions[NOT_UNARY] = true;
-+
-+ visited = pointer_set_create();
-+ set_conditions(visited, interesting_conditions, cur_node->node);
-+ pointer_set_destroy(visited);
-+}
-+
-+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
-+static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
-+{
-+ gimple assign;
-+ gimple_stmt_iterator gsi;
-+
-+ // already removed
-+ if (gimple_bb(asm_stmt) == NULL)
-+ return;
-+ gsi = gsi_for_stmt(asm_stmt);
-+
-+ assign = gimple_build_assign(lhs, rhs);
-+ gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
-+ SSA_NAME_DEF_STMT(lhs) = assign;
-+
-+ gsi_remove(&gsi, true);
-+}
-+
-+// Get the field decl of a component ref for intentional_overflow checking
-+static const_tree search_field_decl(const_tree comp_ref)
-+{
-+ const_tree field = NULL_TREE;
-+ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
-+
-+ for (i = 0; i < len; i++) {
-+ field = TREE_OPERAND(comp_ref, i);
-+ if (TREE_CODE(field) == FIELD_DECL)
-+ break;
-+ }
-+ gcc_assert(TREE_CODE(field) == FIELD_DECL);
-+ return field;
-+}
-+
-+/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting
-+ * stmt is a return otherwise it is the callee function.
-+ */
-+static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
-+{
-+ const_tree fndecl;
-+
-+ if (argnum == 0)
-+ fndecl = current_function_decl;
-+ else
-+ fndecl = gimple_call_fndecl(stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return NULL_TREE;
-+
-+ return DECL_ORIGIN(fndecl);
-+}
-+
-+/* Get the param of the intentional_overflow attribute.
-+ * * 0: MARK_NOT_INTENTIONAL
-+ * * 1..MAX_PARAM: MARK_YES
-+ * * -1: MARK_TURN_OFF
-+ */
-+static tree get_attribute_param(const_tree decl)
-+{
-+ const_tree attr;
-+
-+ if (decl == NULL_TREE)
-+ return NULL_TREE;
-+
-+ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
-+ if (!attr || !TREE_VALUE(attr))
-+ return NULL_TREE;
-+
-+ return TREE_VALUE(attr);
-+}
-+
-+// MARK_TURN_OFF
-+static bool is_turn_off_intentional_attr(const_tree decl)
-+{
-+ const_tree param_head;
-+
-+ param_head = get_attribute_param(decl);
-+ if (param_head == NULL_TREE)
-+ return false;
-+
-+ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
-+ return true;
-+ return false;
-+}
-+
-+// MARK_NOT_INTENTIONAL
-+static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
-+{
-+ const_tree param_head;
-+
-+ if (argnum == 0)
-+ return false;
-+
-+ param_head = get_attribute_param(decl);
-+ if (param_head == NULL_TREE)
-+ return false;
-+
-+ if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
-+ return true;
-+ return false;
-+}
-+
-+// MARK_YES
-+static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
-+{
-+ tree param, param_head;
-+
-+ if (argnum == 0)
-+ return false;
-+
-+ param_head = get_attribute_param(decl);
-+ for (param = param_head; param; param = TREE_CHAIN(param))
-+ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
-+ return true;
-+ return false;
-+}
-+
-+static const char *get_asm_string(const_gimple stmt)
-+{
-+ if (!stmt)
-+ return NULL;
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ return NULL;
-+
-+ return gimple_asm_string(stmt);
-+}
-+
-+static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
-+{
-+ const char *str;
-+
-+ str = get_asm_string(stmt);
-+ if (!str)
-+ return false;
-+ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
-+}
-+
-+static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
-+{
-+ const char *str;
-+
-+ str = get_asm_string(stmt);
-+ if (!str)
-+ return false;
-+ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
-+}
-+
-+static bool is_size_overflow_asm(const_gimple stmt)
-+{
-+ const char *str;
-+
-+ str = get_asm_string(stmt);
-+ if (!str)
-+ return false;
-+ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
-+}
-+
-+static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
-+{
-+ location_t loc;
-+
-+ if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
-+ return;
-+
-+ if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
-+ return;
-+
-+ loc = DECL_SOURCE_LOCATION(decl);
-+ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", DECL_NAME_POINTER(decl), argnum);
-+}
-+
-+/* Get the type of the intentional_overflow attribute of a node
-+ * * MARK_TURN_OFF
-+ * * MARK_YES
-+ * * MARK_NO
-+ * * MARK_NOT_INTENTIONAL
-+ */
-+static enum mark get_intentional_attr_type(const_tree node)
-+{
-+ const_tree cur_decl;
-+
-+ if (node == NULL_TREE)
-+ return MARK_NO;
-+
-+ switch (TREE_CODE(node)) {
-+ case COMPONENT_REF:
-+ cur_decl = search_field_decl(node);
-+ if (is_turn_off_intentional_attr(cur_decl))
-+ return MARK_TURN_OFF;
-+ if (is_end_intentional_intentional_attr(cur_decl, 1))
-+ return MARK_YES;
-+ break;
-+ case PARM_DECL: {
-+ unsigned int argnum;
-+
-+ cur_decl = DECL_ORIGIN(current_function_decl);
-+ argnum = find_arg_number_tree(node, cur_decl);
-+ if (argnum == CANNOT_FIND_ARG)
-+ return MARK_NO;
-+ if (is_yes_intentional_attr(cur_decl, argnum))
-+ return MARK_YES;
-+ if (is_end_intentional_intentional_attr(cur_decl, argnum))
-+ return MARK_NOT_INTENTIONAL;
-+ break;
-+ }
-+ case FUNCTION_DECL:
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
-+ return MARK_TURN_OFF;
-+ break;
-+ default:
-+ break;
-+ }
-+ return MARK_NO;
-+}
-+
-+// Search for the intentional_overflow attribute on the last nodes
-+static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
-+{
-+ unsigned int i;
-+ tree last_node;
-+ enum mark mark = MARK_NO;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
-+#endif
-+ mark = get_intentional_attr_type(last_node);
-+ if (mark != MARK_NO)
-+ break;
-+ }
-+ return mark;
-+}
-+
-+/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
-+ * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
-+ */
-+static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
-+{
-+ if (!cur_node->intentional_mark_from_gimple)
-+ return false;
-+
-+ if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
-+ cur_node->intentional_attr_cur_fndecl = MARK_YES;
-+ else
-+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
-+
-+ // skip param decls
-+ if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
-+ return true;
-+ return true;
-+}
-+
-+/* Search intentional_overflow attribute on caller and on callee too.
-+ * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes
-+ * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
-+ * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
-+*/
-+static void check_intentional_attribute_ipa(struct interesting_node *cur_node)
-+{
-+ const_tree fndecl;
-+
-+ if (is_intentional_attribute_from_gimple(cur_node))
-+ return;
-+
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
-+ return;
-+ }
-+
-+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
-+ cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
-+ return;
-+ }
-+
-+ if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
-+ return;
-+
-+ fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
-+ if (is_turn_off_intentional_attr(fndecl)) {
-+ cur_node->intentional_attr_decl = MARK_TURN_OFF;
-+ return;
-+ }
-+
-+ if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
-+ cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
-+ else if (is_yes_intentional_attr(fndecl, cur_node->num))
-+ cur_node->intentional_attr_decl = MARK_YES;
-+
-+ cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
-+ print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
-+}
-+
-+// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
-+static bool skip_asm(const_tree arg)
-+{
-+ gimple def_stmt = get_def_stmt(arg);
-+
-+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
-+ return false;
-+
-+ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
-+ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
-+}
-+
-+static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ walk_use_def(visited, cur_node, arg);
-+ }
-+}
-+
-+static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
-+{
-+ gimple def_stmt = get_def_stmt(lhs);
-+ tree rhs1, rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ walk_use_def(visited, cur_node, rhs1);
-+ walk_use_def(visited, cur_node, rhs2);
-+}
-+
-+static void insert_last_node(struct interesting_node *cur_node, tree node)
-+{
-+ unsigned int i;
-+ tree element;
-+ enum tree_code code;
-+
-+ gcc_assert(node != NULL_TREE);
-+
-+ if (is_gimple_constant(node))
-+ return;
-+
-+ code = TREE_CODE(node);
-+ if (code == VAR_DECL) {
-+ node = DECL_ORIGIN(node);
-+ code = TREE_CODE(node);
-+ }
-+
-+ if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
-+ return;
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
-+#else
-+ FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
-+#endif
-+ if (operand_equal_p(node, element, 0))
-+ return;
-+ }
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
-+ VEC_safe_push(tree, gc, cur_node->last_nodes, node);
-+#else
-+ gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
-+ vec_safe_push(cur_node->last_nodes, node);
-+#endif
-+}
-+
-+// a size_overflow asm stmt in the control flow doesn't stop the recursion
-+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
-+{
-+ if (!is_size_overflow_asm(stmt))
-+ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+}
-+
-+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
-+ * and component refs (for checking the intentional_overflow attribute).
-+ */
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) != SSA_NAME) {
-+ insert_last_node(cur_node, lhs);
-+ return;
-+ }
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
-+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return;
-+ insert_last_node(cur_node, fndecl);
-+ return;
-+ }
-+ case GIMPLE_PHI:
-+ return walk_use_def_phi(visited, cur_node, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return walk_use_def_binary(visited, cur_node, lhs);
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
-+static void set_last_nodes(struct interesting_node *cur_node)
-+{
-+ struct pointer_set_t *visited;
-+
-+ visited = pointer_set_create();
-+ walk_use_def(visited, cur_node, cur_node->node);
-+ pointer_set_destroy(visited);
-+}
-+
-+enum precond {
-+ NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
-+};
-+
-+/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
-+ * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
-+ * Stmt duplication is unnecessary if there are no binary/ternary assignements or if the unary assignment isn't a cast.
-+ * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assigments then we assume that it is some kind of error code.
-+ */
-+static enum precond check_preconditions(struct interesting_node *cur_node)
-+{
-+ bool interesting_conditions[3] = {false, false, false};
-+
-+ set_last_nodes(cur_node);
-+
-+ check_intentional_attribute_ipa(cur_node);
-+ if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ search_interesting_conditions(cur_node, interesting_conditions);
-+
-+ // error code
-+ if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
-+ return NO_ATTRIBUTE_SEARCH;
-+
-+ // unnecessary overflow check
-+ if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
-+ return NO_CHECK_INSERT;
-+
-+ if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
-+ return NO_CHECK_INSERT;
-+
-+ return NONE;
-+}
-+
-+/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
-+ * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
-+ * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
-+ */
-+static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
-+{
-+ enum precond ret;
-+ struct pointer_set_t *visited;
-+ tree new_node, orig_node = cur_node->node;
-+
-+ ret = check_preconditions(cur_node);
-+ if (ret == NO_ATTRIBUTE_SEARCH)
-+ return cnodes;
-+
-+ cnodes = search_overflow_attribute(cnodes, cur_node);
-+
-+ if (ret == NO_CHECK_INSERT)
-+ return cnodes;
-+
-+ visited = pointer_set_create();
-+ new_node = expand(visited, caller_node, orig_node);
-+ pointer_set_destroy(visited);
-+
-+ if (new_node == NULL_TREE)
-+ return cnodes;
-+
-+ change_orig_node(cur_node, new_node);
-+ check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
-+
-+ return cnodes;
-+}
-+
-+// Check visited interesting nodes.
-+static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
-+{
-+ struct interesting_node *cur;
-+
-+ for (cur = head; cur; cur = cur->next) {
-+ if (!operand_equal_p(node, cur->node, 0))
-+ continue;
-+ if (num != cur->num)
-+ continue;
-+ if (first_stmt == cur->first_stmt)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
-+ first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this
-+ last_nodes: they are the last stmts in the recursion (they haven't a def_stmt). They are useful in the missing size_overflow attribute check and
-+ the intentional_overflow attribute check. They are collected by set_last_nodes().
-+ num: arg count of a call stmt or 0 when it is a ret
-+ node: the recursion starts from here, it is a call arg or a return value
-+ fndecl: the fndecl of the interesting node when the node is an arg. it is the fndecl of the callee function otherwise it is the fndecl of the caller (current_function_fndecl) function.
-+ intentional_attr_decl: intentional_overflow attribute of the callee function
-+ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
-+ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
-+ */
-+static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
-+{
-+ struct interesting_node *new_node;
-+ tree fndecl;
-+ enum gimple_code code;
-+
-+ gcc_assert(node != NULL_TREE);
-+ code = gimple_code(first_stmt);
-+ gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
-+
-+ if (num == CANNOT_FIND_ARG)
-+ return head;
-+
-+ if (skip_types(node))
-+ return head;
-+
-+ if (skip_asm(node))
-+ return head;
-+
-+ if (is_gimple_call(first_stmt))
-+ fndecl = gimple_call_fndecl(first_stmt);
-+ else
-+ fndecl = current_function_decl;
-+
-+ if (fndecl == NULL_TREE)
-+ return head;
-+
-+ if (is_in_interesting_node(head, first_stmt, node, num))
-+ return head;
-+
-+ new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
-+
-+ new_node->next = NULL;
-+ new_node->first_stmt = first_stmt;
-+#if BUILDING_GCC_VERSION <= 4007
-+ new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
-+#else
-+ vec_alloc(new_node->last_nodes, VEC_LEN);
-+#endif
-+ new_node->num = num;
-+ new_node->node = node;
-+ new_node->fndecl = fndecl;
-+ new_node->intentional_attr_decl = MARK_NO;
-+ new_node->intentional_attr_cur_fndecl = MARK_NO;
-+ new_node->intentional_mark_from_gimple = asm_stmt;
-+
-+ if (!head)
-+ return new_node;
-+
-+ new_node->next = head;
-+ return new_node;
-+}
-+
-+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
-+ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
-+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
-+{
-+ struct next_cgraph_node *cur_node;
-+ tree ret = gimple_return_retval(stmt);
-+
-+ if (ret == NULL_TREE)
-+ return head;
-+
-+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
-+ continue;
-+ if (cur_node->num == 0)
-+ head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
-+ }
-+
-+ return head;
-+}
-+
-+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
-+ * If the call stmt is in the next cgraph node list then it's an interesting call.
-+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
-+{
-+ unsigned int argnum;
-+ tree arg;
-+ const_tree fndecl;
-+ struct next_cgraph_node *cur_node;
-+
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ return head;
-+
-+ for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
-+ if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
-+ continue;
-+ argnum = get_correct_arg_count(cur_node->num, fndecl);
-+ gcc_assert(argnum != CANNOT_FIND_ARG);
-+ if (argnum == 0)
-+ continue;
-+
-+ arg = gimple_call_arg(stmt, argnum - 1);
-+ head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
-+ }
-+
-+ return head;
-+}
-+
-+static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
-+{
-+ if (!operand_equal_p(orig_node, node, 0))
-+ return WRONG_NODE;
-+ if (skip_types(node))
-+ return WRONG_NODE;
-+ return ret_count;
-+}
-+
-+// Get the index of the rhs node in an assignment
-+static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
-+{
-+ const_tree rhs1, rhs2;
-+ unsigned int ret;
-+
-+ gcc_assert(stmt);
-+ gcc_assert(is_gimple_assign(stmt));
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ gcc_assert(rhs1 != NULL_TREE);
-+
-+ switch (gimple_num_ops(stmt)) {
-+ case 2:
-+ return check_ops(node, rhs1, 1);
-+ case 3:
-+ ret = check_ops(node, rhs1, 1);
-+ if (ret != WRONG_NODE)
-+ return ret;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ gcc_assert(rhs2 != NULL_TREE);
-+ return check_ops(node, rhs2, 2);
-+ default:
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
-+static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
-+{
-+ unsigned int i;
-+
-+ if (gimple_call_fndecl(stmt) == NULL_TREE)
-+ return CANNOT_FIND_ARG;
-+
-+ for (i = 0; i < gimple_call_num_args(stmt); i++) {
-+ tree node;
-+
-+ node = gimple_call_arg(stmt, i);
-+ if (!operand_equal_p(arg, node, 0))
-+ continue;
-+ if (!skip_types(node))
-+ return i + 1;
-+ }
-+
-+ return CANNOT_FIND_ARG;
-+}
-+
-+/* starting from the size_overflow asm stmt collect interesting stmts. They can be
-+ * any of return, call or assignment stmts (because of inlining).
-+ */
-+static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
-+{
-+ use_operand_p use_p;
-+ imm_use_iterator imm_iter;
-+ unsigned int argnum;
-+
-+ gcc_assert(TREE_CODE(node) == SSA_NAME);
-+
-+ if (pointer_set_insert(visited, node))
-+ return head;
-+
-+ FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
-+ gimple stmt = USE_STMT(use_p);
-+
-+ if (stmt == NULL)
-+ return head;
-+ if (is_gimple_debug(stmt))
-+ continue;
-+
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_CALL:
-+ argnum = find_arg_number_gimple(node, stmt);
-+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
-+ break;
-+ case GIMPLE_RETURN:
-+ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
-+ break;
-+ case GIMPLE_ASSIGN:
-+ argnum = get_assign_ops_count(stmt, node);
-+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
-+ break;
-+ case GIMPLE_PHI: {
-+ tree result = gimple_phi_result(stmt);
-+ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
-+ break;
-+ }
-+ case GIMPLE_ASM:
-+ if (gimple_asm_noutputs(stmt) != 0)
-+ break;
-+ if (!is_size_overflow_asm(stmt))
-+ break;
-+ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
-+ break;
-+ case GIMPLE_COND:
-+ case GIMPLE_SWITCH:
-+ break;
-+ default:
-+ debug_gimple_stmt(stmt);
-+ gcc_unreachable();
-+ break;
-+ }
-+ }
-+ return head;
-+}
-+
-+static void remove_size_overflow_asm(gimple stmt)
-+{
-+ gimple_stmt_iterator gsi;
-+ tree input, output;
-+
-+ if (!is_size_overflow_asm(stmt))
-+ return;
-+
-+ if (gimple_asm_noutputs(stmt) == 0) {
-+ gsi = gsi_for_stmt(stmt);
-+ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
-+ gsi_remove(&gsi, true);
-+ return;
-+ }
-+
-+ input = gimple_asm_input_op(stmt, 0);
-+ output = gimple_asm_output_op(stmt, 0);
-+ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
-+}
-+
-+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
-+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
-+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
-+ */
-+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
-+{
-+ const_tree output;
-+ struct pointer_set_t *visited;
-+ gimple intentional_asm = NOT_INTENTIONAL_ASM;
-+
-+ if (!is_size_overflow_asm(stmt))
-+ return head;
-+
-+ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
-+ intentional_asm = stmt;
-+
-+ gcc_assert(gimple_asm_ninputs(stmt) == 1);
-+
-+ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
-+ return head;
-+
-+ if (gimple_asm_noutputs(stmt) == 0) {
-+ const_tree input;
-+
-+ if (!is_size_overflow_intentional_asm_turn_off(stmt))
-+ return head;
-+
-+ input = gimple_asm_input_op(stmt, 0);
-+ remove_size_overflow_asm(stmt);
-+ if (is_gimple_constant(TREE_VALUE(input)))
-+ return head;
-+ visited = pointer_set_create();
-+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
-+ pointer_set_destroy(visited);
-+ return head;
-+ }
-+
-+ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
-+ remove_size_overflow_asm(stmt);
-+
-+ visited = pointer_set_create();
-+ output = gimple_asm_output_op(stmt, 0);
-+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
-+ pointer_set_destroy(visited);
-+ return head;
-+}
-+
-+/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
-+ * or a call stmt or a return stmt and store them in the interesting_node list
-+ */
-+static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
-+{
-+ basic_block bb;
-+ struct interesting_node *head = NULL;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ enum gimple_code code;
-+ gimple stmt = gsi_stmt(gsi);
-+
-+ code = gimple_code(stmt);
-+
-+ if (code == GIMPLE_ASM)
-+ head = handle_stmt_by_size_overflow_asm(stmt, head);
-+
-+ if (!next_node)
-+ continue;
-+ if (code == GIMPLE_CALL)
-+ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
-+ if (code == GIMPLE_RETURN)
-+ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
-+ }
-+ }
-+ return head;
-+}
-+
-+static void set_current_function_decl(tree fndecl)
-+{
-+ gcc_assert(fndecl != NULL_TREE);
-+
-+ push_cfun(DECL_STRUCT_FUNCTION(fndecl));
-+ calculate_dominance_info(CDI_DOMINATORS);
-+ current_function_decl = fndecl;
-+}
-+
-+static void unset_current_function_decl(void)
-+{
-+ free_dominance_info(CDI_DOMINATORS);
-+ pop_cfun();
-+ current_function_decl = NULL_TREE;
-+}
-+
-+static void free_interesting_node(struct interesting_node *head)
-+{
-+ struct interesting_node *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_free(tree, gc, head->last_nodes);
-+#else
-+ vec_free(head->last_nodes);
-+#endif
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node)
-+{
-+ struct visited *new_visited;
-+
-+ new_visited = (struct visited *)xmalloc(sizeof(*new_visited));
-+ new_visited->fndecl = cur_node->fndecl;
-+ new_visited->num = cur_node->num;
-+ new_visited->next = NULL;
-+
-+ if (!head)
-+ return new_visited;
-+
-+ new_visited->next = head;
-+ return new_visited;
-+}
-+
-+/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are same then
-+ * it is a visited function.
-+ */
-+static bool is_visited_function(struct visited *head, struct interesting_node *cur_node)
-+{
-+ struct visited *cur;
-+
-+ if (!head)
-+ return false;
-+
-+ if (get_stmt_flag(cur_node->first_stmt) != VISITED_STMT)
-+ return false;
-+
-+ for (cur = head; cur; cur = cur->next) {
-+ if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
-+ continue;
-+ if (cur_node->num == cur->num)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static void free_next_cgraph_node(struct next_cgraph_node *head)
-+{
-+ struct next_cgraph_node *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+static void remove_all_size_overflow_asm(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ remove_size_overflow_asm(gsi_stmt(si));
-+ }
-+}
-+
-+/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
-+ * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
-+ * the newly collected interesting functions (they are interesting if there is control flow between
-+ * the interesting stmts and them).
-+ */
-+static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited)
-+{
-+ struct interesting_node *head, *cur_node;
-+ struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
-+
-+ set_current_function_decl(NODE_DECL(node));
-+ call_count = 0;
-+
-+ head = collect_interesting_stmts(next_node);
-+
-+ for (cur_node = head; cur_node; cur_node = cur_node->next) {
-+ if (is_visited_function(visited, cur_node))
-+ continue;
-+ cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node);
-+ set_stmt_flag(cur_node->first_stmt, VISITED_STMT);
-+ visited = insert_visited_function(visited, cur_node);
-+ }
-+
-+ free_interesting_node(head);
-+ remove_all_size_overflow_asm();
-+ unset_current_function_decl();
-+
-+ for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
-+ visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited);
-+
-+ free_next_cgraph_node(cnodes_head);
-+ return visited;
-+}
-+
-+static void free_visited(struct visited *head)
-+{
-+ struct visited *cur;
-+
-+ while (head) {
-+ cur = head->next;
-+ free(head);
-+ head = cur;
-+ }
-+}
-+
-+// erase the local flag
-+static void set_plf_false(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ set_stmt_flag(gsi_stmt(si), NO_FLAGS);
-+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
-+ set_stmt_flag(gsi_stmt(si), NO_FLAGS);
-+ }
-+}
-+
-+// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
-+static unsigned int search_function(void)
-+{
-+ struct cgraph_node *node;
-+ struct visited *visited = NULL;
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ set_current_function_decl(NODE_DECL(node));
-+ set_plf_false();
-+ unset_current_function_decl();
-+ }
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ gcc_assert(cgraph_function_flags_ready);
-+#if BUILDING_GCC_VERSION <= 4007
-+ gcc_assert(node->reachable);
-+#endif
-+
-+ visited = handle_function(node, NULL, visited);
-+ }
-+
-+ free_visited(visited);
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data ipa_pass_data = {
-+#else
-+static struct ipa_opt_pass_d ipa_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "size_overflow",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = search_function,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class ipa_pass : public ipa_opt_pass_d {
-+public:
-+ ipa_pass() : ipa_opt_pass_d(ipa_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+ unsigned int execute() { return search_function(); }
-+};
-+}
-+
-+static opt_pass *make_ipa_pass(void)
-+{
-+ return new ipa_pass();
-+}
-+#else
-+static struct opt_pass *make_ipa_pass(void)
-+{
-+ return &ipa_pass.pass;
-+}
-+#endif
-+
-+// data for the size_overflow asm stmt
-+struct asm_data {
-+ gimple def_stmt;
-+ tree input;
-+ tree output;
-+};
-+
-+#if BUILDING_GCC_VERSION <= 4007
-+static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
-+#else
-+static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
-+#endif
-+{
-+ tree list;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *vec_list = NULL;
-+#else
-+ vec<tree, va_gc> *vec_list = NULL;
-+#endif
-+
-+ list = build_tree_list(NULL_TREE, string);
-+ list = chainon(NULL_TREE, build_tree_list(list, io));
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC_safe_push(tree, gc, vec_list, list);
-+#else
-+ vec_safe_push(vec_list, list);
-+#endif
-+ return vec_list;
-+}
-+
-+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
-+{
-+ gimple asm_stmt;
-+ gimple_stmt_iterator gsi;
-+#if BUILDING_GCC_VERSION <= 4007
-+ VEC(tree, gc) *input, *output = NULL;
-+#else
-+ vec<tree, va_gc> *input, *output = NULL;
-+#endif
-+
-+ input = create_asm_io_list(str_input, asm_data->input);
-+
-+ if (asm_data->output)
-+ output = create_asm_io_list(str_output, asm_data->output);
-+
-+ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
-+ gsi = gsi_for_stmt(asm_data->def_stmt);
-+ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
-+
-+ if (asm_data->output)
-+ SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
-+}
-+
-+static void replace_call_lhs(const struct asm_data *asm_data)
-+{
-+ gimple_set_lhs(asm_data->def_stmt, asm_data->input);
-+ update_stmt(asm_data->def_stmt);
-+ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
-+}
-+
-+static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
-+{
-+ enum mark cur_fndecl_attr;
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ cur_fndecl_attr = search_intentional(visited, arg);
-+ if (cur_fndecl_attr != MARK_NO)
-+ return cur_fndecl_attr;
-+ }
-+ return MARK_NO;
-+}
-+
-+static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ enum mark cur_fndecl_attr;
-+ const_tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(lhs);
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ cur_fndecl_attr = search_intentional(visited, rhs1);
-+ if (cur_fndecl_attr != MARK_NO)
-+ return cur_fndecl_attr;
-+ return search_intentional(visited, rhs2);
-+}
-+
-+// Look up the intentional_overflow attribute on the caller and the callee functions.
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) != SSA_NAME)
-+ return get_intentional_attr_type(lhs);
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return MARK_NO;
-+
-+ if (pointer_set_contains(visited, def_stmt))
-+ return MARK_NO;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return search_intentional(visited, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
-+ return MARK_TURN_OFF;
-+ return MARK_NO;
-+ case GIMPLE_CALL:
-+ return MARK_NO;
-+ case GIMPLE_PHI:
-+ return search_intentional_phi(visited, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return search_intentional_binary(visited, lhs);
-+ }
-+ case GIMPLE_RETURN:
-+ return MARK_NO;
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
-+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
-+{
-+ const_tree fndecl;
-+ struct pointer_set_t *visited;
-+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
-+
-+ fndecl = get_interesting_orig_fndecl(stmt, argnum);
-+ if (is_end_intentional_intentional_attr(fndecl, argnum))
-+ decl_attr = MARK_NOT_INTENTIONAL;
-+ else if (is_yes_intentional_attr(fndecl, argnum))
-+ decl_attr = MARK_YES;
-+ else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
-+ return MARK_TURN_OFF;
-+ }
-+
-+ visited = pointer_set_create();
-+ cur_fndecl_attr = search_intentional(visited, arg);
-+ pointer_set_destroy(visited);
-+
-+ switch (cur_fndecl_attr) {
-+ case MARK_NO:
-+ case MARK_TURN_OFF:
-+ return cur_fndecl_attr;
-+ default:
-+ print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
-+ return MARK_YES;
-+ }
-+}
-+
-+static void check_missing_size_overflow_attribute(tree var)
-+{
-+ tree orig_fndecl;
-+ unsigned int num;
-+
-+ if (is_a_return_check(var))
-+ orig_fndecl = DECL_ORIGIN(var);
-+ else
-+ orig_fndecl = DECL_ORIGIN(current_function_decl);
-+
-+ num = get_function_num(var, orig_fndecl);
-+ if (num == CANNOT_FIND_ARG)
-+ return;
-+
-+ is_missing_function(orig_fndecl, num);
-+}
-+
-+static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
-+{
-+ gimple phi = get_def_stmt(result);
-+ unsigned int i, n = gimple_phi_num_args(phi);
-+
-+ pointer_set_insert(visited, phi);
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(phi, i);
-+
-+ search_size_overflow_attribute(visited, arg);
-+ }
-+}
-+
-+static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ const_gimple def_stmt = get_def_stmt(lhs);
-+ tree rhs1, rhs2;
-+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
-+
-+ search_size_overflow_attribute(visited, rhs1);
-+ search_size_overflow_attribute(visited, rhs2);
-+}
-+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
-+{
-+ const_gimple def_stmt;
-+
-+ if (TREE_CODE(lhs) == PARM_DECL) {
-+ check_missing_size_overflow_attribute(lhs);
-+ return;
-+ }
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
-+ case GIMPLE_ASM:
-+ return;
-+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
-+
-+ if (fndecl == NULL_TREE)
-+ return;
-+ check_missing_size_overflow_attribute(fndecl);
-+ return;
-+ }
-+ case GIMPLE_PHI:
-+ return search_size_overflow_attribute_phi(visited, lhs);
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
-+ case 3:
-+ return search_size_overflow_attribute_binary(visited, lhs);
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Search missing entries in the hash table (invoked from the gimple pass)
-+static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
-+{
-+ tree fndecl = NULL_TREE;
-+ tree lhs;
-+ struct pointer_set_t *visited;
-+
-+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
-+ return;
-+
-+ if (num == 0) {
-+ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
-+ lhs = gimple_return_retval(stmt);
-+ } else {
-+ gcc_assert(is_gimple_call(stmt));
-+ lhs = gimple_call_arg(stmt, num - 1);
-+ fndecl = gimple_call_fndecl(stmt);
-+ }
-+
-+ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
-+ return;
-+
-+ visited = pointer_set_create();
-+ search_size_overflow_attribute(visited, lhs);
-+ pointer_set_destroy(visited);
-+}
-+
-+static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+ gimple_stmt_iterator gsi;
-+ gimple assign;
-+
-+ assign = gimple_build_assign(asm_data->input, asm_data->output);
-+ gsi = gsi_for_stmt(stmt);
-+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
-+ asm_data->def_stmt = assign;
-+
-+ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
-+ asm_data->output = make_ssa_name(asm_data->output, stmt);
-+ if (gimple_code(stmt) == GIMPLE_RETURN)
-+ gimple_return_set_retval(stmt, asm_data->output);
-+ else
-+ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
-+ update_stmt(stmt);
-+}
-+
-+static char *create_asm_comment(unsigned int argnum, const_gimple stmt , const char *mark_str)
-+{
-+ const char *fn_name;
-+ char *asm_comment;
-+ unsigned int len;
-+
-+ if (argnum == 0)
-+ fn_name = DECL_NAME_POINTER(current_function_decl);
-+ else
-+ fn_name = DECL_NAME_POINTER(gimple_call_fndecl(stmt));
-+
-+ len = asprintf(&asm_comment, "%s %s %u", mark_str, fn_name, argnum);
-+ gcc_assert(len > 0);
-+
-+ return asm_comment;
-+}
-+
-+static const char *convert_mark_to_str(enum mark mark)
-+{
-+ switch (mark) {
-+ case MARK_NO:
-+ return OK_ASM_STR;
-+ case MARK_YES:
-+ case MARK_NOT_INTENTIONAL:
-+ return YES_ASM_STR;
-+ case MARK_TURN_OFF:
-+ return TURN_OFF_ASM_STR;
-+ }
-+
-+ gcc_unreachable();
-+}
-+
-+/* Create the input of the size_overflow asm stmt.
-+ * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
-+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
-+ * otherwise create the input (for a phi stmt the output too) of the asm stmt.
-+ */
-+static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
-+{
-+ if (!asm_data->def_stmt) {
-+ asm_data->input = NULL_TREE;
-+ return;
-+ }
-+
-+ asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
-+ asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
-+
-+ switch (gimple_code(asm_data->def_stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ replace_call_lhs(asm_data);
-+ break;
-+ case GIMPLE_PHI:
-+ create_output_from_phi(stmt, argnum, asm_data);
-+ break;
-+ case GIMPLE_NOP: {
-+ enum mark mark;
-+ const char *mark_str;
-+ char *asm_comment;
-+
-+ mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
-+
-+ asm_data->input = asm_data->output;
-+ asm_data->output = NULL;
-+ asm_data->def_stmt = stmt;
-+
-+ mark_str = convert_mark_to_str(mark);
-+ asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+
-+ create_asm_stmt(asm_comment, build_string(2, "rm"), NULL, asm_data);
-+ free(asm_comment);
-+ asm_data->input = NULL_TREE;
-+ break;
-+ }
-+ case GIMPLE_ASM:
-+ if (is_size_overflow_asm(asm_data->def_stmt)) {
-+ asm_data->input = NULL_TREE;
-+ break;
-+ }
-+ default:
-+ debug_gimple_stmt(asm_data->def_stmt);
-+ gcc_unreachable();
-+ }
-+}
-+
-+/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
-+ * is of the right kind create the appropriate size_overflow asm stmts:
-+ * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
-+ * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
-+ */
-+static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
-+{
-+ struct asm_data asm_data;
-+ const char *mark_str;
-+ char *asm_comment;
-+ enum mark mark;
-+
-+ if (is_gimple_constant(output_node))
-+ return;
-+
-+ asm_data.output = output_node;
-+ mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
-+ if (mark != MARK_TURN_OFF)
-+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
-+
-+ asm_data.def_stmt = get_def_stmt(asm_data.output);
-+ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
-+ return;
-+
-+ create_asm_input(stmt, argnum, &asm_data);
-+ if (asm_data.input == NULL_TREE)
-+ return;
-+
-+ mark_str = convert_mark_to_str(mark);
-+ asm_comment = create_asm_comment(argnum, stmt, mark_str);
-+ create_asm_stmt(asm_comment, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
-+ free(asm_comment);
-+}
-+
-+// Insert an asm stmt with "MARK_TURN_OFF", "MARK_YES" or "MARK_NOT_INTENTIONAL".
-+static bool create_mark_asm(gimple stmt, enum mark mark)
-+{
-+ struct asm_data asm_data;
-+ const char *asm_str;
-+
-+ switch (mark) {
-+ case MARK_TURN_OFF:
-+ asm_str = TURN_OFF_ASM_STR;
-+ break;
-+ case MARK_NOT_INTENTIONAL:
-+ case MARK_YES:
-+ asm_str = YES_ASM_STR;
-+ break;
-+ default:
-+ gcc_unreachable();
-+ }
-+
-+ asm_data.def_stmt = stmt;
-+ asm_data.output = gimple_call_lhs(stmt);
-+
-+ if (asm_data.output == NULL_TREE) {
-+ asm_data.input = gimple_call_arg(stmt, 0);
-+ if (is_gimple_constant(asm_data.input))
-+ return false;
-+ asm_data.output = NULL;
-+ create_asm_stmt(asm_str, build_string(2, "rm"), NULL, &asm_data);
-+ return true;
-+ }
-+
-+ create_asm_input(stmt, 0, &asm_data);
-+ gcc_assert(asm_data.input != NULL_TREE);
-+
-+ create_asm_stmt(asm_str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
-+ return true;
-+}
-+
-+static bool is_from_cast(const_tree node)
-+{
-+ gimple def_stmt = get_def_stmt(node);
-+
-+ if (!def_stmt)
-+ return false;
-+
-+ if (gimple_assign_cast_p(def_stmt))
-+ return true;
-+
-+ return false;
-+}
-+
-+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool skip_ptr_minus(gimple stmt)
-+{
-+ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
-+
-+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
-+ return false;
-+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ if (!is_from_cast(rhs1))
-+ return false;
-+
-+ rhs2 = gimple_assign_rhs2(stmt);
-+ if (!is_from_cast(rhs2))
-+ return false;
-+
-+ ptr1_rhs = gimple_assign_rhs1(get_def_stmt(rhs1));
-+ ptr2_rhs = gimple_assign_rhs1(get_def_stmt(rhs2));
-+
-+ if (TREE_CODE(TREE_TYPE(ptr1_rhs)) != POINTER_TYPE && TREE_CODE(TREE_TYPE(ptr2_rhs)) != POINTER_TYPE)
-+ return false;
-+
-+ create_mark_asm(stmt, MARK_YES);
-+ return true;
-+}
-+
-+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
-+{
-+ gimple def_stmt;
-+
-+ def_stmt = get_def_stmt(lhs);
-+ if (!def_stmt)
-+ return;
-+
-+ if (pointer_set_insert(visited, def_stmt))
-+ return;
-+
-+ switch (gimple_code(def_stmt)) {
-+ case GIMPLE_NOP:
-+ case GIMPLE_ASM:
-+ case GIMPLE_CALL:
-+ break;
-+ case GIMPLE_PHI: {
-+ unsigned int i, n = gimple_phi_num_args(def_stmt);
-+
-+ pointer_set_insert(visited, def_stmt);
-+
-+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(def_stmt, i);
-+
-+ walk_use_def_ptr(visited, arg);
-+ }
-+ }
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
-+ case 2:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+ return;
-+ case 3:
-+ if (skip_ptr_minus(def_stmt))
-+ return;
-+
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
-+ return;
-+ default:
-+ return;
-+ }
-+ default:
-+ debug_gimple_stmt((gimple)def_stmt);
-+ error("%s: unknown gimple code", __func__);
-+ gcc_unreachable();
-+ }
-+}
-+
-+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
-+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
-+{
-+ struct pointer_set_t *visited;
-+
-+ visited = pointer_set_create();
-+ walk_use_def_ptr(visited, arg);
-+ pointer_set_destroy(visited);
-+}
-+
-+// Determine the return value and insert the asm stmt to mark the return stmt.
-+static void insert_asm_ret(gimple stmt)
-+{
-+ tree ret;
-+
-+ ret = gimple_return_retval(stmt);
-+ create_size_overflow_asm(stmt, ret, 0);
-+}
-+
-+// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
-+static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
-+{
-+ tree arg;
-+ unsigned int argnum;
-+
-+ argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
-+ gcc_assert(argnum != 0);
-+ if (argnum == CANNOT_FIND_ARG)
-+ return;
-+
-+ arg = gimple_call_arg(stmt, argnum - 1);
-+ gcc_assert(arg != NULL_TREE);
-+
-+ // skip all ptr - ptr expressions
-+ insert_mark_not_intentional_asm_at_ptr(arg);
-+
-+ create_size_overflow_asm(stmt, arg, argnum);
-+}
-+
-+// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
-+static void set_argnum_attribute(const_tree attr, bool *argnums)
-+{
-+ unsigned int argnum;
-+ tree attr_value;
-+
-+ for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
-+ argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
-+ argnums[argnum] = true;
-+ }
-+}
-+
-+// If a function arg or the return value is in the hash table then set its index in the array.
-+static void set_argnum_hash(tree fndecl, bool *argnums)
-+{
-+ unsigned int num;
-+ const struct size_overflow_hash *hash;
-+
-+ hash = get_function_hash(DECL_ORIGIN(fndecl));
-+ if (!hash)
-+ return;
-+
-+ for (num = 0; num <= MAX_PARAM; num++) {
-+ if (!(hash->param & (1U << num)))
-+ continue;
-+
-+ argnums[num] = true;
-+ }
-+}
-+
-+static bool is_all_the_argnums_empty(bool *argnums)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i <= MAX_PARAM; i++)
-+ if (argnums[i])
-+ return false;
-+ return true;
-+}
-+
-+// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
-+static void search_interesting_args(tree fndecl, bool *argnums)
-+{
-+ const_tree attr;
-+
-+ set_argnum_hash(fndecl, argnums);
-+ if (!is_all_the_argnums_empty(argnums))
-+ return;
-+
-+ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
-+ if (attr && TREE_VALUE(attr))
-+ set_argnum_attribute(attr, argnums);
-+}
-+
-+/*
-+ * Look up the intentional_overflow attribute that turns off ipa based duplication
-+ * on the callee function.
-+ */
-+static bool is_mark_turn_off_attribute(gimple stmt)
-+{
-+ enum mark mark;
-+ const_tree fndecl = gimple_call_fndecl(stmt);
-+
-+ mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
-+ if (mark == MARK_TURN_OFF)
-+ return true;
-+ return false;
-+}
-+
-+// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
-+static void handle_interesting_function(gimple stmt)
-+{
-+ unsigned int argnum;
-+ tree fndecl;
-+ bool orig_argnums[MAX_PARAM + 1] = {false};
-+
-+ if (gimple_call_num_args(stmt) == 0)
-+ return;
-+ fndecl = gimple_call_fndecl(stmt);
-+ if (fndecl == NULL_TREE)
-+ return;
-+ fndecl = DECL_ORIGIN(fndecl);
-+
-+ if (is_mark_turn_off_attribute(stmt)) {
-+ create_mark_asm(stmt, MARK_TURN_OFF);
-+ return;
-+ }
-+
-+ search_interesting_args(fndecl, orig_argnums);
-+
-+ for (argnum = 1; argnum < MAX_PARAM; argnum++)
-+ if (orig_argnums[argnum])
-+ insert_asm_arg(stmt, argnum);
-+}
-+
-+// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
-+static void handle_interesting_ret(gimple stmt)
-+{
-+ bool orig_argnums[MAX_PARAM + 1] = {false};
-+
-+ search_interesting_args(current_function_decl, orig_argnums);
-+
-+ if (orig_argnums[0])
-+ insert_asm_ret(stmt);
-+}
-+
-+// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
-+static unsigned int search_interesting_functions(void)
-+{
-+ basic_block bb;
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator gsi;
-+
-+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
-+ gimple stmt = gsi_stmt(gsi);
-+
-+ if (is_size_overflow_asm(stmt))
-+ continue;
-+
-+ if (is_gimple_call(stmt))
-+ handle_interesting_function(stmt);
-+ else if (gimple_code(stmt) == GIMPLE_RETURN)
-+ handle_interesting_ret(stmt);
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
-+ * this pass inserts asm stmts to mark the interesting args
-+ * that the ipa pass will detect and insert the size overflow checks for.
-+ */
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data insert_size_overflow_asm_pass_data = {
-+#else
-+static struct gimple_opt_pass insert_size_overflow_asm_pass = {
-+ .pass = {
-+#endif
-+ .type = GIMPLE_PASS,
-+ .name = "insert_size_overflow_asm",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = search_interesting_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = PROP_cfg,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
-+#if BUILDING_GCC_VERSION < 4009
-+ }
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class insert_size_overflow_asm_pass : public gimple_opt_pass {
-+public:
-+ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
-+ unsigned int execute() { return search_interesting_functions(); }
-+};
-+}
-+
-+static opt_pass *make_insert_size_overflow_asm_pass(void)
-+{
-+ return new insert_size_overflow_asm_pass();
-+}
-+#else
-+static struct opt_pass *make_insert_size_overflow_asm_pass(void)
-+{
-+ return &insert_size_overflow_asm_pass.pass;
-+}
-+#endif
-+
-+// Create the noreturn report_size_overflow() function decl.
-+static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
-+{
-+ tree const_char_ptr_type_node;
-+ tree fntype;
-+
-+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
-+
-+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
-+ fntype = build_function_type_list(void_type_node,
-+ const_char_ptr_type_node,
-+ unsigned_type_node,
-+ const_char_ptr_type_node,
-+ const_char_ptr_type_node,
-+ NULL_TREE);
-+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
-+
-+ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
-+ TREE_PUBLIC(report_size_overflow_decl) = 1;
-+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
-+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
-+ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
-+}
-+
-+static unsigned int dump_functions(void)
-+{
-+ struct cgraph_node *node;
-+
-+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ basic_block bb;
-+
-+ push_cfun(DECL_STRUCT_FUNCTION(NODE_DECL(node)));
-+ current_function_decl = NODE_DECL(node);
-+
-+ fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", DECL_NAME_POINTER(current_function_decl));
-+
-+ FOR_ALL_BB_FN(bb, cfun) {
-+ gimple_stmt_iterator si;
-+
-+ fprintf(stderr, "<bb %u>:\n", bb->index);
-+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
-+ debug_gimple_stmt(gsi_stmt(si));
-+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
-+ debug_gimple_stmt(gsi_stmt(si));
-+ fprintf(stderr, "\n");
-+ }
-+
-+ fprintf(stderr, "-------------------------------------------------------------------------\n");
-+
-+ pop_cfun();
-+ current_function_decl = NULL_TREE;
-+ }
-+
-+ fprintf(stderr, "###############################################################################\n");
-+
-+ return 0;
-+}
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+static const struct pass_data dump_pass_data = {
-+#else
-+static struct ipa_opt_pass_d dump_pass = {
-+ .pass = {
-+#endif
-+ .type = SIMPLE_IPA_PASS,
-+ .name = "dump",
-+#if BUILDING_GCC_VERSION >= 4008
-+ .optinfo_flags = OPTGROUP_NONE,
-+#endif
-+#if BUILDING_GCC_VERSION >= 4009
-+ .has_gate = false,
-+ .has_execute = true,
-+#else
-+ .gate = NULL,
-+ .execute = dump_functions,
-+ .sub = NULL,
-+ .next = NULL,
-+ .static_pass_number = 0,
-+#endif
-+ .tv_id = TV_NONE,
-+ .properties_required = 0,
-+ .properties_provided = 0,
-+ .properties_destroyed = 0,
-+ .todo_flags_start = 0,
-+ .todo_flags_finish = 0,
-+#if BUILDING_GCC_VERSION < 4009
-+ },
-+ .generate_summary = NULL,
-+ .write_summary = NULL,
-+ .read_summary = NULL,
-+#if BUILDING_GCC_VERSION >= 4006
-+ .write_optimization_summary = NULL,
-+ .read_optimization_summary = NULL,
-+#endif
-+ .stmt_fixup = NULL,
-+ .function_transform_todo_flags_start = 0,
-+ .function_transform = NULL,
-+ .variable_transform = NULL,
-+#endif
-+};
-+
-+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
-+class dump_pass : public ipa_opt_pass_d {
-+public:
-+ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
-+ unsigned int execute() { return dump_functions(); }
-+};
-+}
-+
-+static opt_pass *make_dump_pass(void)
-+{
-+ return new dump_pass();
-+}
-+#else
-+static struct opt_pass *make_dump_pass(void)
-+{
-+ return &dump_pass.pass;
-+}
-+#endif
-+
-+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
-+{
-+ int i;
-+ const char * const plugin_name = plugin_info->base_name;
-+ const int argc = plugin_info->argc;
-+ const struct plugin_argument * const argv = plugin_info->argv;
-+ bool enable = true;
-+ struct register_pass_info insert_size_overflow_asm_pass_info;
-+ struct register_pass_info __unused dump_before_pass_info;
-+ struct register_pass_info __unused dump_after_pass_info;
-+ struct register_pass_info ipa_pass_info;
-+ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
-+ {
-+ .base = &report_size_overflow_decl,
-+ .nelt = 1,
-+ .stride = sizeof(report_size_overflow_decl),
-+ .cb = &gt_ggc_mx_tree_node,
-+ .pchw = &gt_pch_nx_tree_node
-+ },
-+ LAST_GGC_ROOT_TAB
-+ };
-+
-+ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
-+ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
-+ insert_size_overflow_asm_pass_info.ref_pass_instance_number = 1;
-+ insert_size_overflow_asm_pass_info.pos_op = PASS_POS_INSERT_AFTER;
-+
-+ dump_before_pass_info.pass = make_dump_pass();
-+ dump_before_pass_info.reference_pass_name = "increase_alignment";
-+ dump_before_pass_info.ref_pass_instance_number = 1;
-+ dump_before_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ ipa_pass_info.pass = make_ipa_pass();
-+ ipa_pass_info.reference_pass_name = "increase_alignment";
-+ ipa_pass_info.ref_pass_instance_number = 1;
-+ ipa_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ dump_after_pass_info.pass = make_dump_pass();
-+ dump_after_pass_info.reference_pass_name = "increase_alignment";
-+ dump_after_pass_info.ref_pass_instance_number = 1;
-+ dump_after_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
-+
-+ if (!plugin_default_version_check(version, &gcc_version)) {
-+ error(G_("incompatible gcc/plugin versions"));
-+ return 1;
-+ }
-+
-+ for (i = 0; i < argc; ++i) {
-+ if (!strcmp(argv[i].key, "no-size-overflow")) {
-+ enable = false;
-+ continue;
-+ }
-+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
-+ }
-+
-+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
-+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
-+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
-+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
-+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
-+ }
-+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
-+
-+ return 0;
-+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
index 0000000..dd94983
@@ -123759,7 +124617,7 @@ index 0000000..4ee2231
+ return 0;
+}
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
-index 6789d78..4afd019e 100644
+index 6789d788..4afd019e 100644
--- a/tools/perf/util/include/asm/alternative-asm.h
+++ b/tools/perf/util/include/asm/alternative-asm.h
@@ -5,4 +5,7 @@
diff --git a/3.2.59/4450_grsec-kconfig-default-gids.patch b/3.2.59/4450_grsec-kconfig-default-gids.patch
index d3f1d5d..f3f6f14 100644
--- a/3.2.59/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.59/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
--- a/grsecurity/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/grsecurity/Kconfig 2012-10-13 09:52:32.000000000 -0400
-@@ -651,7 +651,7 @@
+@@ -664,7 +664,7 @@
config GRKERNSEC_AUDIT_GID
int "GID for auditing"
depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
config GRKERNSEC_EXECLOG
bool "Exec logging"
-@@ -882,7 +882,7 @@
+@@ -895,7 +895,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*enabled* for. If the sysctl option is enabled, a sysctl option
-@@ -891,7 +891,7 @@
+@@ -904,7 +904,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -984,7 +984,7 @@
+@@ -997,7 +997,7 @@
config GRKERNSEC_SOCKET_ALL_GID
int "GID to deny all sockets for"
depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Here you can choose the GID to disable socket access for. Remember to
add the users you want socket access disabled for to the GID
-@@ -1005,7 +1005,7 @@
+@@ -1018,7 +1018,7 @@
config GRKERNSEC_SOCKET_CLIENT_GID
int "GID to deny client sockets for"
depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
help
Here you can choose the GID to disable client socket access for.
Remember to add the users you want client socket access disabled for to
-@@ -1023,7 +1023,7 @@
+@@ -1036,7 +1036,7 @@
config GRKERNSEC_SOCKET_SERVER_GID
int "GID to deny server sockets for"
depends on GRKERNSEC_SOCKET_SERVER
diff --git a/3.2.59/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.59/4465_selinux-avc_audit-log-curr_ip.patch
index ad26eba..e10ec6d 100644
--- a/3.2.59/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.59/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
--- a/grsecurity/Kconfig 2011-04-17 19:25:54.000000000 -0400
+++ b/grsecurity/Kconfig 2011-04-17 19:32:53.000000000 -0400
-@@ -1118,6 +1118,27 @@
+@@ -1131,6 +1131,27 @@
menu "Logging Options"
depends on GRKERNSEC