author     Anthony G. Basile <blueness@gentoo.org>    2011-12-09 16:55:07 -0500
committer  Anthony G. Basile <blueness@gentoo.org>    2011-12-09 16:55:07 -0500
commit     d1ce233f7d706b83c0e39124190402aa523f6bf4 (patch)
tree       2b396dfd938ef8ef4c3f8955f589ede48622ea8d
parent     Grsec/PaX: 2.2.2-2.6.32.49-201112041811 + 2.2.2-3.1.4-201112041811 (diff)
download   hardened-patchset-d1ce233f7d706b83c0e39124190402aa523f6bf4.tar.gz
           hardened-patchset-d1ce233f7d706b83c0e39124190402aa523f6bf4.tar.bz2
           hardened-patchset-d1ce233f7d706b83c0e39124190402aa523f6bf4.zip

Grsec/PaX: 2.2.2-2.6.32.49-201112082138 + 2.2.2-3.1.4-201112082139 (tag: 20111208)
-rw-r--r--  2.6.32/0000_README                                                                                                                 2
-rw-r--r--  2.6.32/4420_grsecurity-2.2.2-2.6.32.49-201112082138.patch (renamed from 2.6.32/4420_grsecurity-2.2.2-2.6.32.49-201112041811.patch)    52
-rw-r--r--  3.1.4/0000_README                                                                                                                  2
-rw-r--r--  3.1.4/4420_grsecurity-2.2.2-3.1.4-201112082139.patch (renamed from 3.1.4/4420_grsecurity-2.2.2-3.1.4-201112041811.patch)            383
4 files changed, 286 insertions, 153 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index fd7832e..c1c7356 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -3,7 +3,7 @@ README
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.2.2-2.6.32.49-201112041811.patch
+Patch: 4420_grsecurity-2.2.2-2.6.32.49-201112082138.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.2.2-2.6.32.49-201112041811.patch b/2.6.32/4420_grsecurity-2.2.2-2.6.32.49-201112082138.patch
index dce7335..6bf32ae 100644
--- a/2.6.32/4420_grsecurity-2.2.2-2.6.32.49-201112041811.patch
+++ b/2.6.32/4420_grsecurity-2.2.2-2.6.32.49-201112082138.patch
@@ -52262,7 +52262,7 @@ index c5ef152..1363194 100644
+}
+#endif
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 67f7dc0..7171c9a 100644
+index 67f7dc0..e95ea4f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -102,6 +102,22 @@ struct pid_entry {
@@ -52622,13 +52622,10 @@ index 67f7dc0..7171c9a 100644
inode->i_op = &proc_tgid_base_inode_operations;
inode->i_fop = &proc_tgid_base_operations;
inode->i_flags|=S_IMMUTABLE;
-@@ -2777,7 +2902,14 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
+@@ -2777,7 +2902,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
if (!task)
goto out;
-+ if (!has_group_leader_pid(task))
-+ goto out_put_task;
-+
+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+ goto out_put_task;
+
@@ -52637,7 +52634,7 @@ index 67f7dc0..7171c9a 100644
put_task_struct(task);
out:
return result;
-@@ -2842,6 +2974,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+@@ -2842,6 +2971,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned int nr;
struct task_struct *reaper;
@@ -52649,7 +52646,7 @@ index 67f7dc0..7171c9a 100644
struct tgid_iter iter;
struct pid_namespace *ns;
-@@ -2865,8 +3002,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+@@ -2865,8 +2999,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
for (iter = next_tgid(ns, iter);
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
@@ -52678,7 +52675,7 @@ index 67f7dc0..7171c9a 100644
put_task_struct(iter.task);
goto out;
}
-@@ -2892,7 +3048,7 @@ static const struct pid_entry tid_base_stuff[] = {
+@@ -2892,7 +3045,7 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
@@ -52687,7 +52684,7 @@ index 67f7dc0..7171c9a 100644
INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
-@@ -2916,10 +3072,10 @@ static const struct pid_entry tid_base_stuff[] = {
+@@ -2916,10 +3069,10 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
@@ -70642,7 +70639,7 @@ index 0b5b5fc..419b86a 100644
if (ret < 0)
return ret;
diff --git a/kernel/exit.c b/kernel/exit.c
-index 0f8fae3..11757f1 100644
+index 0f8fae3..9344a56 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,6 +55,10 @@
@@ -70667,30 +70664,7 @@ index 0f8fae3..11757f1 100644
tracehook_prepare_release_task(p);
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials */
-@@ -341,11 +349,22 @@ static void reparent_to_kthreadd(void)
- {
- write_lock_irq(&tasklist_lock);
-
-+#ifdef CONFIG_GRKERNSEC
-+ write_lock(&grsec_exec_file_lock);
-+ if (current->exec_file) {
-+ fput(current->exec_file);
-+ current->exec_file = NULL;
-+ }
-+ write_unlock(&grsec_exec_file_lock);
-+#endif
-+
- ptrace_unlink(current);
- /* Reparent to init */
- current->real_parent = current->parent = kthreadd_task;
- list_move_tail(&current->sibling, &current->real_parent->children);
-
-+ gr_set_kernel_label(current);
-+
- /* Set the exit signal to SIGCHLD so we signal init on exit */
- current->exit_signal = SIGCHLD;
-
-@@ -397,7 +416,7 @@ int allow_signal(int sig)
+@@ -397,7 +405,7 @@ int allow_signal(int sig)
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
@@ -70699,7 +70673,7 @@ index 0f8fae3..11757f1 100644
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
-@@ -433,6 +452,17 @@ void daemonize(const char *name, ...)
+@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
vsnprintf(current->comm, sizeof(current->comm), name, args);
va_end(args);
@@ -70717,7 +70691,7 @@ index 0f8fae3..11757f1 100644
/*
* If we were started as result of loading a module, close all of the
* user space pages. We don't need them, and if we didn't close them
-@@ -897,17 +927,17 @@ NORET_TYPE void do_exit(long code)
+@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
struct task_struct *tsk = current;
int group_dead;
@@ -70742,7 +70716,7 @@ index 0f8fae3..11757f1 100644
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
* continuing. Amongst other possible reasons, this is to prevent
* mm_release()->clear_child_tid() from writing to a user-controlled
-@@ -915,6 +945,13 @@ NORET_TYPE void do_exit(long code)
+@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
*/
set_fs(USER_DS);
@@ -70756,7 +70730,7 @@ index 0f8fae3..11757f1 100644
tracehook_report_exit(&code);
validate_creds_for_do_exit(tsk);
-@@ -973,6 +1010,9 @@ NORET_TYPE void do_exit(long code)
+@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
@@ -70766,7 +70740,7 @@ index 0f8fae3..11757f1 100644
exit_mm(tsk);
if (group_dead)
-@@ -1188,7 +1228,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
+@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
if (unlikely(wo->wo_flags & WNOWAIT)) {
int exit_code = p->exit_code;
diff --git a/3.1.4/0000_README b/3.1.4/0000_README
index 933d2ae..2858d71 100644
--- a/3.1.4/0000_README
+++ b/3.1.4/0000_README
@@ -7,7 +7,7 @@ Patch: 1003_linux-3.1.4.patch
From: http://www.kernel.org
Desc: Linux 3.1.4
-Patch: 4420_grsecurity-2.2.2-3.1.4-201112041811.patch
+Patch: 4420_grsecurity-2.2.2-3.1.4-201112082139.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.1.4/4420_grsecurity-2.2.2-3.1.4-201112041811.patch b/3.1.4/4420_grsecurity-2.2.2-3.1.4-201112082139.patch
index 5d26ec9..9a6ec41 100644
--- a/3.1.4/4420_grsecurity-2.2.2-3.1.4-201112041811.patch
+++ b/3.1.4/4420_grsecurity-2.2.2-3.1.4-201112082139.patch
@@ -611,6 +611,25 @@ index fadd5f8..904e73a 100644
} else if (!cause) {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 86976d0..8a57797 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -239,6 +239,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ static inline u64 atomic64_read(atomic64_t *v)
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d..6ef1e03 100644
--- a/arch/arm/include/asm/elf.h
@@ -10784,7 +10803,7 @@ index 566e803..89f1e60 100644
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
-index 1c66d30..c299480 100644
+index 1c66d30..d407072 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -10,6 +10,9 @@
@@ -10797,25 +10816,41 @@ index 1c66d30..c299480 100644
/*
* Copy To/From Userspace
-@@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *from, unsigned len)
+@@ -17,12 +20,12 @@
+
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+-copy_user_generic_string(void *to, const void *from, unsigned len);
++copy_user_generic_string(void *to, const void *from, unsigned long len);
+ __must_check unsigned long
+-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
++copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
+
+ static __always_inline __must_check unsigned long
+-copy_user_generic(void *to, const void *from, unsigned len)
++copy_user_generic(void *to, const void *from, unsigned long len)
+ {
+ unsigned ret;
+
+@@ -36,138 +39,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
return ret;
}
--__must_check unsigned long
--_copy_to_user(void __user *to, const void *from, unsigned len);
--__must_check unsigned long
--_copy_from_user(void *to, const void __user *from, unsigned len);
+static __always_inline __must_check unsigned long
-+__copy_to_user(void __user *to, const void *from, unsigned len);
++__copy_to_user(void __user *to, const void *from, unsigned long len);
+static __always_inline __must_check unsigned long
-+__copy_from_user(void *to, const void __user *from, unsigned len);
++__copy_from_user(void *to, const void __user *from, unsigned long len);
__must_check unsigned long
- copy_in_user(void __user *to, const void __user *from, unsigned len);
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len);
static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
-- unsigned long n)
-+ unsigned n)
+ unsigned long n)
{
- int sz = __compiletime_object_size(to);
-
@@ -10829,7 +10864,7 @@ index 1c66d30..c299480 100644
+
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
-+ else if ((int)n > 0) {
++ else if (n < INT_MAX) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
+ memset(to, 0, n);
@@ -10837,7 +10872,9 @@ index 1c66d30..c299480 100644
return n;
}
-@@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
+ static __always_inline __must_check
+-int copy_to_user(void __user *dst, const void *src, unsigned size)
++int copy_to_user(void __user *dst, const void *src, unsigned long size)
{
might_fault();
@@ -10849,7 +10886,7 @@ index 1c66d30..c299480 100644
static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
{
- int ret = 0;
+ int sz = __compiletime_object_size(dst);
@@ -10861,7 +10898,7 @@ index 1c66d30..c299480 100644
+
+ pax_track_stack();
+
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -10939,7 +10976,7 @@ index 1c66d30..c299480 100644
static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
{
- int ret = 0;
+ int sz = __compiletime_object_size(src);
@@ -10951,7 +10988,7 @@ index 1c66d30..c299480 100644
+
+ pax_track_stack();
+
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11029,7 +11066,7 @@ index 1c66d30..c299480 100644
static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
- int ret = 0;
+ unsigned ret = 0;
@@ -11039,7 +11076,7 @@ index 1c66d30..c299480 100644
- return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
+
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11115,14 +11152,17 @@ index 1c66d30..c299480 100644
}
}
-@@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+@@ -219,35 +318,74 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
+ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+
static __must_check __always_inline int
- __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
{
- return copy_user_generic(dst, (__force const void *)src, size);
+ pax_track_stack();
+
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11137,11 +11177,12 @@ index 1c66d30..c299480 100644
}
-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+static __must_check __always_inline unsigned long
- __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
{
- return copy_user_generic((__force void *)dst, src, size);
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11156,16 +11197,17 @@ index 1c66d30..c299480 100644
}
-extern long __copy_user_nocache(void *dst, const void __user *src,
+- unsigned size, int zerorest);
+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
- unsigned size, int zerorest);
++ unsigned long size, int zerorest);
-static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
{
might_sleep();
+
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11178,10 +11220,11 @@ index 1c66d30..c299480 100644
-static inline int
-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+- unsigned size)
+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
- unsigned size)
++ unsigned long size)
{
-+ if ((int)size < 0)
++ if (size > INT_MAX)
+ return size;
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -11195,7 +11238,7 @@ index 1c66d30..c299480 100644
-unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+extern unsigned long
-+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
@@ -13365,7 +13408,7 @@ index f3f6f53..0841b66 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 6419bb0..4f4cf2b 100644
+index 6419bb0..00440bf 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,8 @@
@@ -13724,6 +13767,17 @@ index 6419bb0..4f4cf2b 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
+@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+- addq $8*6, %rsp
+- CFI_ADJUST_CFA_OFFSET -(6*8)
++ addq $8*6 + ARG_SKIP, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
+ .endm
+
+ /*
@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
movq %rsp, %rsi
@@ -14149,9 +14203,11 @@ index 6419bb0..4f4cf2b 100644
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-@@ -1184,9 +1554,10 @@ ENTRY(kernel_execve)
+@@ -1182,11 +1552,11 @@ ENTRY(kernel_execve)
+ RESTORE_REST
+ testq %rax,%rax
je int_ret_from_sys_call
- RESTORE_ARGS
+- RESTORE_ARGS
UNFAKE_STACK_FRAME
+ pax_force_retaddr
ret
@@ -14161,7 +14217,7 @@ index 6419bb0..4f4cf2b 100644
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
-@@ -1204,9 +1575,10 @@ ENTRY(call_softirq)
+@@ -1204,9 +1574,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -14173,7 +14229,7 @@ index 6419bb0..4f4cf2b 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1244,7 +1616,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1244,7 +1615,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -14182,7 +14238,7 @@ index 6419bb0..4f4cf2b 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1303,7 +1675,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1303,7 +1674,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -14191,7 +14247,7 @@ index 6419bb0..4f4cf2b 100644
apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1352,16 +1724,31 @@ ENTRY(paranoid_exit)
+@@ -1352,16 +1723,31 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
@@ -14224,7 +14280,7 @@ index 6419bb0..4f4cf2b 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1390,7 +1777,7 @@ paranoid_schedule:
+@@ -1390,7 +1776,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -14233,7 +14289,7 @@ index 6419bb0..4f4cf2b 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1417,12 +1804,13 @@ ENTRY(error_entry)
+@@ -1417,12 +1803,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -14248,7 +14304,7 @@ index 6419bb0..4f4cf2b 100644
ret
/*
-@@ -1449,7 +1837,7 @@ bstep_iret:
+@@ -1449,7 +1836,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -14257,7 +14313,7 @@ index 6419bb0..4f4cf2b 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1469,7 +1857,7 @@ ENTRY(error_exit)
+@@ -1469,7 +1856,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -14266,7 +14322,7 @@ index 6419bb0..4f4cf2b 100644
/* runs on exception stack */
-@@ -1481,6 +1869,16 @@ ENTRY(nmi)
+@@ -1481,6 +1868,16 @@ ENTRY(nmi)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
DEFAULT_FRAME 0
@@ -14283,7 +14339,7 @@ index 6419bb0..4f4cf2b 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1491,12 +1889,28 @@ ENTRY(nmi)
+@@ -1491,12 +1888,28 @@ ENTRY(nmi)
DISABLE_INTERRUPTS(CLBR_NONE)
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
@@ -14313,7 +14369,7 @@ index 6419bb0..4f4cf2b 100644
jmp irq_return
nmi_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1525,14 +1939,14 @@ nmi_schedule:
+@@ -1525,14 +1938,14 @@ nmi_schedule:
jmp paranoid_exit
CFI_ENDPROC
#endif
@@ -14851,7 +14907,7 @@ index ce0be7c..c41476e 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index e11e394..3d66dfe 100644
+index e11e394..9aebc5d 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,8 @@
@@ -14863,22 +14919,25 @@ index e11e394..3d66dfe 100644
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
-@@ -38,6 +40,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
+L4_VMALLOC_START = pgd_index(VMALLOC_START)
+L3_VMALLOC_START = pud_index(VMALLOC_START)
++L4_VMALLOC_END = pgd_index(VMALLOC_END)
++L3_VMALLOC_END = pud_index(VMALLOC_END)
+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
.text
__HEAD
-@@ -85,35 +91,22 @@ startup_64:
+@@ -85,35 +93,23 @@ startup_64:
*/
addq %rbp, init_level4_pgt + 0(%rip)
addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
++ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
@@ -14920,7 +14979,7 @@ index e11e394..3d66dfe 100644
/*
* Fixup the kernel text+data virtual addresses. Note that
-@@ -160,8 +153,8 @@ ENTRY(secondary_startup_64)
+@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
* after the boot processor executes this code.
*/
@@ -14931,7 +14990,7 @@ index e11e394..3d66dfe 100644
movq %rax, %cr4
/* Setup early boot stage 4 level pagetables. */
-@@ -183,9 +176,16 @@ ENTRY(secondary_startup_64)
+@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_SCE, %eax /* Enable System Call */
@@ -14944,12 +15003,13 @@ index e11e394..3d66dfe 100644
+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
+#endif
+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -247,6 +247,7 @@ ENTRY(secondary_startup_64)
+@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
* jump. In addition we need to ensure %cs is set so we make this
* a far return.
*/
@@ -14957,7 +15017,7 @@ index e11e394..3d66dfe 100644
movq initial_code(%rip),%rax
pushq $0 # fake return address to stop unwinder
pushq $__KERNEL_CS # set correct cs
-@@ -269,7 +270,7 @@ ENTRY(secondary_startup_64)
+@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
bad_address:
jmp bad_address
@@ -14966,7 +15026,7 @@ index e11e394..3d66dfe 100644
#ifdef CONFIG_EARLY_PRINTK
.globl early_idt_handlers
early_idt_handlers:
-@@ -314,18 +315,23 @@ ENTRY(early_idt_handler)
+@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
#endif /* EARLY_PRINTK */
1: hlt
jmp 1b
@@ -14991,7 +15051,7 @@ index e11e394..3d66dfe 100644
#define NEXT_PAGE(name) \
.balign PAGE_SIZE; \
ENTRY(name)
-@@ -338,7 +344,6 @@ ENTRY(name)
+@@ -338,7 +348,6 @@ ENTRY(name)
i = i + 1 ; \
.endr
@@ -14999,12 +15059,14 @@ index e11e394..3d66dfe 100644
/*
* This default setting generates an ident mapping at address 0x100000
* and a mapping for the kernel that precisely maps virtual address
-@@ -349,13 +354,36 @@ NEXT_PAGE(init_level4_pgt)
+@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
.org init_level4_pgt + L4_PAGE_OFFSET*8, 0
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
-+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
++ .org init_level4_pgt + L4_VMALLOC_END*8, 0
++ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
.org init_level4_pgt + L4_START_KERNEL*8, 0
@@ -15027,7 +15089,10 @@ index e11e394..3d66dfe 100644
+ .fill 510,8,0
+#endif
+
-+NEXT_PAGE(level3_vmalloc_pgt)
++NEXT_PAGE(level3_vmalloc_start_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level3_vmalloc_end_pgt)
+ .fill 512,8,0
+
+NEXT_PAGE(level3_vmemmap_pgt)
@@ -15036,7 +15101,7 @@ index e11e394..3d66dfe 100644
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
-@@ -363,20 +391,23 @@ NEXT_PAGE(level3_kernel_pgt)
+@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -15068,7 +15133,7 @@ index e11e394..3d66dfe 100644
NEXT_PAGE(level2_kernel_pgt)
/*
-@@ -389,33 +420,55 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
* If you want to increase this then increase MODULES_VADDR
* too.)
*/
@@ -20473,19 +20538,73 @@ index 36b0d15..d381858 100644
xor %eax,%eax
EXIT
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
-index 1cad221..6cc4b8d 100644
+index 1cad221..de671ee 100644
--- a/arch/x86/lib/rwlock.S
+++ b/arch/x86/lib/rwlock.S
-@@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
+@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
+ FRAME
+ 0: LOCK_PREFIX
+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1: rep; nop
+ cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
+ jne 1b
+ LOCK_PREFIX
WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
jnz 0b
ENDFRAME
+ pax_force_retaddr
ret
CFI_ENDPROC
END(__write_lock_failed)
-@@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
+@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
+ FRAME
+ 0: LOCK_PREFIX
+ READ_LOCK_SIZE(inc) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ READ_LOCK_SIZE(dec) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
+ 1: rep; nop
+ READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
+ js 1b
+ LOCK_PREFIX
READ_LOCK_SIZE(dec) (%__lock_ptr)
++
++#ifdef CONFIG_PAX_REFCOUNT
++ jno 1234f
++ LOCK_PREFIX
++ READ_LOCK_SIZE(inc) (%__lock_ptr)
++ int $4
++1234:
++ _ASM_EXTABLE(1234b, 1234b)
++#endif
++
js 0b
ENDFRAME
+ pax_force_retaddr
@@ -21174,7 +21293,7 @@ index e218d5d..35679b4 100644
+EXPORT_SYMBOL(set_fs);
+#endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
-index b7c2849..5ef0f95 100644
+index b7c2849..8633ad8 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -42,6 +42,12 @@ long
@@ -21203,9 +21322,12 @@ index b7c2849..5ef0f95 100644
/* no memory constraint because it doesn't change any memory gcc knows
about */
asm volatile(
-@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
+@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
+ }
+ EXPORT_SYMBOL(strlen_user);
- unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
+-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
++unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
{
- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
- return copy_user_generic((__force void *)to, (__force void *)from, len);
@@ -21231,7 +21353,7 @@ index b7c2849..5ef0f95 100644
*/
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
-+copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
{
char c;
unsigned zero_len;
@@ -28117,7 +28239,7 @@ index 7916bd9..7c17a0f 100644
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-index 4934cf8..52e8e83 100644
+index 4934cf8..1da9c84 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
@@ -28129,6 +28251,18 @@ index 4934cf8..52e8e83 100644
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
+@@ -864,9 +864,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+
+ static int
+ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+- int count)
++ unsigned int count)
+ {
+- int i;
++ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9cbb0cd..958a31f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
@@ -47425,6 +47559,29 @@ index f711921..28d5958 100644
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
newattrs.ia_valid |= ATTR_UID;
+diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
+index 6296b40..417c00f 100644
+--- a/fs/partitions/efi.c
++++ b/fs/partitions/efi.c
+@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+ if (!gpt)
+ return NULL;
+
+- count = le32_to_cpu(gpt->num_partition_entries) *
+- le32_to_cpu(gpt->sizeof_partition_entry);
+- if (!count)
++ if (!le32_to_cpu(gpt->num_partition_entries))
+ return NULL;
+- pte = kzalloc(count, GFP_KERNEL);
++ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
++ count = le32_to_cpu(gpt->num_partition_entries) *
++ le32_to_cpu(gpt->sizeof_partition_entry);
+ if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+ (u8 *) pte,
+ count) < count) {
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index af9fdf0..75b15c3 100644
--- a/fs/partitions/ldm.c
@@ -47720,7 +47877,7 @@ index 3a1dafd..c7fed72 100644
+}
+#endif
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 5eb0206..7e0dc06 100644
+index 5eb0206..fe01db4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -107,6 +107,22 @@ struct pid_entry {
@@ -48079,13 +48236,10 @@ index 5eb0206..7e0dc06 100644
inode->i_op = &proc_tgid_base_inode_operations;
inode->i_fop = &proc_tgid_base_operations;
inode->i_flags|=S_IMMUTABLE;
-@@ -3031,7 +3156,14 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
+@@ -3031,7 +3156,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
if (!task)
goto out;
-+ if (!has_group_leader_pid(task))
-+ goto out_put_task;
-+
+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
+ goto out_put_task;
+
@@ -48094,7 +48248,7 @@ index 5eb0206..7e0dc06 100644
put_task_struct(task);
out:
return result;
-@@ -3096,6 +3228,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+@@ -3096,6 +3225,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned int nr;
struct task_struct *reaper;
@@ -48106,7 +48260,7 @@ index 5eb0206..7e0dc06 100644
struct tgid_iter iter;
struct pid_namespace *ns;
-@@ -3119,8 +3256,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
+@@ -3119,8 +3253,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
for (iter = next_tgid(ns, iter);
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
@@ -48135,7 +48289,7 @@ index 5eb0206..7e0dc06 100644
put_task_struct(iter.task);
goto out;
}
-@@ -3148,7 +3304,7 @@ static const struct pid_entry tid_base_stuff[] = {
+@@ -3148,7 +3301,7 @@ static const struct pid_entry tid_base_stuff[] = {
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
@@ -48144,7 +48298,7 @@ index 5eb0206..7e0dc06 100644
INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
INF("cmdline", S_IRUGO, proc_pid_cmdline),
-@@ -3172,10 +3328,10 @@ static const struct pid_entry tid_base_stuff[] = {
+@@ -3172,10 +3325,10 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
@@ -59609,6 +59763,34 @@ index b7babf0..71e4e74 100644
+#endif
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
+index b18ce4f..2ee2843 100644
+--- a/include/asm-generic/atomic64.h
++++ b/include/asm-generic/atomic64.h
+@@ -16,6 +16,8 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++typedef atomic64_t atomic64_unchecked_t;
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ extern long long atomic64_read(const atomic64_t *v);
+@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
index 1bfcfe5..e04c5c9 100644
--- a/include/asm-generic/cache.h
@@ -65391,7 +65573,7 @@ index 0f85778..0d43716 100644
/*
diff --git a/kernel/exit.c b/kernel/exit.c
-index 2913b35..86c7364 100644
+index 2913b35..4465c81 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -57,6 +57,10 @@
@@ -65416,30 +65598,7 @@ index 2913b35..86c7364 100644
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
-@@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
- {
- write_lock_irq(&tasklist_lock);
-
-+#ifdef CONFIG_GRKERNSEC
-+ write_lock(&grsec_exec_file_lock);
-+ if (current->exec_file) {
-+ fput(current->exec_file);
-+ current->exec_file = NULL;
-+ }
-+ write_unlock(&grsec_exec_file_lock);
-+#endif
-+
- ptrace_unlink(current);
- /* Reparent to init */
- current->real_parent = current->parent = kthreadd_task;
- list_move_tail(&current->sibling, &current->real_parent->children);
-
-+ gr_set_kernel_label(current);
-+
- /* Set the exit signal to SIGCHLD so we signal init on exit */
- current->exit_signal = SIGCHLD;
-
-@@ -380,7 +399,7 @@ int allow_signal(int sig)
+@@ -380,7 +388,7 @@ int allow_signal(int sig)
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
@@ -65448,7 +65607,7 @@ index 2913b35..86c7364 100644
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
-@@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
+@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
vsnprintf(current->comm, sizeof(current->comm), name, args);
va_end(args);
@@ -65466,7 +65625,7 @@ index 2913b35..86c7364 100644
/*
* If we were started as result of loading a module, close all of the
* user space pages. We don't need them, and if we didn't close them
-@@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
+@@ -895,6 +914,8 @@ NORET_TYPE void do_exit(long code)
struct task_struct *tsk = current;
int group_dead;
@@ -65475,7 +65634,7 @@ index 2913b35..86c7364 100644
profile_task_exit(tsk);
WARN_ON(blk_needs_flush_plug(tsk));
-@@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
+@@ -911,7 +932,6 @@ NORET_TYPE void do_exit(long code)
* mm_release()->clear_child_tid() from writing to a user-controlled
* kernel address.
*/
@@ -65483,7 +65642,7 @@ index 2913b35..86c7364 100644
ptrace_event(PTRACE_EVENT_EXIT, code);
-@@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
+@@ -973,6 +993,9 @@ NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
@@ -80051,7 +80210,7 @@ index 0000000..d41b5af
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..5b07edd
+index 0000000..704a564
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
@@ -0,0 +1,303 @@
@@ -80190,7 +80349,7 @@ index 0000000..5b07edd
+ .type_required = false,
+ .function_type_required = false,
+ .handler = handle_no_const_attribute,
-+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
++#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = true
+#endif
+};
@@ -80203,7 +80362,7 @@ index 0000000..5b07edd
+ .type_required = false,
+ .function_type_required = false,
+ .handler = handle_do_const_attribute,
-+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
++#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = true
+#endif
+};
@@ -80291,7 +80450,7 @@ index 0000000..5b07edd
+ tree var;
+ referenced_var_iterator rvi;
+
-+#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
++#if BUILDING_GCC_VERSION == 4005
+ FOR_EACH_REFERENCED_VAR(var, rvi) {
+#else
+ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
@@ -80887,7 +81046,7 @@ index 0000000..51f747e
+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
-index 0000000..41dd4b1
+index 0000000..d44f37c
--- /dev/null
+++ b/tools/gcc/stackleak_plugin.c
@@ -0,0 +1,291 @@
@@ -81017,7 +81176,7 @@ index 0000000..41dd4b1
+ gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
+}
+
-+#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
++#if BUILDING_GCC_VERSION == 4005
+static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
+{
+ tree fndecl;
@@ -81039,7 +81198,7 @@ index 0000000..41dd4b1
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
+ return true;
+
-+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
++#if BUILDING_GCC_VERSION >= 4007
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
+ return true;
+#endif
@@ -81115,7 +81274,7 @@ index 0000000..41dd4b1
+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
+ // 2. delete call
+ insn = delete_insn_and_edges(insn);
-+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
++#if BUILDING_GCC_VERSION >= 4007
+ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
+ insn = delete_insn_and_edges(insn);
+#endif