author     Anthony G. Basile <blueness@gentoo.org>    2014-02-17 07:35:38 -0500
committer  Anthony G. Basile <blueness@gentoo.org>    2014-02-17 07:35:38 -0500
commit     6b98a13ee988b69e6fbfbbcb6a5dc5c43b4b19f4 (patch)
tree       283672b54325e31b89c5d0898df4ae733e645d13
parent     Grsec/PaX: 3.0-{3.2.54,3.13.2}-201402111747 (diff)
download   hardened-patchset-6b98a13ee988b69e6fbfbbcb6a5dc5c43b4b19f4.tar.gz
           hardened-patchset-6b98a13ee988b69e6fbfbbcb6a5dc5c43b4b19f4.tar.bz2
           hardened-patchset-6b98a13ee988b69e6fbfbbcb6a5dc5c43b4b19f4.zip
Grsec/PaX: 3.0-{3.2.55,3.13.3}-201402152204 (tag: 20140215)
-rw-r--r--  3.13.3/0000_README (renamed from 3.13.2/0000_README)  2
-rw-r--r--  3.13.3/4420_grsecurity-3.0-3.13.3-201402152204.patch (renamed from 3.13.2/4420_grsecurity-3.0-3.13.2-201402111747.patch)  908
-rw-r--r--  3.13.3/4425_grsec_remove_EI_PAX.patch (renamed from 3.13.2/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.13.3/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.13.2/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.13.3/4430_grsec-remove-localversion-grsec.patch (renamed from 3.13.2/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.13.3/4435_grsec-mute-warnings.patch (renamed from 3.13.2/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.13.3/4440_grsec-remove-protected-paths.patch (renamed from 3.13.2/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.13.3/4450_grsec-kconfig-default-gids.patch (renamed from 3.13.2/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.13.3/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.13.2/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.13.3/4470_disable-compat_vdso.patch (renamed from 3.13.2/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.13.3/4475_emutramp_default_on.patch (renamed from 3.13.2/4475_emutramp_default_on.patch)  0
-rw-r--r--  3.2.55/0000_README (renamed from 3.2.54/0000_README)  6
-rw-r--r--  3.2.55/1021_linux-3.2.22.patch (renamed from 3.2.54/1021_linux-3.2.22.patch)  0
-rw-r--r--  3.2.55/1022_linux-3.2.23.patch (renamed from 3.2.54/1022_linux-3.2.23.patch)  0
-rw-r--r--  3.2.55/1023_linux-3.2.24.patch (renamed from 3.2.54/1023_linux-3.2.24.patch)  0
-rw-r--r--  3.2.55/1024_linux-3.2.25.patch (renamed from 3.2.54/1024_linux-3.2.25.patch)  0
-rw-r--r--  3.2.55/1025_linux-3.2.26.patch (renamed from 3.2.54/1025_linux-3.2.26.patch)  0
-rw-r--r--  3.2.55/1026_linux-3.2.27.patch (renamed from 3.2.54/1026_linux-3.2.27.patch)  0
-rw-r--r--  3.2.55/1027_linux-3.2.28.patch (renamed from 3.2.54/1027_linux-3.2.28.patch)  0
-rw-r--r--  3.2.55/1028_linux-3.2.29.patch (renamed from 3.2.54/1028_linux-3.2.29.patch)  0
-rw-r--r--  3.2.55/1029_linux-3.2.30.patch (renamed from 3.2.54/1029_linux-3.2.30.patch)  0
-rw-r--r--  3.2.55/1030_linux-3.2.31.patch (renamed from 3.2.54/1030_linux-3.2.31.patch)  0
-rw-r--r--  3.2.55/1031_linux-3.2.32.patch (renamed from 3.2.54/1031_linux-3.2.32.patch)  0
-rw-r--r--  3.2.55/1032_linux-3.2.33.patch (renamed from 3.2.54/1032_linux-3.2.33.patch)  0
-rw-r--r--  3.2.55/1033_linux-3.2.34.patch (renamed from 3.2.54/1033_linux-3.2.34.patch)  0
-rw-r--r--  3.2.55/1034_linux-3.2.35.patch (renamed from 3.2.54/1034_linux-3.2.35.patch)  0
-rw-r--r--  3.2.55/1035_linux-3.2.36.patch (renamed from 3.2.54/1035_linux-3.2.36.patch)  0
-rw-r--r--  3.2.55/1036_linux-3.2.37.patch (renamed from 3.2.54/1036_linux-3.2.37.patch)  0
-rw-r--r--  3.2.55/1037_linux-3.2.38.patch (renamed from 3.2.54/1037_linux-3.2.38.patch)  0
-rw-r--r--  3.2.55/1038_linux-3.2.39.patch (renamed from 3.2.54/1038_linux-3.2.39.patch)  0
-rw-r--r--  3.2.55/1039_linux-3.2.40.patch (renamed from 3.2.54/1039_linux-3.2.40.patch)  0
-rw-r--r--  3.2.55/1040_linux-3.2.41.patch (renamed from 3.2.54/1040_linux-3.2.41.patch)  0
-rw-r--r--  3.2.55/1041_linux-3.2.42.patch (renamed from 3.2.54/1041_linux-3.2.42.patch)  0
-rw-r--r--  3.2.55/1042_linux-3.2.43.patch (renamed from 3.2.54/1042_linux-3.2.43.patch)  0
-rw-r--r--  3.2.55/1043_linux-3.2.44.patch (renamed from 3.2.54/1043_linux-3.2.44.patch)  0
-rw-r--r--  3.2.55/1044_linux-3.2.45.patch (renamed from 3.2.54/1044_linux-3.2.45.patch)  0
-rw-r--r--  3.2.55/1045_linux-3.2.46.patch (renamed from 3.2.54/1045_linux-3.2.46.patch)  0
-rw-r--r--  3.2.55/1046_linux-3.2.47.patch (renamed from 3.2.54/1046_linux-3.2.47.patch)  0
-rw-r--r--  3.2.55/1047_linux-3.2.48.patch (renamed from 3.2.54/1047_linux-3.2.48.patch)  0
-rw-r--r--  3.2.55/1048_linux-3.2.49.patch (renamed from 3.2.54/1048_linux-3.2.49.patch)  0
-rw-r--r--  3.2.55/1049_linux-3.2.50.patch (renamed from 3.2.54/1049_linux-3.2.50.patch)  0
-rw-r--r--  3.2.55/1050_linux-3.2.51.patch (renamed from 3.2.54/1050_linux-3.2.51.patch)  0
-rw-r--r--  3.2.55/1051_linux-3.2.52.patch (renamed from 3.2.54/1051_linux-3.2.52.patch)  0
-rw-r--r--  3.2.55/1052_linux-3.2.53.patch (renamed from 3.2.54/1052_linux-3.2.53.patch)  0
-rw-r--r--  3.2.55/1053_linux-3.2.54.patch (renamed from 3.2.54/1053_linux-3.2.54.patch)  0
-rw-r--r--  3.2.55/1054_linux-3.2.55.patch  2495
-rw-r--r--  3.2.55/4420_grsecurity-3.0-3.2.55-201402152203.patch (renamed from 3.2.54/4420_grsecurity-3.0-3.2.54-201402111745.patch)  893
-rw-r--r--  3.2.55/4425_grsec_remove_EI_PAX.patch (renamed from 3.2.54/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.2.55/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.2.54/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.2.55/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.54/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.2.55/4435_grsec-mute-warnings.patch (renamed from 3.2.54/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.2.55/4440_grsec-remove-protected-paths.patch (renamed from 3.2.54/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.2.55/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.54/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.2.55/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.54/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.2.55/4470_disable-compat_vdso.patch (renamed from 3.2.54/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.2.55/4475_emutramp_default_on.patch (renamed from 3.2.54/4475_emutramp_default_on.patch)  0
56 files changed, 3384 insertions, 920 deletions
diff --git a/3.13.2/0000_README b/3.13.3/0000_README
index ce8a461..f4a0bac 100644
--- a/3.13.2/0000_README
+++ b/3.13.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.13.2-201402111747.patch
+Patch: 4420_grsecurity-3.0-3.13.3-201402152204.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.13.2/4420_grsecurity-3.0-3.13.2-201402111747.patch b/3.13.3/4420_grsecurity-3.0-3.13.3-201402152204.patch
index 3ac109b..2fcc457 100644
--- a/3.13.2/4420_grsecurity-3.0-3.13.2-201402111747.patch
+++ b/3.13.3/4420_grsecurity-3.0-3.13.3-201402152204.patch
@@ -287,7 +287,7 @@ index b9e9bd8..bf49b92 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index a7fd5d9..dc8e4db 100644
+index 704b508..4a788c4 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -11662,7 +11662,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 0952ecd..75e0e8a 100644
+index 0952ecd..725c779 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -249,7 +249,7 @@ config X86_HT
@@ -11674,14 +11674,14 @@ index 0952ecd..75e0e8a 100644
config ARCH_HWEIGHT_CFLAGS
string
-@@ -1104,6 +1104,7 @@ config MICROCODE_EARLY
+@@ -602,6 +602,7 @@ config SCHED_OMIT_FRAME_POINTER
- config X86_MSR
- tristate "/dev/cpu/*/msr - Model-specific register support"
-+ depends on !GRKERNSEC_KMEM
+ menuconfig HYPERVISOR_GUEST
+ bool "Linux guest support"
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST
---help---
- This device gives privileged processes access to the x86
- Model-Specific Registers (MSRs). It is a character device with
+ Say Y here to enable options for running Linux under various hyper-
+ visors. This option enables basic hypervisor detection and platform
@@ -1127,7 +1128,7 @@ choice
config NOHIGHMEM
@@ -17417,7 +17417,7 @@ index 2d88344..4679fc3 100644
#define EARLY_DYNAMIC_PAGE_TABLES 64
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index 0ecac25..7a15e09 100644
+index 840c127..a8f297b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -16,13 +16,12 @@
@@ -17457,7 +17457,7 @@ index 0ecac25..7a15e09 100644
#endif
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-@@ -146,6 +146,9 @@
+@@ -147,6 +147,9 @@
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
_PAGE_ACCESSED)
@@ -17467,7 +17467,7 @@ index 0ecac25..7a15e09 100644
#define __PAGE_KERNEL_EXEC \
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
-@@ -156,7 +159,7 @@
+@@ -157,7 +160,7 @@
#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
@@ -17476,7 +17476,7 @@ index 0ecac25..7a15e09 100644
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-@@ -218,8 +221,8 @@
+@@ -219,8 +222,8 @@
* bits are combined, this will alow user to access the high address mapped
* VDSO in the presence of CONFIG_COMPAT_VDSO
*/
@@ -17487,7 +17487,7 @@ index 0ecac25..7a15e09 100644
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif
-@@ -257,7 +260,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+@@ -258,7 +261,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
{
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
@@ -17505,7 +17505,7 @@ index 0ecac25..7a15e09 100644
#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
-@@ -271,8 +284,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+@@ -272,8 +285,6 @@ static inline pudval_t native_pud_val(pud_t pud)
return pud.pud;
}
#else
@@ -17514,7 +17514,7 @@ index 0ecac25..7a15e09 100644
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.pgd);
-@@ -292,8 +303,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+@@ -293,8 +304,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
return pmd.pmd;
}
#else
@@ -17523,7 +17523,7 @@ index 0ecac25..7a15e09 100644
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.pgd);
-@@ -333,7 +342,6 @@ typedef struct page *pgtable_t;
+@@ -334,7 +343,6 @@ typedef struct page *pgtable_t;
extern pteval_t __supported_pte_mask;
extern void set_nx(void);
@@ -20311,7 +20311,7 @@ index 59bfebc..d8f27bd 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 6abc172..3b0df94 100644
+index 6abc172..77b0d1b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
@@ -20375,8 +20375,18 @@ index 6abc172..3b0df94 100644
static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -288,6 +234,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -284,10 +230,68 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ raw_local_save_flags(eflags);
+ BUG_ON(eflags & X86_EFLAGS_AC);
+
+- if (cpu_has(c, X86_FEATURE_SMAP))
++ if (cpu_has(c, X86_FEATURE_SMAP)) {
++#ifdef CONFIG_X86_SMAP
set_in_cr4(X86_CR4_SMAP);
++#else
++ clear_in_cr4(X86_CR4_SMAP);
++#endif
++ }
}
+#ifdef CONFIG_X86_64
@@ -20435,7 +20445,7 @@ index 6abc172..3b0df94 100644
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -388,7 +387,7 @@ void switch_to_new_gdt(int cpu)
+@@ -388,7 +392,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -20444,7 +20454,7 @@ index 6abc172..3b0df94 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -877,6 +876,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -877,6 +881,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
@@ -20455,7 +20465,7 @@ index 6abc172..3b0df94 100644
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -885,6 +888,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -885,6 +893,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
@@ -20466,7 +20476,7 @@ index 6abc172..3b0df94 100644
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
const char *p;
-@@ -1072,10 +1079,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1072,10 +1084,12 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -20482,7 +20492,7 @@ index 6abc172..3b0df94 100644
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
-@@ -1089,7 +1098,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+@@ -1089,7 +1103,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -20491,7 +20501,7 @@ index 6abc172..3b0df94 100644
EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1239,7 +1248,7 @@ void cpu_init(void)
+@@ -1239,7 +1253,7 @@ void cpu_init(void)
load_ucode_ap();
cpu = stack_smp_processor_id();
@@ -20500,7 +20510,7 @@ index 6abc172..3b0df94 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1274,7 +1283,6 @@ void cpu_init(void)
+@@ -1274,7 +1288,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -20508,7 +20518,7 @@ index 6abc172..3b0df94 100644
enable_x2apic();
/*
-@@ -1326,7 +1334,7 @@ void cpu_init(void)
+@@ -1326,7 +1339,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -25121,10 +25131,41 @@ index 18be189..4a9fe40 100644
if ((s64)val != *(s32 *)loc)
goto overflow;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
-index 05266b5..1577fde 100644
+index 05266b5..3432443 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
-@@ -233,7 +233,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
+@@ -37,6 +37,7 @@
+ #include <linux/notifier.h>
+ #include <linux/uaccess.h>
+ #include <linux/gfp.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/processor.h>
+ #include <asm/msr.h>
+@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
+ int err = 0;
+ ssize_t bytes = 0;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_msr_write();
++ return -EPERM;
++#endif
++
+ if (count % 8)
+ return -EINVAL; /* Invalid chunk size */
+
+@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
+ err = -EBADF;
+ break;
+ }
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_msr_write();
++ return -EPERM;
++#endif
+ if (copy_from_user(&regs, uregs, sizeof regs)) {
+ err = -EFAULT;
+ break;
+@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
return notifier_from_errno(err);
}
@@ -30661,7 +30702,7 @@ index 903ec1e..c4166b2 100644
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 9d591c8..2e61790 100644
+index 9d591c8..31e52ff 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -14,11 +14,18 @@
@@ -31025,7 +31066,16 @@ index 9d591c8..2e61790 100644
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -1004,7 +1212,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
+@@ -1001,10 +1209,16 @@ static int fault_in_kernel_space(unsigned long address)
+
+ static inline bool smap_violation(int error_code, struct pt_regs *regs)
+ {
++ if (!IS_ENABLED(CONFIG_X86_SMAP))
++ return false;
++
++ if (!static_cpu_has(X86_FEATURE_SMAP))
++ return false;
++
if (error_code & PF_USER)
return false;
@@ -31034,7 +31084,7 @@ index 9d591c8..2e61790 100644
return false;
return true;
-@@ -1031,6 +1239,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1031,6 +1245,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
/* Get the faulting address: */
address = read_cr2();
@@ -31057,7 +31107,22 @@ index 9d591c8..2e61790 100644
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
-@@ -1110,7 +1334,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1087,11 +1317,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ if (unlikely(error_code & PF_RSVD))
+ pgtable_bad(regs, error_code, address);
+
+- if (static_cpu_has(X86_FEATURE_SMAP)) {
+- if (unlikely(smap_violation(error_code, regs))) {
+- bad_area_nosemaphore(regs, error_code, address);
+- return;
+- }
++ if (unlikely(smap_violation(error_code, regs))) {
++ bad_area_nosemaphore(regs, error_code, address);
++ return;
+ }
+
+ /*
+@@ -1110,7 +1338,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
@@ -31066,7 +31131,7 @@ index 9d591c8..2e61790 100644
local_irq_enable();
error_code |= PF_USER;
flags |= FAULT_FLAG_USER;
-@@ -1157,6 +1381,11 @@ retry:
+@@ -1157,6 +1385,11 @@ retry:
might_sleep();
}
@@ -31078,7 +31143,7 @@ index 9d591c8..2e61790 100644
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
-@@ -1168,18 +1397,24 @@ retry:
+@@ -1168,18 +1401,24 @@ retry:
bad_area(regs, error_code, address);
return;
}
@@ -31114,7 +31179,7 @@ index 9d591c8..2e61790 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1273,3 +1508,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1273,3 +1512,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
__do_page_fault(regs, error_code);
exception_exit(prev_state);
}
@@ -34762,6 +34827,18 @@ index 431e875..cbb23f3 100644
- return 0;
-}
-__setup("vdso=", vdso_setup);
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index 1a3c765..3d2e8d1 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -9,6 +9,7 @@ config XEN
+ select XEN_HAVE_PVMMU
+ depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
+ depends on X86_TSC
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
+ help
+ This is the Linux Xen port. Enabling this will allow the
+ kernel to boot in a paravirtualized environment under the
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index fa6ade7..73da73a5 100644
--- a/arch/x86/xen/enlighten.c
@@ -39431,7 +39508,7 @@ index 6ed45a9..eb6dc41 100644
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 5c64842..f14bdf8 100644
+index e02266a..e3411aa 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1271,7 +1271,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
@@ -39444,10 +39521,10 @@ index 5c64842..f14bdf8 100644
return can_switch;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 90fcccb..b8aabc9 100644
+index 221ac62..f56acc8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -1325,7 +1325,7 @@ typedef struct drm_i915_private {
+@@ -1326,7 +1326,7 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
@@ -40617,10 +40694,10 @@ index ac98964..5dbf512 100644
case VIA_IRQ_ABSOLUTE:
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-index 20890ad..699e4f2 100644
+index c0b73b9..f6f7f34 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
-@@ -342,7 +342,7 @@ struct vmw_private {
+@@ -341,7 +341,7 @@ struct vmw_private {
* Fencing and IRQs.
*/
@@ -43192,7 +43269,7 @@ index 3ba6a38..b0fa9b0 100644
"start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
-index 8a30ad5..72792d3 100644
+index 7da3476..f75839e 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
@@ -43214,7 +43291,7 @@ index 8a30ad5..72792d3 100644
pmd->bl_info.value_type.inc = data_block_inc;
pmd->bl_info.value_type.dec = data_block_dec;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 0704c52..0a33d61 100644
+index b49c762..c9503cf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -185,9 +185,9 @@ struct mapped_device {
@@ -43240,7 +43317,7 @@ index 0704c52..0a33d61 100644
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
-@@ -2175,7 +2175,7 @@ static void event_callback(void *context)
+@@ -2176,7 +2176,7 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
@@ -43249,7 +43326,7 @@ index 0704c52..0a33d61 100644
wake_up(&md->eventq);
}
-@@ -2868,18 +2868,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+@@ -2869,18 +2869,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
@@ -44869,7 +44946,7 @@ index 82dc574..8539ab2 100644
break;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
-index 29d5d98..fea356f 100644
+index 7b5424f..ed1d6ac 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
@@ -47645,10 +47722,10 @@ index 96c9f80..90974ca 100644
mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
ARRAY_SIZE(mc13892_regulators));
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
-index f148762..5a6d1e5 100644
+index a2325bc..04c549f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
-@@ -731,7 +731,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
+@@ -779,7 +779,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
hpet_rtc_timer_init();
/* export at least the first block of NVRAM */
@@ -48894,10 +48971,10 @@ index df5e961..df6b97f 100644
return blk_trace_startstop(sdp->device->request_queue, 1);
case BLKTRACESTOP:
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
-index 349ebba..ff2a249 100644
+index d745f95..6bef2fc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
-@@ -1945,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *master)
+@@ -1947,7 +1947,7 @@ int spi_bus_unlock(struct spi_master *master)
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
@@ -49428,10 +49505,10 @@ index d06de84..fd38c9b 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 91953da..a842b90 100644
+index dee2be1..f5fd8ca 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
-@@ -1112,7 +1112,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
+@@ -1113,7 +1113,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
@@ -50736,6 +50813,19 @@ index d0e3a44..5f8b754 100644
if (!perm) {
ret = -EPERM;
goto reterr;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 61b1137..23b5d32 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
+ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
+ vc->vc_screenbuf_size >> 1);
+ set_origin(vc);
++ if (CON_IS_VISIBLE(vc))
++ update_screen(vc);
+ /* fall through */
+ case 2: /* erase whole display */
+ count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a673e5b..36e5d32 100644
--- a/drivers/uio/uio.c
@@ -60046,10 +60136,10 @@ index b96a49b..9bfdc47 100644
cuse_class = class_create(THIS_MODULE, "cuse");
if (IS_ERR(cuse_class))
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
-index ef74ad5..c9ac759e 100644
+index fa8cb4b..4acb935 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
-@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+@@ -1323,7 +1323,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
ret = 0;
pipe_lock(pipe);
@@ -60058,7 +60148,7 @@ index ef74ad5..c9ac759e 100644
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
-@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+@@ -1352,7 +1352,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
page_nr++;
ret += buf->len;
@@ -61026,81 +61116,6 @@ index 00ad1c2..2fde15e 100644
}
void nfs_fattr_init(struct nfs_fattr *fattr)
-diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
-index b4a160a..2b9bfba 100644
---- a/fs/nfs/nfs4client.c
-+++ b/fs/nfs/nfs4client.c
-@@ -409,13 +409,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
- error = nfs4_discover_server_trunking(clp, &old);
- if (error < 0)
- goto error;
-- nfs_put_client(clp);
-- if (clp != old) {
-+
-+ if (clp != old)
- clp->cl_preserve_clid = true;
-- clp = old;
-- }
--
-- return clp;
-+ nfs_put_client(clp);
-+ return old;
-
- error:
- nfs_mark_client_ready(clp, error);
-@@ -493,9 +491,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
- prev = pos;
-
- status = nfs_wait_client_init_complete(pos);
-- spin_lock(&nn->nfs_client_lock);
- if (status < 0)
-- continue;
-+ goto out;
-+ status = -NFS4ERR_STALE_CLIENTID;
-+ spin_lock(&nn->nfs_client_lock);
- }
- if (pos->cl_cons_state != NFS_CS_READY)
- continue;
-@@ -633,7 +632,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
- }
- spin_lock(&nn->nfs_client_lock);
- if (status < 0)
-- continue;
-+ break;
-+ status = -NFS4ERR_STALE_CLIENTID;
- }
- if (pos->cl_cons_state != NFS_CS_READY)
- continue;
-diff --git a/fs/nfs/write.c b/fs/nfs/write.c
-index c1d5482..6a85038 100644
---- a/fs/nfs/write.c
-+++ b/fs/nfs/write.c
-@@ -922,19 +922,20 @@ out:
- * extend the write to cover the entire page in order to avoid fragmentation
- * inefficiencies.
- *
-- * If the file is opened for synchronous writes or if we have a write delegation
-- * from the server then we can just skip the rest of the checks.
-+ * If the file is opened for synchronous writes then we can just skip the rest
-+ * of the checks.
- */
- static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
- {
- if (file->f_flags & O_DSYNC)
- return 0;
-+ if (!nfs_write_pageuptodate(page, inode))
-+ return 0;
- if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
- return 1;
-- if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
-- (inode->i_flock->fl_start == 0 &&
-+ if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
- inode->i_flock->fl_end == OFFSET_MAX &&
-- inode->i_flock->fl_type != F_RDLCK)))
-+ inode->i_flock->fl_type != F_RDLCK))
- return 1;
- return 0;
- }
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 419572f..5414a23 100644
--- a/fs/nfsd/nfs4proc.c
@@ -61267,7 +61282,7 @@ index e7bc1d7..06bd4bb 100644
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
-index e44cb64..7668ca4 100644
+index 6663511..7668ca4 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
@@ -61281,18 +61296,6 @@ index e44cb64..7668ca4 100644
goto out_close_fd;
ret = prepare_for_access_response(group, event, fd);
-@@ -888,9 +888,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
- {
- return sys_fanotify_mark(fanotify_fd, flags,
- #ifdef __BIG_ENDIAN
-- ((__u64)mask1 << 32) | mask0,
--#else
- ((__u64)mask0 << 32) | mask1,
-+#else
-+ ((__u64)mask1 << 32) | mask0,
- #endif
- dfd, pathname);
- }
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 7b51b05..5ea5ef6 100644
--- a/fs/notify/notification.c
@@ -63328,7 +63331,7 @@ index 72d2917..c917c12 100644
if (!msg_head) {
printk(KERN_ERR
diff --git a/fs/read_write.c b/fs/read_write.c
-index 58e440d..8ec2838 100644
+index cfa18df..c110979 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
@@ -63612,7 +63615,7 @@ index 1d641bb..e600623 100644
if (op) {
diff --git a/fs/splice.c b/fs/splice.c
-index 46a08f7..bb163cc 100644
+index 12028fa..a6f2619 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
@@ -63645,7 +63648,7 @@ index 46a08f7..bb163cc 100644
}
pipe_unlock(pipe);
-@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+@@ -583,7 +583,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
@@ -63654,7 +63657,7 @@ index 46a08f7..bb163cc 100644
set_fs(old_fs);
return res;
-@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
+@@ -598,7 +598,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
@@ -63663,7 +63666,7 @@ index 46a08f7..bb163cc 100644
set_fs(old_fs);
return res;
-@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+@@ -651,7 +651,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
goto err;
this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
@@ -63672,7 +63675,7 @@ index 46a08f7..bb163cc 100644
vec[i].iov_len = this_len;
spd.pages[i] = page;
spd.nr_pages++;
-@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
+@@ -847,7 +847,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
ops->release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
@@ -63681,7 +63684,7 @@ index 46a08f7..bb163cc 100644
sd->need_wakeup = true;
}
-@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
+@@ -872,10 +872,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
{
while (!pipe->nrbufs) {
@@ -63694,7 +63697,7 @@ index 46a08f7..bb163cc 100644
return 0;
if (sd->flags & SPLICE_F_NONBLOCK)
-@@ -1179,7 +1179,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* out of the pipe right after the splice_to_pipe(). So set
* PIPE_READERS appropriately.
*/
@@ -63703,7 +63706,7 @@ index 46a08f7..bb163cc 100644
current->splice_pipe = pipe;
}
-@@ -1475,6 +1475,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+@@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
partial[buffers].offset = off;
partial[buffers].len = plen;
@@ -63711,7 +63714,7 @@ index 46a08f7..bb163cc 100644
off = 0;
len -= plen;
-@@ -1777,9 +1778,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -63723,7 +63726,7 @@ index 46a08f7..bb163cc 100644
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
-@@ -1811,7 +1812,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
pipe_lock(pipe);
while (pipe->nrbufs >= pipe->buffers) {
@@ -63732,7 +63735,7 @@ index 46a08f7..bb163cc 100644
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
break;
-@@ -1824,9 +1825,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -63744,7 +63747,7 @@ index 46a08f7..bb163cc 100644
}
pipe_unlock(pipe);
-@@ -1862,14 +1863,14 @@ retry:
+@@ -1880,14 +1881,14 @@ retry:
pipe_double_lock(ipipe, opipe);
do {
@@ -63761,7 +63764,7 @@ index 46a08f7..bb163cc 100644
break;
/*
-@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
pipe_double_lock(ipipe, opipe);
do {
@@ -63770,7 +63773,7 @@ index 46a08f7..bb163cc 100644
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
-@@ -2011,7 +2012,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* return EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
@@ -64240,10 +64243,10 @@ index 104455b..764c512 100644
kfree(s);
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..e98584b
+index 0000000..ffff596
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1147 @@
+@@ -0,0 +1,1153 @@
+#
+# grecurity configuration
+#
@@ -65380,6 +65383,9 @@ index 0000000..e98584b
+ to allow informative logs to be produced, but large enough to
+ prevent flooding.
+
++ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
++ any rate limiting on grsecurity log messages.
++
+config GRKERNSEC_FLOODBURST
+ int "Number of messages in a burst (maximum)"
+ default 6
@@ -65390,6 +65396,9 @@ index 0000000..e98584b
+ many of your logs are being interpreted as flooding, you may want to
+ raise this value.
+
++ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
++ any rate limiting on grsecurity log messages.
++
+endmenu
diff --git a/grsecurity/Makefile b/grsecurity/Makefile
new file mode 100644
@@ -65453,7 +65462,7 @@ index 0000000..5307c8a
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..19a5b7c
+index 0000000..364a9d7
--- /dev/null
+++ b/grsecurity/gracl.c
@@ -0,0 +1,2678 @@
@@ -67666,7 +67675,7 @@ index 0000000..19a5b7c
+
+ return;
+}
-+EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL_GPL(gr_learn_resource);
+#endif
+
+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
@@ -68128,11 +68137,11 @@ index 0000000..19a5b7c
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
-+EXPORT_SYMBOL(gr_acl_is_enabled);
++EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
+#endif
+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_check_user_change);
-+EXPORT_SYMBOL(gr_check_group_change);
++EXPORT_SYMBOL_GPL(gr_check_user_change);
++EXPORT_SYMBOL_GPL(gr_check_group_change);
+#endif
+
diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
@@ -71940,7 +71949,7 @@ index 0000000..bc0be01
+}
diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
new file mode 100644
-index 0000000..e10b319
+index 0000000..651d6c2
--- /dev/null
+++ b/grsecurity/grsec_chroot.c
@@ -0,0 +1,370 @@
@@ -72081,7 +72090,7 @@ index 0000000..e10b319
+ return 0;
+}
+
-+EXPORT_SYMBOL(gr_pid_is_chrooted);
++EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
+
+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
@@ -72316,7 +72325,7 @@ index 0000000..e10b319
+}
diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
new file mode 100644
-index 0000000..52b3e30
+index 0000000..4d6fce8
--- /dev/null
+++ b/grsecurity/grsec_disabled.c
@@ -0,0 +1,433 @@
@@ -72750,12 +72759,12 @@ index 0000000..52b3e30
+}
+
+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_check_user_change);
-+EXPORT_SYMBOL(gr_check_group_change);
++EXPORT_SYMBOL_GPL(gr_check_user_change);
++EXPORT_SYMBOL_GPL(gr_check_group_change);
+#endif
diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
new file mode 100644
-index 0000000..387032b
+index 0000000..f35f454
--- /dev/null
+++ b/grsecurity/grsec_exec.c
@@ -0,0 +1,187 @@
@@ -72942,10 +72951,10 @@ index 0000000..387032b
+#endif
+}
+
-+EXPORT_SYMBOL(gr_is_capable);
-+EXPORT_SYMBOL(gr_is_capable_nolog);
-+EXPORT_SYMBOL(gr_task_is_capable);
-+EXPORT_SYMBOL(gr_task_is_capable_nolog);
++EXPORT_SYMBOL_GPL(gr_is_capable);
++EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
++EXPORT_SYMBOL_GPL(gr_task_is_capable);
++EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
new file mode 100644
index 0000000..06cc6ea
@@ -73007,7 +73016,7 @@ index 0000000..8ca18bf
+}
diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
new file mode 100644
-index 0000000..a88e901
+index 0000000..ae6c028
--- /dev/null
+++ b/grsecurity/grsec_init.c
@@ -0,0 +1,272 @@
@@ -73059,7 +73068,7 @@ index 0000000..a88e901
+kgid_t grsec_tpe_gid;
+int grsec_enable_blackhole;
+#ifdef CONFIG_IPV6_MODULE
-+EXPORT_SYMBOL(grsec_enable_blackhole);
++EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
+#endif
+int grsec_lastack_retries;
+int grsec_enable_tpe_all;
@@ -73750,16 +73759,24 @@ index 0000000..dbe0a6b
+}
diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
new file mode 100644
-index 0000000..f536303
+index 0000000..0e39d8c
--- /dev/null
+++ b/grsecurity/grsec_mem.c
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
++#include <linux/module.h>
+#include <linux/grinternal.h>
+
++void gr_handle_msr_write(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
++ return;
++}
++EXPORT_SYMBOL_GPL(gr_handle_msr_write);
++
+void
+gr_handle_ioperm(void)
+{
@@ -74196,7 +74213,7 @@ index 0000000..3860c7e
+}
diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
new file mode 100644
-index 0000000..4030d57
+index 0000000..c0aef3a
--- /dev/null
+++ b/grsecurity/grsec_sock.c
@@ -0,0 +1,244 @@
@@ -74216,14 +74233,14 @@ index 0000000..4030d57
+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
+
-+EXPORT_SYMBOL(gr_search_udp_recvmsg);
-+EXPORT_SYMBOL(gr_search_udp_sendmsg);
++EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
+
+#ifdef CONFIG_UNIX_MODULE
-+EXPORT_SYMBOL(gr_acl_handle_unix);
-+EXPORT_SYMBOL(gr_acl_handle_mknod);
-+EXPORT_SYMBOL(gr_handle_chroot_unix);
-+EXPORT_SYMBOL(gr_handle_create);
++EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
++EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
++EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
++EXPORT_SYMBOL_GPL(gr_handle_create);
+#endif
+
+#ifdef CONFIG_GRKERNSEC
@@ -74931,7 +74948,7 @@ index 0000000..8159888
+#endif
diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
new file mode 100644
-index 0000000..0dc13c3
+index 0000000..61b514e
--- /dev/null
+++ b/grsecurity/grsec_time.c
@@ -0,0 +1,16 @@
@@ -74950,7 +74967,7 @@ index 0000000..0dc13c3
+ return;
+}
+
-+EXPORT_SYMBOL(gr_log_timechange);
++EXPORT_SYMBOL_GPL(gr_log_timechange);
diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
new file mode 100644
index 0000000..ee57dcf
@@ -75743,10 +75760,10 @@ index 810431d..0ec4804f 100644
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
-index db09234..86683e3 100644
+index 8e4f41d..c5e9afd 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
-@@ -736,6 +736,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
+@@ -748,6 +748,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -75988,7 +76005,7 @@ index c1da539..1dcec55 100644
struct atmphy_ops {
int (*start)(struct atm_dev *dev);
diff --git a/include/linux/audit.h b/include/linux/audit.h
-index a406419..c2bb164 100644
+index bf1ef22..2a55e1b 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -195,7 +195,7 @@ static inline void audit_ptrace(struct task_struct *t)
@@ -76148,7 +76165,7 @@ index 7e59253..d6e4cae 100644
/**
* struct clk_init_data - holds init data that's common to all clocks and is
diff --git a/include/linux/compat.h b/include/linux/compat.h
-index eb8a49d..6b66ed9 100644
+index 19f6003..90b64f4 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
@@ -76167,10 +76184,10 @@ index eb8a49d..6b66ed9 100644
- compat_long_t addr, compat_long_t data);
+ compat_ulong_t addr, compat_ulong_t data);
- asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
+ asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
/*
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
-index ded4299..ddcbe31 100644
+index ded4299..55203f8 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -39,9 +39,34 @@
@@ -76208,6 +76225,19 @@ index ded4299..ddcbe31 100644
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
+@@ -75,11 +100,7 @@
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+-#if GCC_VERSION <= 40801
+-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+-#else
+-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
+-#endif
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+ #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+ #if GCC_VERSION >= 40400
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 92669cd..cc564c0 100644
--- a/include/linux/compiler.h
@@ -77918,10 +77948,10 @@ index 0000000..d25522e
+#endif
diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
new file mode 100644
-index 0000000..195cbe4
+index 0000000..ba93581
--- /dev/null
+++ b/include/linux/grmsg.h
-@@ -0,0 +1,115 @@
+@@ -0,0 +1,116 @@
+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
@@ -78037,12 +78067,13 @@ index 0000000..195cbe4
+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
++#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
new file mode 100644
-index 0000000..d8b5b48
+index 0000000..8108301
--- /dev/null
+++ b/include/linux/grsecurity.h
-@@ -0,0 +1,245 @@
+@@ -0,0 +1,246 @@
+#ifndef GR_SECURITY_H
+#define GR_SECURITY_H
+#include <linux/fs.h>
@@ -78103,6 +78134,7 @@ index 0000000..d8b5b48
+
+void gr_handle_ioperm(void);
+void gr_handle_iopl(void);
++void gr_handle_msr_write(void);
+
+umode_t gr_acl_umask(void);
+
@@ -79754,7 +79786,7 @@ index 7246ef3..1539ea4 100644
extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
-index b8809fe..ae4ccd0 100644
+index ab57526..94598804 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -47,10 +47,10 @@ struct pipe_inode_info {
@@ -80489,7 +80521,7 @@ index 53f97eb..1d90705 100644
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
-index 41467f8..1e4253d 100644
+index e3347c5..f682891 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
@@ -81459,7 +81491,7 @@ index 4b8a891..cb8df6e 100644
/*
* Internals. Dont't use..
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
-index e4b9480..5a5f65a 100644
+index a67b384..f52a537 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
@@ -81508,7 +81540,7 @@ index e4b9480..5a5f65a 100644
#ifdef CONFIG_SMP
int cpu;
-@@ -220,8 +220,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
+@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
@@ -81519,7 +81551,7 @@ index e4b9480..5a5f65a 100644
}
static inline void __inc_zone_page_state(struct page *page,
-@@ -232,8 +232,8 @@ static inline void __inc_zone_page_state(struct page *page,
+@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
@@ -82432,7 +82464,7 @@ index 1f741cb..8cefc08 100644
struct snd_soc_platform {
const char *name;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index 321301c..2ae5cb0 100644
+index e3569f8..6544ffd 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -687,7 +687,7 @@ struct se_device {
@@ -83643,10 +83675,10 @@ index 8d6e145..33e0b1e 100644
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
set_fs(fs);
diff --git a/kernel/audit.c b/kernel/audit.c
-index 906ae5a0..a7ad0b4 100644
+index 15ec13a..986322e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
-@@ -117,7 +117,7 @@ u32 audit_sig_sid = 0;
+@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
3) suppressed due to audit_rate_limit
4) suppressed due to audit_backlog_limit
*/
@@ -83655,7 +83687,7 @@ index 906ae5a0..a7ad0b4 100644
/* The netlink socket. */
static struct sock *audit_sock;
-@@ -250,7 +250,7 @@ void audit_log_lost(const char *message)
+@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
unsigned long now;
int print;
@@ -83664,7 +83696,7 @@ index 906ae5a0..a7ad0b4 100644
print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
-@@ -269,7 +269,7 @@ void audit_log_lost(const char *message)
+@@ -270,7 +270,7 @@ void audit_log_lost(const char *message)
printk(KERN_WARNING
"audit: audit_lost=%d audit_rate_limit=%d "
"audit_backlog_limit=%d\n",
@@ -83673,7 +83705,7 @@ index 906ae5a0..a7ad0b4 100644
audit_rate_limit,
audit_backlog_limit);
audit_panic(message);
-@@ -765,7 +765,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -766,7 +766,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
status_set.pid = audit_pid;
status_set.rate_limit = audit_rate_limit;
status_set.backlog_limit = audit_backlog_limit;
@@ -83682,7 +83714,7 @@ index 906ae5a0..a7ad0b4 100644
status_set.backlog = skb_queue_len(&audit_skb_queue);
audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
&status_set, sizeof(status_set));
-@@ -1356,7 +1356,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
+@@ -1359,7 +1359,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
int i, avail, new_len;
unsigned char *ptr;
struct sk_buff *skb;
@@ -87846,10 +87878,19 @@ index a63f4dc..349bbb0 100644
unsigned long timeout)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index a88f4a4..9d57ac9 100644
+index c677510..132bb14 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2871,6 +2871,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -1768,7 +1768,7 @@ void set_numabalancing_state(bool enabled)
+ int sysctl_numa_balancing(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- struct ctl_table t;
++ ctl_table_no_const t;
+ int err;
+ int state = numabalancing_enabled;
+
+@@ -2893,6 +2893,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -87858,7 +87899,7 @@ index a88f4a4..9d57ac9 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -2904,7 +2906,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -2926,7 +2928,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -87868,7 +87909,7 @@ index a88f4a4..9d57ac9 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -3066,6 +3069,7 @@ recheck:
+@@ -3088,6 +3091,7 @@ recheck:
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -87876,7 +87917,7 @@ index a88f4a4..9d57ac9 100644
/* can't set/change the rt policy */
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
-@@ -4232,7 +4236,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+@@ -4254,7 +4258,7 @@ static void migrate_tasks(unsigned int dead_cpu)
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -87885,7 +87926,7 @@ index a88f4a4..9d57ac9 100644
{
.procname = "sched_domain",
.mode = 0555,
-@@ -4249,17 +4253,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -4271,17 +4275,17 @@ static struct ctl_table sd_ctl_root[] = {
{}
};
@@ -87907,7 +87948,7 @@ index a88f4a4..9d57ac9 100644
/*
* In the intermediate directories, both the child directory and
-@@ -4267,22 +4271,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -4289,22 +4293,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
@@ -87939,7 +87980,7 @@ index a88f4a4..9d57ac9 100644
const char *procname, void *data, int maxlen,
umode_t mode, proc_handler *proc_handler,
bool load_idx)
-@@ -4302,7 +4309,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -4324,7 +4331,7 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
@@ -87948,7 +87989,7 @@ index a88f4a4..9d57ac9 100644
if (table == NULL)
return NULL;
-@@ -4337,9 +4344,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -4359,9 +4366,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
@@ -87960,7 +88001,7 @@ index a88f4a4..9d57ac9 100644
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
-@@ -4366,11 +4373,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -4388,11 +4395,13 @@ static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
@@ -87975,7 +88016,7 @@ index a88f4a4..9d57ac9 100644
if (entry == NULL)
return;
-@@ -4393,8 +4402,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -4415,8 +4424,12 @@ static void unregister_sched_domain_sysctl(void)
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
@@ -88412,7 +88453,7 @@ index c723113..46bf922 100644
if (!retval) {
if (old_rlim)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 34a6047..5665aa7 100644
+index 06962ba..a54d45e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -93,7 +93,6 @@
@@ -88514,7 +88555,7 @@ index 34a6047..5665aa7 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -620,7 +650,7 @@ static struct ctl_table kern_table[] = {
+@@ -629,7 +659,7 @@ static struct ctl_table kern_table[] = {
.data = &modprobe_path,
.maxlen = KMOD_PATH_LEN,
.mode = 0644,
@@ -88523,7 +88564,7 @@ index 34a6047..5665aa7 100644
},
{
.procname = "modules_disabled",
-@@ -787,16 +817,20 @@ static struct ctl_table kern_table[] = {
+@@ -796,16 +826,20 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &one,
},
@@ -88545,7 +88586,7 @@ index 34a6047..5665aa7 100644
{
.procname = "ngroups_max",
.data = &ngroups_max,
-@@ -1039,10 +1073,17 @@ static struct ctl_table kern_table[] = {
+@@ -1048,10 +1082,17 @@ static struct ctl_table kern_table[] = {
*/
{
.procname = "perf_event_paranoid",
@@ -88566,7 +88607,7 @@ index 34a6047..5665aa7 100644
},
{
.procname = "perf_event_mlock_kb",
-@@ -1306,6 +1347,13 @@ static struct ctl_table vm_table[] = {
+@@ -1315,6 +1356,13 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
@@ -88580,7 +88621,7 @@ index 34a6047..5665aa7 100644
#else
{
.procname = "nr_trim_pages",
-@@ -1770,6 +1818,16 @@ int proc_dostring(struct ctl_table *table, int write,
+@@ -1779,6 +1827,16 @@ int proc_dostring(struct ctl_table *table, int write,
buffer, lenp, ppos);
}
@@ -88597,7 +88638,7 @@ index 34a6047..5665aa7 100644
static size_t proc_skip_spaces(char **buf)
{
size_t ret;
-@@ -1875,6 +1933,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
+@@ -1884,6 +1942,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
len = strlen(tmp);
if (len > *size)
len = *size;
@@ -88606,7 +88647,7 @@ index 34a6047..5665aa7 100644
if (copy_to_user(*buf, tmp, len))
return -EFAULT;
*size -= len;
-@@ -2039,7 +2099,7 @@ int proc_dointvec(struct ctl_table *table, int write,
+@@ -2048,7 +2108,7 @@ int proc_dointvec(struct ctl_table *table, int write,
static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -88615,7 +88656,7 @@ index 34a6047..5665aa7 100644
unsigned long tmptaint = get_taint();
int err;
-@@ -2067,7 +2127,6 @@ static int proc_taint(struct ctl_table *table, int write,
+@@ -2076,7 +2136,6 @@ static int proc_taint(struct ctl_table *table, int write,
return err;
}
@@ -88623,7 +88664,7 @@ index 34a6047..5665aa7 100644
static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
-@@ -2076,7 +2135,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+@@ -2085,7 +2144,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
@@ -88631,7 +88672,7 @@ index 34a6047..5665aa7 100644
struct do_proc_dointvec_minmax_conv_param {
int *min;
-@@ -2623,6 +2681,12 @@ int proc_dostring(struct ctl_table *table, int write,
+@@ -2632,6 +2690,12 @@ int proc_dostring(struct ctl_table *table, int write,
return -ENOSYS;
}
@@ -88644,7 +88685,7 @@ index 34a6047..5665aa7 100644
int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
-@@ -2679,5 +2743,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+@@ -2688,5 +2752,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
@@ -88708,7 +88749,7 @@ index 88c9c65..7497ebc 100644
.clock_get = alarm_clock_get,
.timer_create = alarm_timer_create,
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 87b4f00..b7f77a7 100644
+index b415457..c26876d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -15,6 +15,7 @@
@@ -88899,10 +88940,10 @@ index f785aef..59f1b18 100644
ret = -EIO;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 72a0f81..0bbfd090 100644
+index 38463d2..68abe92 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -1944,12 +1944,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -1978,12 +1978,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
@@ -88922,7 +88963,7 @@ index 72a0f81..0bbfd090 100644
}
/*
-@@ -4119,8 +4124,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4190,8 +4195,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
@@ -88933,7 +88974,7 @@ index 72a0f81..0bbfd090 100644
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
-@@ -4851,8 +4858,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+@@ -4922,8 +4929,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
@@ -88942,9 +88983,9 @@ index 72a0f81..0bbfd090 100644
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
-@@ -5003,6 +5008,10 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
- FTRACE_OPS_FL_RECURSION_SAFE,
- };
+@@ -5099,6 +5104,10 @@ static void update_function_graph_func(void)
+ ftrace_graph_entry = ftrace_graph_entry_test;
+ }
+static struct notifier_block ftrace_suspend_notifier = {
+ .notifier_call = ftrace_suspend_notifier_call
@@ -88953,7 +88994,7 @@ index 72a0f81..0bbfd090 100644
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
-@@ -5016,7 +5025,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+@@ -5112,7 +5121,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
goto out;
}
@@ -89240,10 +89281,10 @@ index cc2f66f..05edd54 100644
*data_page = bpage;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 9d20cd9..221d816 100644
+index 0a360ce..7bd800e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3346,7 +3346,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -3352,7 +3352,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
@@ -90469,10 +90510,10 @@ index dee6cf4..52b94f7 100644
if (!ptep)
return VM_FAULT_OOM;
diff --git a/mm/internal.h b/mm/internal.h
-index 684f7aa..9eb9edc 100644
+index 8b6cfd6..ec809a6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
-@@ -97,6 +97,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
+@@ -96,6 +96,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
* in mm/page_alloc.c
*/
extern void __free_pages_bootmem(struct page *page, unsigned int order);
@@ -90480,7 +90521,7 @@ index 684f7aa..9eb9edc 100644
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
-@@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable;
+@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
@@ -90614,7 +90655,7 @@ index 539eeb9..e24a987 100644
if (end == start)
return error;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index fabe550..f31b51c 100644
+index 6420be5..b7b7c8f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -90644,7 +90685,7 @@ index fabe550..f31b51c 100644
{ reserved, reserved, "reserved kernel", me_kernel },
/*
* free pages are specially detected outside this table:
-@@ -1063,7 +1063,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+@@ -1060,7 +1060,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
nr_pages = 1 << compound_order(hpage);
else /* normal page or thp */
nr_pages = 1;
@@ -90653,7 +90694,7 @@ index fabe550..f31b51c 100644
/*
* We need/can do nothing about count=0 pages.
-@@ -1093,7 +1093,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+@@ -1090,7 +1090,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
if (!PageHWPoison(hpage)
|| (hwpoison_filter(p) && TestClearPageHWPoison(p))
|| (p != hpage && TestSetPageHWPoison(hpage))) {
@@ -90662,7 +90703,7 @@ index fabe550..f31b51c 100644
return 0;
}
set_page_hwpoison_huge_page(hpage);
-@@ -1162,7 +1162,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+@@ -1159,7 +1159,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
}
if (hwpoison_filter(p)) {
if (TestClearPageHWPoison(p))
@@ -90671,7 +90712,7 @@ index fabe550..f31b51c 100644
unlock_page(hpage);
put_page(hpage);
return 0;
-@@ -1380,7 +1380,7 @@ int unpoison_memory(unsigned long pfn)
+@@ -1381,7 +1381,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
if (TestClearPageHWPoison(p))
@@ -90680,7 +90721,7 @@ index fabe550..f31b51c 100644
pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
return 0;
}
-@@ -1394,7 +1394,7 @@ int unpoison_memory(unsigned long pfn)
+@@ -1395,7 +1395,7 @@ int unpoison_memory(unsigned long pfn)
*/
if (TestClearPageHWPoison(page)) {
pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
@@ -90689,7 +90730,7 @@ index fabe550..f31b51c 100644
freeit = 1;
if (PageHuge(page))
clear_page_hwpoison_huge_page(page);
-@@ -1519,11 +1519,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
+@@ -1520,11 +1520,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
@@ -90703,7 +90744,7 @@ index fabe550..f31b51c 100644
}
}
return ret;
-@@ -1562,7 +1562,7 @@ static int __soft_offline_page(struct page *page, int flags)
+@@ -1563,7 +1563,7 @@ static int __soft_offline_page(struct page *page, int flags)
put_page(page);
pr_info("soft_offline: %#lx: invalidated\n", pfn);
SetPageHWPoison(page);
@@ -90712,7 +90753,7 @@ index fabe550..f31b51c 100644
return 0;
}
-@@ -1607,7 +1607,7 @@ static int __soft_offline_page(struct page *page, int flags)
+@@ -1608,7 +1608,7 @@ static int __soft_offline_page(struct page *page, int flags)
if (!is_free_buddy_page(page))
pr_info("soft offline: %#lx: page leaked\n",
pfn);
@@ -90721,7 +90762,7 @@ index fabe550..f31b51c 100644
}
} else {
pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
-@@ -1681,11 +1681,11 @@ int soft_offline_page(struct page *page, int flags)
+@@ -1682,11 +1682,11 @@ int soft_offline_page(struct page *page, int flags)
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
@@ -91431,7 +91472,7 @@ index 6768ce9..4c41d69 100644
mm = get_task_mm(tsk);
if (!mm)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index e1bd997..055f496 100644
+index cb2f3dd..fb80468 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
@@ -91592,7 +91633,7 @@ index 192e6ee..b044449 100644
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff --git a/mm/mmap.c b/mm/mmap.c
-index 834b2d7..650d1b9 100644
+index 546db74..650d1b9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,7 @@
@@ -91673,24 +91714,7 @@ index 834b2d7..650d1b9 100644
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -893,7 +916,15 @@ again: remove_next = 1 + (end > next->vm_end);
- static inline int is_mergeable_vma(struct vm_area_struct *vma,
- struct file *file, unsigned long vm_flags)
- {
-- if (vma->vm_flags ^ vm_flags)
-+ /*
-+ * VM_SOFTDIRTY should not prevent from VMA merging, if we
-+ * match the flags but dirty bit -- the caller should mark
-+ * merged VMA as dirty. If dirty bit won't be excluded from
-+ * comparison, we increase pressue on the memory system forcing
-+ * the kernel to generate new VMAs when old one could be
-+ * extended instead.
-+ */
-+ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
- return 0;
- if (vma->vm_file != file)
- return 0;
-@@ -931,6 +962,12 @@ static int
+@@ -939,6 +962,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -91703,7 +91727,7 @@ index 834b2d7..650d1b9 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -950,6 +987,12 @@ static int
+@@ -958,6 +987,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -91716,7 +91740,7 @@ index 834b2d7..650d1b9 100644
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
pgoff_t vm_pglen;
-@@ -992,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -1000,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -91738,7 +91762,7 @@ index 834b2d7..650d1b9 100644
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -1014,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1022,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -91754,7 +91778,7 @@ index 834b2d7..650d1b9 100644
/*
* Can it merge with the predecessor?
*/
-@@ -1033,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1041,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
/* cases 1, 6 */
err = vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -91780,7 +91804,7 @@ index 834b2d7..650d1b9 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(prev);
-@@ -1049,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -1057,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -91810,16 +91834,7 @@ index 834b2d7..650d1b9 100644
if (err)
return NULL;
khugepaged_enter_vma_merge(area);
-@@ -1082,7 +1171,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
- return a->vm_end == b->vm_start &&
- mpol_equal(vma_policy(a), vma_policy(b)) &&
- a->vm_file == b->vm_file &&
-- !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
-+ !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
- b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
- }
-
-@@ -1163,8 +1252,10 @@ none:
+@@ -1171,8 +1252,10 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -91832,7 +91847,7 @@ index 834b2d7..650d1b9 100644
mm->total_vm += pages;
-@@ -1172,7 +1263,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
+@@ -1180,7 +1263,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
mm->exec_vm += pages;
@@ -91841,7 +91856,7 @@ index 834b2d7..650d1b9 100644
mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -1210,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1218,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -91850,7 +91865,7 @@ index 834b2d7..650d1b9 100644
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -1236,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1244,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -91859,7 +91874,7 @@ index 834b2d7..650d1b9 100644
if (addr & ~PAGE_MASK)
return addr;
-@@ -1247,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1255,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -91903,7 +91918,7 @@ index 834b2d7..650d1b9 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1258,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1266,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -91911,7 +91926,7 @@ index 834b2d7..650d1b9 100644
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1342,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1350,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
vm_flags |= VM_NORESERVE;
}
@@ -91921,7 +91936,7 @@ index 834b2d7..650d1b9 100644
addr = mmap_region(file, addr, len, vm_flags, pgoff);
if (!IS_ERR_VALUE(addr) &&
((vm_flags & VM_LOCKED) ||
-@@ -1435,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1443,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
vm_flags_t vm_flags = vma->vm_flags;
/* If it was private or non-writable, the write bit is already clear */
@@ -91930,7 +91945,7 @@ index 834b2d7..650d1b9 100644
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1481,7 +1613,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1489,7 +1613,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
@@ -91953,7 +91968,7 @@ index 834b2d7..650d1b9 100644
if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
-@@ -1500,11 +1647,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1508,11 +1647,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
/* Clear old maps */
error = -ENOMEM;
@@ -91966,7 +91981,7 @@ index 834b2d7..650d1b9 100644
}
/*
-@@ -1535,6 +1681,16 @@ munmap_back:
+@@ -1543,6 +1681,16 @@ munmap_back:
goto unacct_error;
}
@@ -91983,7 +91998,7 @@ index 834b2d7..650d1b9 100644
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1554,6 +1710,13 @@ munmap_back:
+@@ -1562,6 +1710,13 @@ munmap_back:
if (error)
goto unmap_and_free_vma;
@@ -91997,7 +92012,7 @@ index 834b2d7..650d1b9 100644
/* Can addr have changed??
*
* Answer: Yes, several device drivers can do it in their
-@@ -1587,6 +1750,12 @@ munmap_back:
+@@ -1595,6 +1750,12 @@ munmap_back:
}
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -92010,7 +92025,7 @@ index 834b2d7..650d1b9 100644
/* Once vma denies write, undo our temporary denial count */
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
-@@ -1595,6 +1764,7 @@ out:
+@@ -1603,6 +1764,7 @@ out:
perf_event_mmap(vma);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -92018,7 +92033,7 @@ index 834b2d7..650d1b9 100644
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
-@@ -1627,6 +1797,12 @@ unmap_and_free_vma:
+@@ -1635,6 +1797,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -92031,7 +92046,7 @@ index 834b2d7..650d1b9 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1634,7 +1810,63 @@ unacct_error:
+@@ -1642,7 +1810,63 @@ unacct_error:
return error;
}
@@ -92096,7 +92111,7 @@ index 834b2d7..650d1b9 100644
{
/*
* We implement the search by looking for an rbtree node that
-@@ -1682,11 +1914,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+@@ -1690,11 +1914,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
}
}
@@ -92127,7 +92142,7 @@ index 834b2d7..650d1b9 100644
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
-@@ -1736,7 +1986,7 @@ found:
+@@ -1744,7 +1986,7 @@ found:
return gap_start;
}
@@ -92136,7 +92151,7 @@ index 834b2d7..650d1b9 100644
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
-@@ -1790,6 +2040,24 @@ check_current:
+@@ -1798,6 +2040,24 @@ check_current:
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
@@ -92161,7 +92176,7 @@ index 834b2d7..650d1b9 100644
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
-@@ -1853,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1861,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
@@ -92169,7 +92184,7 @@ index 834b2d7..650d1b9 100644
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
-@@ -1860,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1868,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
return addr;
@@ -92186,7 +92201,7 @@ index 834b2d7..650d1b9 100644
return addr;
}
-@@ -1873,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1881,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = 0;
@@ -92194,7 +92209,7 @@ index 834b2d7..650d1b9 100644
return vm_unmapped_area(&info);
}
#endif
-@@ -1891,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1899,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
@@ -92202,7 +92217,7 @@ index 834b2d7..650d1b9 100644
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
-@@ -1899,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1907,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -92220,7 +92235,7 @@ index 834b2d7..650d1b9 100644
return addr;
}
-@@ -1913,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1921,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
@@ -92228,7 +92243,7 @@ index 834b2d7..650d1b9 100644
addr = vm_unmapped_area(&info);
/*
-@@ -1925,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1933,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
@@ -92241,7 +92256,7 @@ index 834b2d7..650d1b9 100644
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
-@@ -2026,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -2034,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
return vma;
}
@@ -92270,7 +92285,7 @@ index 834b2d7..650d1b9 100644
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -2042,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2050,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -92278,7 +92293,7 @@ index 834b2d7..650d1b9 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -2052,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2060,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -92286,7 +92301,7 @@ index 834b2d7..650d1b9 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -2081,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -2089,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -92344,7 +92359,7 @@ index 834b2d7..650d1b9 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -2146,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -2154,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -92353,7 +92368,7 @@ index 834b2d7..650d1b9 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
-@@ -2160,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2168,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -92362,7 +92377,7 @@ index 834b2d7..650d1b9 100644
/*
* We must make sure the anon_vma is allocated
-@@ -2173,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2181,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -92378,7 +92393,7 @@ index 834b2d7..650d1b9 100644
vma_lock_anon_vma(vma);
/*
-@@ -2182,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2190,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -92397,7 +92412,7 @@ index 834b2d7..650d1b9 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -2209,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -2217,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
vma->vm_pgoff -= grow;
anon_vma_interval_tree_post_update_vma(vma);
vma_gap_update(vma);
@@ -92425,7 +92440,7 @@ index 834b2d7..650d1b9 100644
khugepaged_enter_vma_merge(vma);
validate_mm(vma->vm_mm);
return error;
-@@ -2313,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2321,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -92439,7 +92454,7 @@ index 834b2d7..650d1b9 100644
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2357,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2365,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -92456,7 +92471,7 @@ index 834b2d7..650d1b9 100644
vma_rb_erase(vma, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -2382,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2390,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -92490,7 +92505,7 @@ index 834b2d7..650d1b9 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -2402,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2410,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -92513,7 +92528,7 @@ index 834b2d7..650d1b9 100644
err = vma_dup_policy(vma, new);
if (err)
goto out_free_vma;
-@@ -2421,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2429,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -92552,7 +92567,7 @@ index 834b2d7..650d1b9 100644
/* Success. */
if (!err)
return 0;
-@@ -2430,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2438,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_ops->close(new);
if (new->vm_file)
fput(new->vm_file);
@@ -92572,7 +92587,7 @@ index 834b2d7..650d1b9 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2446,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2454,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -92588,7 +92603,7 @@ index 834b2d7..650d1b9 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2457,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2465,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -92619,7 +92634,7 @@ index 834b2d7..650d1b9 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2536,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2544,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -92628,7 +92643,7 @@ index 834b2d7..650d1b9 100644
return 0;
}
-@@ -2544,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2552,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
int ret;
struct mm_struct *mm = current->mm;
@@ -92642,7 +92657,7 @@ index 834b2d7..650d1b9 100644
down_write(&mm->mmap_sem);
ret = do_munmap(mm, start, len);
up_write(&mm->mmap_sem);
-@@ -2557,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2565,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
return vm_munmap(addr, len);
}
@@ -92659,7 +92674,7 @@ index 834b2d7..650d1b9 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2580,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2588,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -92667,7 +92682,7 @@ index 834b2d7..650d1b9 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2587,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2595,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -92699,7 +92714,7 @@ index 834b2d7..650d1b9 100644
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
-@@ -2613,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2621,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -92724,7 +92739,7 @@ index 834b2d7..650d1b9 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2641,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2649,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -92733,7 +92748,7 @@ index 834b2d7..650d1b9 100644
return -ENOMEM;
}
-@@ -2655,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2663,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -92747,7 +92762,7 @@ index 834b2d7..650d1b9 100644
return addr;
}
-@@ -2720,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2728,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
while (vma) {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
@@ -92755,7 +92770,7 @@ index 834b2d7..650d1b9 100644
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
-@@ -2737,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2745,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
struct vm_area_struct *prev;
struct rb_node **rb_link, *rb_parent;
@@ -92769,7 +92784,7 @@ index 834b2d7..650d1b9 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2760,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2768,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -92791,7 +92806,7 @@ index 834b2d7..650d1b9 100644
return 0;
}
-@@ -2779,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2787,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
@@ -92800,7 +92815,7 @@ index 834b2d7..650d1b9 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2843,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2851,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -92840,7 +92855,7 @@ index 834b2d7..650d1b9 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2854,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2862,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -92848,7 +92863,7 @@ index 834b2d7..650d1b9 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2924,6 +3472,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2932,6 +3472,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -93278,10 +93293,10 @@ index fec093a..8162f74 100644
struct mm_struct *mm;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 6380758..4064aec 100644
+index 2d30e2c..8b3d14c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
-@@ -690,7 +690,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
+@@ -685,7 +685,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
* card's bdi_dirty may rush to many times higher than bdi_setpoint.
* - the bdi dirty thresh drops quickly due to change of JBOD workload
*/
@@ -94368,7 +94383,7 @@ index 4bf8809..98a6914 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 545a170..a086226 100644
+index 89490d9..c7b226a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -207,7 +207,7 @@ struct track {
@@ -94542,7 +94557,7 @@ index 545a170..a086226 100644
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
-@@ -4486,7 +4551,7 @@ SLAB_ATTR_RO(ctor);
+@@ -4492,7 +4557,7 @@ SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
@@ -94551,7 +94566,7 @@ index 545a170..a086226 100644
}
SLAB_ATTR_RO(aliases);
-@@ -4574,6 +4639,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+@@ -4580,6 +4645,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(cache_dma);
#endif
@@ -94566,7 +94581,7 @@ index 545a170..a086226 100644
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
-@@ -4908,6 +4981,9 @@ static struct attribute *slab_attrs[] = {
+@@ -4914,6 +4987,9 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
@@ -94576,7 +94591,7 @@ index 545a170..a086226 100644
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
-@@ -5140,6 +5216,7 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -5146,6 +5222,7 @@ static char *create_unique_id(struct kmem_cache *s)
return name;
}
@@ -94584,7 +94599,7 @@ index 545a170..a086226 100644
static int sysfs_slab_add(struct kmem_cache *s)
{
int err;
-@@ -5163,7 +5240,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
+@@ -5169,7 +5246,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
}
s->kobj.kset = slab_kset;
@@ -94593,7 +94608,7 @@ index 545a170..a086226 100644
if (err) {
kobject_put(&s->kobj);
return err;
-@@ -5197,6 +5274,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+@@ -5203,6 +5280,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
kobject_del(&s->kobj);
kobject_put(&s->kobj);
}
@@ -94601,7 +94616,7 @@ index 545a170..a086226 100644
/*
* Need to buffer aliases during bootup until sysfs becomes
-@@ -5210,6 +5288,7 @@ struct saved_alias {
+@@ -5216,6 +5294,7 @@ struct saved_alias {
static struct saved_alias *alias_list;
@@ -94609,7 +94624,7 @@ index 545a170..a086226 100644
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
-@@ -5232,6 +5311,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
+@@ -5238,6 +5317,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
alias_list = al;
return 0;
}
@@ -95106,6 +95121,22 @@ index 9321a77..ed2f256 100644
set_fs(oldfs);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 9c5a1aa..3c6c637 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+- pages[index++] = kmap_to_page(data);
++ if (is_vmalloc_addr(data))
++ pages[index++] = vmalloc_to_page(data);
++ else
++ pages[index++] = kmap_to_page(data);
+ data += s;
+ nr_pages--;
+ }
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index 876fbe8..8bbea9f 100644
--- a/net/atm/atm_misc.c
@@ -96165,10 +96196,10 @@ index b442e7e..6f5b5a2 100644
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 06e72d3..19dfa7b 100644
+index 0b5149c..24e9976 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -2034,7 +2034,7 @@ EXPORT_SYMBOL(__skb_checksum);
+@@ -2004,7 +2004,7 @@ EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
{
@@ -96177,7 +96208,7 @@ index 06e72d3..19dfa7b 100644
.update = csum_partial_ext,
.combine = csum_block_add_ext,
};
-@@ -3147,13 +3147,15 @@ void __init skb_init(void)
+@@ -3117,13 +3117,15 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -100440,7 +100471,7 @@ index e83c416..1094d88 100644
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
-index 008cdad..6f17474 100644
+index 1b94a9c..496f7f5 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1140,7 +1140,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
@@ -100462,7 +100493,7 @@ index 008cdad..6f17474 100644
/* make a copy for the caller */
*handle = ctxh;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index f09b7db..393c39e 100644
+index e726e16..393c39e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1415,7 +1415,9 @@ call_start(struct rpc_task *task)
@@ -100476,22 +100507,6 @@ index f09b7db..393c39e 100644
clnt->cl_stats->rpccnt++;
task->tk_action = call_reserve;
}
-@@ -1529,9 +1531,13 @@ call_refreshresult(struct rpc_task *task)
- task->tk_action = call_refresh;
- switch (status) {
- case 0:
-- if (rpcauth_uptodatecred(task))
-+ if (rpcauth_uptodatecred(task)) {
- task->tk_action = call_allocate;
-- return;
-+ return;
-+ }
-+ /* Use rate-limiting and a max number of retries if refresh
-+ * had status 0 but failed to update the cred.
-+ */
- case -ETIMEDOUT:
- rpc_delay(task, 3*HZ);
- case -EAGAIN:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff3cc4b..7612a9e 100644
--- a/net/sunrpc/sched.c
@@ -103852,7 +103867,7 @@ index 0000000..4c2c45c
+size_overflow_hash.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..0d1d9d3
+index 0000000..b198b6d
--- /dev/null
+++ b/tools/gcc/Makefile
@@ -0,0 +1,54 @@
@@ -103867,7 +103882,7 @@ index 0000000..0d1d9d3
+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
+else
+HOSTLIBS := hostcxxlibs
-+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing
+endif
+
+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
@@ -105902,10 +105917,10 @@ index 0000000..dd73713
+}
diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
new file mode 100644
-index 0000000..592b923
+index 0000000..515d689
--- /dev/null
+++ b/tools/gcc/latent_entropy_plugin.c
-@@ -0,0 +1,325 @@
+@@ -0,0 +1,337 @@
+/*
+ * Copyright 2012-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -105934,7 +105949,7 @@ index 0000000..592b923
+static tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
-+ .version = "201401260140",
++ .version = "201402131900",
+ .help = NULL
+};
+
@@ -106131,7 +106146,7 @@ index 0000000..592b923
+ return 0;
+}
+
-+static void start_unit_callback(void *gcc_data, void *user_data)
++static void latent_entropy_start_unit(void *gcc_data, void *user_data)
+{
+ tree latent_entropy_type;
+
@@ -106218,6 +106233,16 @@ index 0000000..592b923
+ latent_entropy_pass_info.reference_pass_name = "optimized";
+ latent_entropy_pass_info.ref_pass_instance_number = 1;
+ latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
++ static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
++ {
++ .base = &latent_entropy_decl,
++ .nelt = 1,
++ .stride = sizeof(latent_entropy_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
@@ -106225,7 +106250,9 @@ index 0000000..592b923
+ }
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &latent_entropy_start_unit, NULL);
++ if (!in_lto_p)
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_latent_entropy);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
@@ -106233,10 +106260,10 @@ index 0000000..592b923
+}
diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
new file mode 100644
-index 0000000..fed12bf
+index 0000000..bc490ca
--- /dev/null
+++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,902 @@
+@@ -0,0 +1,906 @@
+/*
+ * Copyright 2014 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
+ * and PaX Team <pageexec@freemail.hu>
@@ -106253,6 +106280,10 @@ index 0000000..fed12bf
+#include "gcc-common.h"
+#include "randomize_layout_seed.h"
+
++#if BUILDING_GCC_MAJOR < 4 || BUILDING_GCC_MINOR < 6 || (BUILDING_GCC_MINOR == 6 && BUILDING_GCC_PATCHLEVEL < 4)
++#error "The RANDSTRUCT plugin requires GCC 4.6.4 or newer."
++#endif
++
+#define ORIG_TYPE_NAME(node) \
+ (TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
+
@@ -106669,7 +106700,7 @@ index 0000000..fed12bf
+#endif
+}
+
-+static void finish_decl(void *event_data, void *data)
++static void randomize_layout_finish_decl(void *event_data, void *data)
+{
+ tree decl = (tree)event_data;
+ tree type;
@@ -107133,7 +107164,7 @@ index 0000000..fed12bf
+ register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &randomize_layout_bad_cast_info);
+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
-+ register_callback(plugin_name, PLUGIN_FINISH_DECL, finish_decl, NULL);
++ register_callback(plugin_name, PLUGIN_FINISH_DECL, randomize_layout_finish_decl, NULL);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
@@ -107141,10 +107172,10 @@ index 0000000..fed12bf
+}
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
new file mode 100644
-index 0000000..0c980be
+index 0000000..102f0d6
--- /dev/null
+++ b/tools/gcc/size_overflow_hash.data
-@@ -0,0 +1,5697 @@
+@@ -0,0 +1,5703 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
@@ -107190,6 +107221,7 @@ index 0000000..0c980be
+start_isoc_chain_565 start_isoc_chain 2 565 NULL nohasharray
+dev_hard_header_565 dev_hard_header 0 565 &start_isoc_chain_565
+ocfs2_refcounted_xattr_delete_need_584 ocfs2_refcounted_xattr_delete_need 0 584 NULL
++osl_pktget_590 osl_pktget 2 590 NULL
+smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
+btrfs_stack_file_extent_offset_607 btrfs_stack_file_extent_offset 0 607 NULL
+ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
@@ -108220,6 +108252,7 @@ index 0000000..0c980be
+lustre_pack_request_v2_12665 lustre_pack_request_v2 0 12665 NULL
+sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
+sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
++ext4_writepage_trans_blocks_12674 ext4_writepage_trans_blocks 0 12674 NULL
+iwl_dbgfs_calib_disabled_write_12707 iwl_dbgfs_calib_disabled_write 3 12707 NULL
+ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
+ivtv_write_12721 ivtv_write 3 12721 NULL
@@ -108489,6 +108522,7 @@ index 0000000..0c980be
+SyS_connect_15674 SyS_connect 3 15674 &dm_read_15674
+tracing_snapshot_write_15719 tracing_snapshot_write 3 15719 NULL
+HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
++bio_map_15794 bio_map 3-0 15794 NULL
+smk_read_direct_15803 smk_read_direct 3 15803 NULL
+nameseq_list_15817 nameseq_list 3-0 15817 NULL nohasharray
+gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
@@ -111756,6 +111790,7 @@ index 0000000..0c980be
+ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
+tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
+num_node_state_52989 num_node_state 0 52989 NULL
++efivarfs_file_write_53000 efivarfs_file_write 3 53000 NULL
+uasp_alloc_stream_res_53015 uasp_alloc_stream_res 0 53015 NULL
+btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
+tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
@@ -111994,6 +112029,7 @@ index 0000000..0c980be
+dgap_do_config_load_55548 dgap_do_config_load 2 55548 NULL
+hash_ipport6_expire_55549 hash_ipport6_expire 4 55549 NULL
+dm_stats_list_55551 dm_stats_list 4 55551 NULL
++__vdev_disk_physio_55568 __vdev_disk_physio 4 55568 NULL
+add_partition_55588 add_partition 2 55588 NULL
+kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL
+SyS_keyctl_55602 SyS_keyctl 4 55602 NULL nohasharray
@@ -112474,6 +112510,7 @@ index 0000000..0c980be
+vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
+afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
+brcmf_sdio_chip_cr4_exitdl_61143 brcmf_sdio_chip_cr4_exitdl 4 61143 NULL
++osl_malloc_61156 osl_malloc 2 61156 NULL
+pair_device_61175 pair_device 4 61175 NULL nohasharray
+event_oom_late_read_61175 event_oom_late_read 3 61175 &pair_device_61175
+dio_bio_add_page_61178 dio_bio_add_page 0 61178 NULL
@@ -112844,10 +112881,10 @@ index 0000000..0c980be
+nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..4a637ab
+index 0000000..4aab36f
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,4040 @@
+@@ -0,0 +1,4051 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -112872,7 +112909,7 @@ index 0000000..4a637ab
+int plugin_is_GPL_compatible;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140128",
++ .version = "20140213",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -112940,7 +112977,6 @@ index 0000000..4a637ab
+};
+
+static tree report_size_overflow_decl;
-+static const_tree const_char_ptr_type_node;
+
+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
@@ -116713,8 +116749,9 @@ index 0000000..4a637ab
+}
+
+// Create the noreturn report_size_overflow() function decl.
-+static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
++static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
+{
++ tree const_char_ptr_type_node;
+ tree fntype;
+
+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
@@ -116842,6 +116879,16 @@ index 0000000..4a637ab
+ struct register_pass_info __unused dump_before_pass_info;
+ struct register_pass_info __unused dump_after_pass_info;
+ struct register_pass_info ipa_pass_info;
++ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
++ {
++ .base = &report_size_overflow_decl,
++ .nelt = 1,
++ .stride = sizeof(report_size_overflow_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
+ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
+ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
@@ -116878,7 +116925,8 @@ index 0000000..4a637ab
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
@@ -116890,10 +116938,10 @@ index 0000000..4a637ab
+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
-index 0000000..a4f816a
+index 0000000..e684c74
--- /dev/null
+++ b/tools/gcc/stackleak_plugin.c
-@@ -0,0 +1,341 @@
+@@ -0,0 +1,373 @@
+/*
+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -116921,10 +116969,11 @@ index 0000000..a4f816a
+static int track_frame_size = -1;
+static const char track_function[] = "pax_track_stack";
+static const char check_function[] = "pax_check_alloca";
++static tree track_function_decl, check_function_decl;
+static bool init_locals;
+
+static struct plugin_info stackleak_plugin_info = {
-+ .version = "201401260140",
++ .version = "201402131920",
+ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
+// "initialize-locals\t\tforcibly initialize all stack frames\n"
+};
@@ -116932,29 +116981,20 @@ index 0000000..a4f816a
+static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
+{
+ gimple check_alloca;
-+ tree fntype, fndecl, alloca_size;
-+
-+ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
-+ fndecl = build_fn_decl(check_function, fntype);
-+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
++ tree alloca_size;
+
+ // insert call to void pax_check_alloca(unsigned long size)
+ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
-+ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
++ check_alloca = gimple_build_call(check_function_decl, 1, alloca_size);
+ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
+}
+
+static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
+{
+ gimple track_stack;
-+ tree fntype, fndecl;
-+
-+ fntype = build_function_type_list(void_type_node, NULL_TREE);
-+ fndecl = build_fn_decl(track_function, fntype);
-+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
+
+ // insert call to void pax_track_stack(void)
-+ track_stack = gimple_build_call(fndecl, 0);
++ track_stack = gimple_build_call(track_function_decl, 0);
+ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
+}
+
@@ -117081,6 +117121,27 @@ index 0000000..a4f816a
+ return track_frame_size >= 0;
+}
+
++static void stackleak_start_unit(void *gcc_data, void *user_data)
++{
++ tree fntype;
++
++ // void pax_track_stack(void)
++ fntype = build_function_type_list(void_type_node, NULL_TREE);
++ track_function_decl = build_fn_decl(track_function, fntype);
++ DECL_ASSEMBLER_NAME(track_function_decl); // for LTO
++ TREE_PUBLIC(track_function_decl) = 1;
++ DECL_EXTERNAL(track_function_decl) = 1;
++ DECL_ARTIFICIAL(track_function_decl) = 1;
++
++ // void pax_check_alloca(unsigned long)
++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
++ check_function_decl = build_fn_decl(check_function, fntype);
++ DECL_ASSEMBLER_NAME(check_function_decl); // for LTO
++ TREE_PUBLIC(check_function_decl) = 1;
++ DECL_EXTERNAL(check_function_decl) = 1;
++ DECL_ARTIFICIAL(check_function_decl) = 1;
++}
++
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data stackleak_tree_instrument_pass_data = {
+#else
@@ -117189,6 +117250,23 @@ index 0000000..a4f816a
+ int i;
+ struct register_pass_info stackleak_tree_instrument_pass_info;
+ struct register_pass_info stackleak_final_pass_info;
++ static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
++ {
++ .base = &track_function_decl,
++ .nelt = 1,
++ .stride = sizeof(track_function_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ {
++ .base = &check_function_decl,
++ .nelt = 1,
++ .stride = sizeof(check_function_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
+ stackleak_tree_instrument_pass_info.pass = make_stackleak_tree_instrument_pass();
+// stackleak_tree_instrument_pass_info.reference_pass_name = "tree_profile";
@@ -117230,6 +117308,8 @@ index 0000000..a4f816a
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ }
+
++ register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_stackleak);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
+
diff --git a/3.13.2/4425_grsec_remove_EI_PAX.patch b/3.13.3/4425_grsec_remove_EI_PAX.patch
index fc51f79..fc51f79 100644
--- a/3.13.2/4425_grsec_remove_EI_PAX.patch
+++ b/3.13.3/4425_grsec_remove_EI_PAX.patch
diff --git a/3.13.2/4427_force_XATTR_PAX_tmpfs.patch b/3.13.3/4427_force_XATTR_PAX_tmpfs.patch
index 23e60cd..23e60cd 100644
--- a/3.13.2/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.13.3/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.13.2/4430_grsec-remove-localversion-grsec.patch b/3.13.3/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.13.2/4430_grsec-remove-localversion-grsec.patch
+++ b/3.13.3/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.13.2/4435_grsec-mute-warnings.patch b/3.13.3/4435_grsec-mute-warnings.patch
index cb51a05..cb51a05 100644
--- a/3.13.2/4435_grsec-mute-warnings.patch
+++ b/3.13.3/4435_grsec-mute-warnings.patch
diff --git a/3.13.2/4440_grsec-remove-protected-paths.patch b/3.13.3/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.13.2/4440_grsec-remove-protected-paths.patch
+++ b/3.13.3/4440_grsec-remove-protected-paths.patch
diff --git a/3.13.2/4450_grsec-kconfig-default-gids.patch b/3.13.3/4450_grsec-kconfig-default-gids.patch
index 88f1f9b..88f1f9b 100644
--- a/3.13.2/4450_grsec-kconfig-default-gids.patch
+++ b/3.13.3/4450_grsec-kconfig-default-gids.patch
diff --git a/3.13.2/4465_selinux-avc_audit-log-curr_ip.patch b/3.13.3/4465_selinux-avc_audit-log-curr_ip.patch
index 0648169..0648169 100644
--- a/3.13.2/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.13.3/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.13.2/4470_disable-compat_vdso.patch b/3.13.3/4470_disable-compat_vdso.patch
index a25c029..a25c029 100644
--- a/3.13.2/4470_disable-compat_vdso.patch
+++ b/3.13.3/4470_disable-compat_vdso.patch
diff --git a/3.13.2/4475_emutramp_default_on.patch b/3.13.3/4475_emutramp_default_on.patch
index 30f6978..30f6978 100644
--- a/3.13.2/4475_emutramp_default_on.patch
+++ b/3.13.3/4475_emutramp_default_on.patch
diff --git a/3.2.54/0000_README b/3.2.55/0000_README
index b0f9c87..f81409b 100644
--- a/3.2.54/0000_README
+++ b/3.2.55/0000_README
@@ -134,7 +134,11 @@ Patch: 1053_linux-3.2.54.patch
From: http://www.kernel.org
Desc: Linux 3.2.54
-Patch: 4420_grsecurity-3.0-3.2.54-201402111745.patch
+Patch: 1054_linux-3.2.55.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.55
+
+Patch: 4420_grsecurity-3.0-3.2.55-201402152203.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.54/1021_linux-3.2.22.patch b/3.2.55/1021_linux-3.2.22.patch
index e6ad93a..e6ad93a 100644
--- a/3.2.54/1021_linux-3.2.22.patch
+++ b/3.2.55/1021_linux-3.2.22.patch
diff --git a/3.2.54/1022_linux-3.2.23.patch b/3.2.55/1022_linux-3.2.23.patch
index 3d796d0..3d796d0 100644
--- a/3.2.54/1022_linux-3.2.23.patch
+++ b/3.2.55/1022_linux-3.2.23.patch
diff --git a/3.2.54/1023_linux-3.2.24.patch b/3.2.55/1023_linux-3.2.24.patch
index 4692eb4..4692eb4 100644
--- a/3.2.54/1023_linux-3.2.24.patch
+++ b/3.2.55/1023_linux-3.2.24.patch
diff --git a/3.2.54/1024_linux-3.2.25.patch b/3.2.55/1024_linux-3.2.25.patch
index e95c213..e95c213 100644
--- a/3.2.54/1024_linux-3.2.25.patch
+++ b/3.2.55/1024_linux-3.2.25.patch
diff --git a/3.2.54/1025_linux-3.2.26.patch b/3.2.55/1025_linux-3.2.26.patch
index 44065b9..44065b9 100644
--- a/3.2.54/1025_linux-3.2.26.patch
+++ b/3.2.55/1025_linux-3.2.26.patch
diff --git a/3.2.54/1026_linux-3.2.27.patch b/3.2.55/1026_linux-3.2.27.patch
index 5878eb4..5878eb4 100644
--- a/3.2.54/1026_linux-3.2.27.patch
+++ b/3.2.55/1026_linux-3.2.27.patch
diff --git a/3.2.54/1027_linux-3.2.28.patch b/3.2.55/1027_linux-3.2.28.patch
index 4dbba4b..4dbba4b 100644
--- a/3.2.54/1027_linux-3.2.28.patch
+++ b/3.2.55/1027_linux-3.2.28.patch
diff --git a/3.2.54/1028_linux-3.2.29.patch b/3.2.55/1028_linux-3.2.29.patch
index 3c65179..3c65179 100644
--- a/3.2.54/1028_linux-3.2.29.patch
+++ b/3.2.55/1028_linux-3.2.29.patch
diff --git a/3.2.54/1029_linux-3.2.30.patch b/3.2.55/1029_linux-3.2.30.patch
index 86aea4b..86aea4b 100644
--- a/3.2.54/1029_linux-3.2.30.patch
+++ b/3.2.55/1029_linux-3.2.30.patch
diff --git a/3.2.54/1030_linux-3.2.31.patch b/3.2.55/1030_linux-3.2.31.patch
index c6accf5..c6accf5 100644
--- a/3.2.54/1030_linux-3.2.31.patch
+++ b/3.2.55/1030_linux-3.2.31.patch
diff --git a/3.2.54/1031_linux-3.2.32.patch b/3.2.55/1031_linux-3.2.32.patch
index 247fc0b..247fc0b 100644
--- a/3.2.54/1031_linux-3.2.32.patch
+++ b/3.2.55/1031_linux-3.2.32.patch
diff --git a/3.2.54/1032_linux-3.2.33.patch b/3.2.55/1032_linux-3.2.33.patch
index c32fb75..c32fb75 100644
--- a/3.2.54/1032_linux-3.2.33.patch
+++ b/3.2.55/1032_linux-3.2.33.patch
diff --git a/3.2.54/1033_linux-3.2.34.patch b/3.2.55/1033_linux-3.2.34.patch
index d647b38..d647b38 100644
--- a/3.2.54/1033_linux-3.2.34.patch
+++ b/3.2.55/1033_linux-3.2.34.patch
diff --git a/3.2.54/1034_linux-3.2.35.patch b/3.2.55/1034_linux-3.2.35.patch
index 76a9c19..76a9c19 100644
--- a/3.2.54/1034_linux-3.2.35.patch
+++ b/3.2.55/1034_linux-3.2.35.patch
diff --git a/3.2.54/1035_linux-3.2.36.patch b/3.2.55/1035_linux-3.2.36.patch
index 5d192a3..5d192a3 100644
--- a/3.2.54/1035_linux-3.2.36.patch
+++ b/3.2.55/1035_linux-3.2.36.patch
diff --git a/3.2.54/1036_linux-3.2.37.patch b/3.2.55/1036_linux-3.2.37.patch
index ad13251..ad13251 100644
--- a/3.2.54/1036_linux-3.2.37.patch
+++ b/3.2.55/1036_linux-3.2.37.patch
diff --git a/3.2.54/1037_linux-3.2.38.patch b/3.2.55/1037_linux-3.2.38.patch
index a3c106f..a3c106f 100644
--- a/3.2.54/1037_linux-3.2.38.patch
+++ b/3.2.55/1037_linux-3.2.38.patch
diff --git a/3.2.54/1038_linux-3.2.39.patch b/3.2.55/1038_linux-3.2.39.patch
index 5639e92..5639e92 100644
--- a/3.2.54/1038_linux-3.2.39.patch
+++ b/3.2.55/1038_linux-3.2.39.patch
diff --git a/3.2.54/1039_linux-3.2.40.patch b/3.2.55/1039_linux-3.2.40.patch
index f26b39c..f26b39c 100644
--- a/3.2.54/1039_linux-3.2.40.patch
+++ b/3.2.55/1039_linux-3.2.40.patch
diff --git a/3.2.54/1040_linux-3.2.41.patch b/3.2.55/1040_linux-3.2.41.patch
index 0d27fcb..0d27fcb 100644
--- a/3.2.54/1040_linux-3.2.41.patch
+++ b/3.2.55/1040_linux-3.2.41.patch
diff --git a/3.2.54/1041_linux-3.2.42.patch b/3.2.55/1041_linux-3.2.42.patch
index 77a08ed..77a08ed 100644
--- a/3.2.54/1041_linux-3.2.42.patch
+++ b/3.2.55/1041_linux-3.2.42.patch
diff --git a/3.2.54/1042_linux-3.2.43.patch b/3.2.55/1042_linux-3.2.43.patch
index a3f878b..a3f878b 100644
--- a/3.2.54/1042_linux-3.2.43.patch
+++ b/3.2.55/1042_linux-3.2.43.patch
diff --git a/3.2.54/1043_linux-3.2.44.patch b/3.2.55/1043_linux-3.2.44.patch
index 3d5e6ff..3d5e6ff 100644
--- a/3.2.54/1043_linux-3.2.44.patch
+++ b/3.2.55/1043_linux-3.2.44.patch
diff --git a/3.2.54/1044_linux-3.2.45.patch b/3.2.55/1044_linux-3.2.45.patch
index 44e1767..44e1767 100644
--- a/3.2.54/1044_linux-3.2.45.patch
+++ b/3.2.55/1044_linux-3.2.45.patch
diff --git a/3.2.54/1045_linux-3.2.46.patch b/3.2.55/1045_linux-3.2.46.patch
index bc10efd..bc10efd 100644
--- a/3.2.54/1045_linux-3.2.46.patch
+++ b/3.2.55/1045_linux-3.2.46.patch
diff --git a/3.2.54/1046_linux-3.2.47.patch b/3.2.55/1046_linux-3.2.47.patch
index b74563c..b74563c 100644
--- a/3.2.54/1046_linux-3.2.47.patch
+++ b/3.2.55/1046_linux-3.2.47.patch
diff --git a/3.2.54/1047_linux-3.2.48.patch b/3.2.55/1047_linux-3.2.48.patch
index 6d55b1f..6d55b1f 100644
--- a/3.2.54/1047_linux-3.2.48.patch
+++ b/3.2.55/1047_linux-3.2.48.patch
diff --git a/3.2.54/1048_linux-3.2.49.patch b/3.2.55/1048_linux-3.2.49.patch
index 2dab0cf..2dab0cf 100644
--- a/3.2.54/1048_linux-3.2.49.patch
+++ b/3.2.55/1048_linux-3.2.49.patch
diff --git a/3.2.54/1049_linux-3.2.50.patch b/3.2.55/1049_linux-3.2.50.patch
index 20b3015..20b3015 100644
--- a/3.2.54/1049_linux-3.2.50.patch
+++ b/3.2.55/1049_linux-3.2.50.patch
diff --git a/3.2.54/1050_linux-3.2.51.patch b/3.2.55/1050_linux-3.2.51.patch
index 5d5832b..5d5832b 100644
--- a/3.2.54/1050_linux-3.2.51.patch
+++ b/3.2.55/1050_linux-3.2.51.patch
diff --git a/3.2.54/1051_linux-3.2.52.patch b/3.2.55/1051_linux-3.2.52.patch
index 94b9359..94b9359 100644
--- a/3.2.54/1051_linux-3.2.52.patch
+++ b/3.2.55/1051_linux-3.2.52.patch
diff --git a/3.2.54/1052_linux-3.2.53.patch b/3.2.55/1052_linux-3.2.53.patch
index 986d714..986d714 100644
--- a/3.2.54/1052_linux-3.2.53.patch
+++ b/3.2.55/1052_linux-3.2.53.patch
diff --git a/3.2.54/1053_linux-3.2.54.patch b/3.2.55/1053_linux-3.2.54.patch
index a907496..a907496 100644
--- a/3.2.54/1053_linux-3.2.54.patch
+++ b/3.2.55/1053_linux-3.2.54.patch
diff --git a/3.2.55/1054_linux-3.2.55.patch b/3.2.55/1054_linux-3.2.55.patch
new file mode 100644
index 0000000..6071ff5
--- /dev/null
+++ b/3.2.55/1054_linux-3.2.55.patch
@@ -0,0 +1,2495 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 2ba8272..1b196ea 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1305,6 +1305,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ * dump_id: dump IDENTIFY data.
+
++ * disable: Disable this device.
++
+ If there are multiple matching configurations changing
+ the same attribute, the last one is used.
+
+diff --git a/Makefile b/Makefile
+index 848be26..538463e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 7ac5dfd..d45fd22 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -37,7 +37,13 @@
+
+ #include "signal.h"
+
+-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
++static const char *handler[]= {
++ "prefetch abort",
++ "data abort",
++ "address exception",
++ "interrupt",
++ "undefined instruction",
++};
+
+ void *vectors_page;
+
+diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
+index 121ad1d..547731b 100644
+--- a/arch/arm/mach-footbridge/dc21285-timer.c
++++ b/arch/arm/mach-footbridge/dc21285-timer.c
+@@ -95,17 +95,14 @@ static struct irqaction footbridge_timer_irq = {
+ static void __init footbridge_timer_init(void)
+ {
+ struct clock_event_device *ce = &ckevt_dc21285;
++ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
+
+- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
++ clocksource_register_hz(&cksrc_dc21285, rate);
+
+ setup_irq(ce->irq, &footbridge_timer_irq);
+
+- clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
+- ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
+- ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
+ ce->cpumask = cpumask_of(smp_processor_id());
+-
+- clockevents_register_device(ce);
++ clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
+ }
+
+ struct sys_timer footbridge_timer = {
+diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
+index 3d3aeef..abd6bad 100644
+--- a/arch/ia64/kernel/machine_kexec.c
++++ b/arch/ia64/kernel/machine_kexec.c
+@@ -157,7 +157,7 @@ void arch_crash_save_vmcoreinfo(void)
+ #endif
+ #ifdef CONFIG_PGTABLE_3
+ VMCOREINFO_CONFIG(PGTABLE_3);
+-#elif CONFIG_PGTABLE_4
++#elif defined(CONFIG_PGTABLE_4)
+ VMCOREINFO_CONFIG(PGTABLE_4);
+ #endif
+ }
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 8057f4f..da85ad4 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -163,7 +163,7 @@ do_kvm_##n: \
+ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
+ beq- 1f; \
+ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
+-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
++1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
+ blt+ cr1,3f; /* abort if it is */ \
+ li r1,(n); /* will be reloaded later */ \
+ sth r1,PACA_TRAP_SAVE(r13); \
+diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
+index 7b95f29..3baff31 100644
+--- a/arch/sh/lib/Makefile
++++ b/arch/sh/lib/Makefile
+@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
+ checksum.o strlen.o div64.o div64-generic.o
+
+ # Extracted from libgcc
+-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
++obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+ ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
+ udiv_qrnnd.o
+
+diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+index 3b8a2d3..ea34253 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+@@ -9,6 +9,7 @@
+ #include <linux/perf_event.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <linux/syscore_ops.h>
+
+ #include <asm/apic.h>
+
+@@ -209,6 +210,18 @@ out:
+ return ret;
+ }
+
++static void ibs_eilvt_setup(void)
++{
++ /*
++ * Force LVT offset assignment for family 10h: The offsets are
++ * not assigned by the BIOS for this family, so the OS is
++ * responsible for doing it. If the OS assignment fails, fall
++ * back to BIOS settings and try to setup this.
++ */
++ if (boot_cpu_data.x86 == 0x10)
++ force_ibs_eilvt_setup();
++}
++
+ static inline int get_ibs_lvt_offset(void)
+ {
+ u64 val;
+@@ -244,6 +257,36 @@ static void clear_APIC_ibs(void *dummy)
+ setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
+ }
+
++#ifdef CONFIG_PM
++
++static int perf_ibs_suspend(void)
++{
++ clear_APIC_ibs(NULL);
++ return 0;
++}
++
++static void perf_ibs_resume(void)
++{
++ ibs_eilvt_setup();
++ setup_APIC_ibs(NULL);
++}
++
++static struct syscore_ops perf_ibs_syscore_ops = {
++ .resume = perf_ibs_resume,
++ .suspend = perf_ibs_suspend,
++};
++
++static void perf_ibs_pm_init(void)
++{
++ register_syscore_ops(&perf_ibs_syscore_ops);
++}
++
++#else
++
++static inline void perf_ibs_pm_init(void) { }
++
++#endif
++
+ static int __cpuinit
+ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+ {
+@@ -270,18 +313,12 @@ static __init int amd_ibs_init(void)
+ if (!caps)
+ return -ENODEV; /* ibs not supported by the cpu */
+
+- /*
+- * Force LVT offset assignment for family 10h: The offsets are
+- * not assigned by the BIOS for this family, so the OS is
+- * responsible for doing it. If the OS assignment fails, fall
+- * back to BIOS settings and try to setup this.
+- */
+- if (boot_cpu_data.x86 == 0x10)
+- force_ibs_eilvt_setup();
++ ibs_eilvt_setup();
+
+ if (!ibs_eilvt_valid())
+ goto out;
+
++ perf_ibs_pm_init();
+ get_online_cpus();
+ ibs_caps = caps;
+ /* make ibs_caps visible to other cpus: */
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index e6fbb94..20061b9 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -582,12 +582,13 @@ void __math_state_restore(struct task_struct *tsk)
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. safe_address is a random variable that should be in L1 */
+- alternative_input(
+- ASM_NOP8 ASM_NOP2,
+- "emms\n\t" /* clear stack tags */
+- "fildl %P[addr]", /* set F?P to defined value */
+- X86_FEATURE_FXSAVE_LEAK,
+- [addr] "m" (safe_address));
++ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
++ asm volatile(
++ "fnclex\n\t"
++ "emms\n\t"
++ "fildl %P[addr]" /* set F?P to defined value */
++ : : [addr] "m" (safe_address));
++ }
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 43e7753..757c716 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1278,14 +1278,12 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+ {
+ u32 data;
+- void *vapic;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ return;
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
+- kunmap_atomic(vapic, KM_USER0);
++ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+
+ apic_set_tpr(vcpu->arch.apic, data & 0xff);
+ }
+@@ -1295,7 +1293,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ u32 data, tpr;
+ int max_irr, max_isr;
+ struct kvm_lapic *apic;
+- void *vapic;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ return;
+@@ -1310,17 +1307,22 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ max_isr = 0;
+ data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
+- kunmap_atomic(vapic, KM_USER0);
++ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+ }
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+ {
+ if (!irqchip_in_kernel(vcpu->kvm))
+- return;
++ return 0;
++
++ if (vapic_addr && kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.apic->vapic_cache,
++ vapic_addr, sizeof(u32)))
++ return -EINVAL;
+
+ vcpu->arch.apic->vapic_addr = vapic_addr;
++ return 0;
+ }
+
+ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 138e8cc..62ae376 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -15,7 +15,7 @@ struct kvm_lapic {
+ bool irr_pending;
+ void *regs;
+ gpa_t vapic_addr;
+- struct page *vapic_page;
++ struct gfn_to_hva_cache vapic_cache;
+ };
+ int kvm_create_lapic(struct kvm_vcpu *vcpu);
+ void kvm_free_lapic(struct kvm_vcpu *vcpu);
+@@ -45,7 +45,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
+ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 57867e4..7774cca 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3140,8 +3140,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
+- r = 0;
+- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
+@@ -5537,33 +5536,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
+ !kvm_event_needs_reinjection(vcpu);
+ }
+
+-static void vapic_enter(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- struct page *page;
+-
+- if (!apic || !apic->vapic_addr)
+- return;
+-
+- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+-
+- vcpu->arch.apic->vapic_page = page;
+-}
+-
+-static void vapic_exit(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- int idx;
+-
+- if (!apic || !apic->vapic_addr)
+- return;
+-
+- idx = srcu_read_lock(&vcpu->kvm->srcu);
+- kvm_release_page_dirty(apic->vapic_page);
+- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+-}
+-
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ {
+ int max_irr, tpr;
+@@ -5836,7 +5808,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+ }
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+- vapic_enter(vcpu);
+
+ r = 1;
+ while (r > 0) {
+@@ -5893,8 +5864,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+
+- vapic_exit(vcpu);
+-
+ return r;
+ }
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index d29f6d5..f4000ee 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -429,17 +429,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ /* Marvell */
+ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
+ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+- { PCI_DEVICE(0x1b4b, 0x9123),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
+ .class = PCI_CLASS_STORAGE_SATA_AHCI,
+ .class_mask = 0xffffff,
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
+- { PCI_DEVICE(0x1b4b, 0x9125),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+- { PCI_DEVICE(0x1b4b, 0x917a),
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
++ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+- { PCI_DEVICE(0x1b4b, 0x9192),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+- { PCI_DEVICE(0x1b4b, 0x91a3),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ .driver_data = board_ahci_yes_fbs },
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index a0a3987..72bbb5e 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4097,6 +4097,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
++ /* Seagate Momentus SpinPoint M8 seem to have FPDMA_AA issues */
++ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
++
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+@@ -6328,6 +6331,7 @@ static int __init ata_parse_force_one(char **cur,
+ { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
+ { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
+ { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
++ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
+ };
+ char *start = *cur, *p = *cur;
+ char *id, *val, *endp;
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index da85c0d..a842317 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -254,6 +254,7 @@ config NET_DMA
+ bool "Network: TCP receive copy offload"
+ depends on DMA_ENGINE && NET
+ default (INTEL_IOATDMA || FSL_DMA)
++ depends on BROKEN
+ help
+ This enables the use of DMA engines in the network stack to
+ offload receive copy-to-user operations, freeing CPU cycles.
+diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
+index 5cb1227..3104502 100644
+--- a/drivers/gpio/gpio-msm-v2.c
++++ b/drivers/gpio/gpio-msm-v2.c
+@@ -249,7 +249,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
+- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
++ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ __clear_bit(gpio, msm_gpio.enabled_irqs);
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ }
+@@ -261,7 +261,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ __set_bit(gpio, msm_gpio.enabled_irqs);
+- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
++ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 97a050f..ddb22e7 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -37,6 +37,7 @@
+ */
+ #define INTEL_GMCH_CTRL 0x52
+ #define INTEL_GMCH_VGA_DISABLE (1 << 1)
++#define SNB_GMCH_CTRL 0x50
+
+ /* PCI config space */
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6d36695..61b708b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9141,14 +9141,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
+ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
++ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
++ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index 93bce72..414a681 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -160,6 +160,16 @@ void rs690_mc_init(struct radeon_device *rdev)
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++ /* Some boards seem to be configured for 128MB of sideport memory,
++ * but really only have 64MB. Just skip the sideport and use
++ * UMA memory.
++ */
++ if (rdev->mc.igp_sideport_enabled &&
++ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
++ base += 128 * 1024 * 1024;
++ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
++ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
++ }
+ rs690_pm_info(rdev);
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 221b924..e223175 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -144,9 +144,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ }
+
+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+- bo->vm_node->start - vma->vm_pgoff;
+- page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+- bo->vm_node->start - vma->vm_pgoff;
++ vma->vm_pgoff - bo->vm_node->start;
++ page_last = vma_pages(vma) + vma->vm_pgoff -
++ bo->vm_node->start;
+
+ if (unlikely(page_offset >= bo->num_pages)) {
+ retval = VM_FAULT_SIGBUS;
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 3d630bb..e6ec920 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
++#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 8bba438..6d05e26 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -997,7 +997,7 @@ read_again:
+ /* Could not read all from this device, so we will
+ * need another r10_bio.
+ */
+- sectors_handled = (r10_bio->sectors + max_sectors
++ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+@@ -1005,7 +1005,7 @@ read_again:
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+- spin_unlock(&conf->device_lock);
++ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __generic_make_request
+ * and subsequent mempool_alloc might block
+@@ -2563,10 +2563,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ if (j == conf->copies) {
+ /* Cannot recover, so abort the recovery or
+ * record a bad block */
+- put_buf(r10_bio);
+- if (rb2)
+- atomic_dec(&rb2->remaining);
+- r10_bio = rb2;
+ if (any_working) {
+ /* problem is that there are bad blocks
+ * on other device(s)
+@@ -2590,6 +2586,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ conf->mirrors[i].recovery_disabled
+ = mddev->recovery_disabled;
+ }
++ put_buf(r10_bio);
++ if (rb2)
++ atomic_dec(&rb2->remaining);
++ r10_bio = rb2;
+ break;
+ }
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 26ef63a..fb67833 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3084,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ */
+ set_bit(R5_Insync, &dev->flags);
+
+- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
++ if (test_bit(R5_WriteError, &dev->flags)) {
+ clear_bit(R5_Insync, &dev->flags);
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+@@ -3092,7 +3092,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ } else
+ clear_bit(R5_WriteError, &dev->flags);
+ }
+- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
++ if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev->nr_pending);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index bbb6692..e367ab1 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -108,6 +108,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ struct sk_buff *skb = tx_buf->skb;
+ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+ int nbd;
++ u16 split_bd_len = 0;
+
+ /* prefetch skb end pointer to speedup dev_kfree_skb() */
+ prefetch(&skb->end);
+@@ -115,11 +116,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
+
+- /* unmap first bd */
+ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+
+ nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+@@ -138,12 +136,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+- /* ...and the TSO split header bd since they have no mapping */
++ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
++ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
++ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
++ /* unmap first bd */
++ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
++ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
++ DMA_TO_DEVICE);
++
+ /* now free frags */
+ while (nbd > 0) {
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 1bc927a..d5793d3 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14537,6 +14537,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ /* Clear this out for sanity. */
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
++ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
++ tw32(TG3PCI_REG_BASE_ADDR, 0);
++
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index bfeccbf..297f0b6 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -3015,7 +3015,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
+- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
++ dev->features = NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
+ | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
+ | NETIF_F_RXCSUM;
+diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
+index 3a90af6..cc7e7de 100644
+--- a/drivers/net/ethernet/tehuti/tehuti.c
++++ b/drivers/net/ethernet/tehuti/tehuti.c
+@@ -1995,7 +1995,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
+- /*| NETIF_F_FRAGLIST */
+ ;
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index e26945d..1b7b3be 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1011,7 +1011,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
+ dev_set_drvdata(&op->dev, ndev);
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
++ ndev->features = NETIF_F_SG;
+ ndev->netdev_ops = &temac_netdev_ops;
+ ndev->ethtool_ops = &temac_ethtool_ops;
+ #if 0
+diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
+index a4a3516..3b3a7e0 100644
+--- a/drivers/net/hamradio/hdlcdrv.c
++++ b/drivers/net/hamradio/hdlcdrv.c
+@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ case HDLCDRVCTL_CALIBRATE:
+ if(!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
++ return -EINVAL;
+ s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+ return 0;
+
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 96a98d2..e4260ab 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -1060,6 +1060,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ break;
+
+ case SIOCYAMGCFG:
++ memset(&yi, 0, sizeof(yi));
+ yi.cfg.mask = 0xffffffff;
+ yi.cfg.iobase = yp->iobase;
+ yi.cfg.irq = yp->irq;
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 136ecf3..dc60aec 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -445,7 +445,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->ethtool_ops = &dm9601_ethtool_ops;
+ dev->net->hard_header_len += DM_TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
++
++ /* dm9620/21a require room for 4 byte padding, even in dm9601
++ * mode, so we need +1 to be able to receive full size
++ * ethernet frames.
++ */
++ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = dm9601_mdio_read;
+@@ -531,7 +536,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+ {
+- int len;
++ int len, pad;
+
+ /* format:
+ b1: packet length low
+@@ -539,12 +544,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ b3..n: packet data
+ */
+
+- len = skb->len;
++ len = skb->len + DM_TX_OVERHEAD;
+
+- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
++ /* workaround for dm962x errata with tx fifo getting out of
++ * sync if a USB bulk transfer retry happens right after a
++ * packet with odd / maxpacket length by adding up to 3 bytes
++ * padding.
++ */
++ while ((len & 1) || !(len % dev->maxpacket))
++ len++;
++
++ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
++ pad = len - skb->len;
++
++ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
+ struct sk_buff *skb2;
+
+- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
++ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+@@ -553,10 +569,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+
+ __skb_push(skb, DM_TX_OVERHEAD);
+
+- /* usbnet adds padding if length is a multiple of packet size
+- if so, adjust length value in header */
+- if ((skb->len % dev->maxpacket) == 0)
+- len++;
++ if (pad) {
++ memset(skb->data + skb->len, 0, pad);
++ __skb_put(skb, pad);
++ }
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+index b592016..f4caeb3 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
++
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR_S2, isr2);
++ isr &= ~AR_ISR_BCNMISC;
++ }
+ }
+
+- isr = REG_READ(ah, AR_ISR_RAC);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
++ isr = REG_READ(ah, AR_ISR_RAC);
++
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+
+ *masked |= ATH9K_INT_TX;
+
+- s0_s = REG_READ(ah, AR_ISR_S0_S);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
++ s0_s = REG_READ(ah, AR_ISR_S0_S);
++ s1_s = REG_READ(ah, AR_ISR_S1_S);
++ } else {
++ s0_s = REG_READ(ah, AR_ISR_S0);
++ REG_WRITE(ah, AR_ISR_S0, s0_s);
++ s1_s = REG_READ(ah, AR_ISR_S1);
++ REG_WRITE(ah, AR_ISR_S1, s1_s);
++
++ isr &= ~(AR_ISR_TXOK |
++ AR_ISR_TXDESC |
++ AR_ISR_TXERR |
++ AR_ISR_TXEOL);
++ }
++
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+-
+- s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ *masked |= mask2;
+ }
+
+- if (AR_SREV_9100(ah))
+- return true;
+-
+- if (isr & AR_ISR_GENTMR) {
++ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
+ u32 s5_s;
+
+- s5_s = REG_READ(ah, AR_ISR_S5_S);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
++ s5_s = REG_READ(ah, AR_ISR_S5_S);
++ } else {
++ s5_s = REG_READ(ah, AR_ISR_S5);
++ }
++
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
+ !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ *masked |= ATH9K_INT_TIM_TIMER;
++
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR_S5, s5_s);
++ isr &= ~AR_ISR_GENTMR;
++ }
+ }
+
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR, isr);
++ REG_READ(ah, AR_ISR);
++ }
++
++ if (AR_SREV_9100(ah))
++ return true;
++
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+index 0b9a0e8..b6cd36c 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+@@ -139,21 +139,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+- for (i = 0; i < ETH_ALEN; i++)
+- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
++ if (iter_data->hw_macaddr != NULL) {
++ for (i = 0; i < ETH_ALEN; i++)
++ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
++ } else {
++ iter_data->hw_macaddr = mac;
++ }
+ }
+
+-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
++static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+ {
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ /*
+- * Use the hardware MAC address as reference, the hardware uses it
+- * together with the BSSID mask when matching addresses.
++ * Pick the MAC address of the first interface as the new hardware
++ * MAC address. The hardware will use it together with the BSSID mask
++ * when matching addresses.
+ */
+- iter_data.hw_macaddr = common->macaddr;
++ iter_data.hw_macaddr = NULL;
+ memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+ if (vif)
+@@ -164,6 +169,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+ &iter_data);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
++
++ if (iter_data.hw_macaddr)
++ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
++
+ ath_hw_setbssidmask(common);
+ }
+
+@@ -1100,7 +1109,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ goto out;
+ }
+
+- ath9k_htc_set_bssid_mask(priv, vif);
++ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ priv->vif_slot |= (1 << avp->index);
+ priv->nvifs++;
+@@ -1163,7 +1172,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+
+ ath9k_htc_set_opmode(priv);
+
+- ath9k_htc_set_bssid_mask(priv, vif);
++ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ /*
+ * Stop ANI only if there are no associated station interfaces.
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a59267a..ad33126 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1357,8 +1357,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+- * Use the hardware MAC address as reference, the hardware uses it
+- * together with the BSSID mask when matching addresses.
++ * Pick the MAC address of the first interface as the new hardware
++ * MAC address. The hardware will use it together with the BSSID mask
++ * when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ iter_data->hw_macaddr = common->macaddr;
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index 47ba0f7..38b793b 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -685,6 +685,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ };
+ int index = rtlpci->rx_ring[rx_queue_idx].idx;
+
++ if (rtlpci->driver_is_goingto_unload)
++ return;
+ /*RX NORMAL PKT */
+ while (count--) {
+ /*rx descriptor */
+@@ -1563,6 +1565,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
+ */
+ set_hal_stop(rtlhal);
+
++ rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+ tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+
+@@ -1580,7 +1583,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
+
+- rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->hw_disable(hw);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 363a5c6..9f1fec1 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1915,10 +1915,6 @@ void pci_enable_ari(struct pci_dev *dev)
+ if (!pci_is_pcie(dev) || dev->devfn)
+ return;
+
+- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+- if (!pos)
+- return;
+-
+ bridge = dev->bus->self;
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+@@ -1937,10 +1933,14 @@ void pci_enable_ari(struct pci_dev *dev)
+ return;
+
+ pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
+- ctrl |= PCI_EXP_DEVCTL2_ARI;
++ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
++ ctrl |= PCI_EXP_DEVCTL2_ARI;
++ bridge->ari_enabled = 1;
++ } else {
++ ctrl &= ~PCI_EXP_DEVCTL2_ARI;
++ bridge->ari_enabled = 0;
++ }
+ pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
+-
+- bridge->ari_enabled = 1;
+ }
+
+ /**
+diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
+index 79477a5..b3c9c8f 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidio.c
++++ b/drivers/staging/comedi/drivers/cb_pcidio.c
+@@ -56,10 +56,6 @@ struct pcidio_board {
+ const char *name; /* name of the board */
+ int dev_id;
+ int n_8255; /* number of 8255 chips on board */
+-
+- /* indices of base address regions */
+- int pcicontroler_badrindex;
+- int dioregs_badrindex;
+ };
+
+ static const struct pcidio_board pcidio_boards[] = {
+@@ -67,22 +63,16 @@ static const struct pcidio_board pcidio_boards[] = {
+ .name = "pci-dio24",
+ .dev_id = 0x0028,
+ .n_8255 = 1,
+- .pcicontroler_badrindex = 1,
+- .dioregs_badrindex = 2,
+ },
+ {
+ .name = "pci-dio24h",
+ .dev_id = 0x0014,
+ .n_8255 = 1,
+- .pcicontroler_badrindex = 1,
+- .dioregs_badrindex = 2,
+ },
+ {
+ .name = "pci-dio48h",
+ .dev_id = 0x000b,
+ .n_8255 = 2,
+- .pcicontroler_badrindex = 0,
+- .dioregs_badrindex = 1,
+ },
+ };
+
+@@ -244,10 +234,15 @@ found:
+ ("cb_pcidio: failed to enable PCI device and request regions\n");
+ return -EIO;
+ }
+- devpriv->dio_reg_base
+- =
++ /*
++ * Use PCI BAR 2 region if non-zero length, else use PCI BAR 1 region.
++ * PCI BAR 1 is only used for older PCI-DIO48H boards. At some point
++ * the PCI-DIO48H was redesigned to use the same PCI interface chip
++ * (and same PCI BAR region) as the other boards.
++ */
++ devpriv->dio_reg_base =
+ pci_resource_start(devpriv->pci_dev,
+- pcidio_boards[index].dioregs_badrindex);
++ (pci_resource_len(pcidev, 2) ? 2 : 1));
+
+ /*
+ * Allocate the subdevice structures. alloc_subdevice() is a
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 3effde2..45c13a6 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -861,24 +861,22 @@ static int iscsit_handle_scsi_cmd(
+ if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+ /*
+- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+- * that adds support for RESERVE/RELEASE. There is a bug
+- * add with this new functionality that sets R/W bits when
+- * neither CDB carries any READ or WRITE datapayloads.
++ * From RFC-3720 Section 10.3.1:
++ *
++ * "Either or both of R and W MAY be 1 when either the
++ * Expected Data Transfer Length and/or Bidirectional Read
++ * Expected Data Transfer Length are 0"
++ *
++ * For this case, go ahead and clear the unnecessary bits
++ * to avoid any confusion with ->data_direction.
+ */
+- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+- goto done;
+- }
++ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
++ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+
+- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
++ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ " set when Expected Data Transfer Length is 0 for"
+- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+- buf, conn);
++ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
+ }
+-done:
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 827f933..15685c3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -121,7 +121,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * any other sleep) on Haswell machines with LPT and LPT-LP
+ * with the new Intel BIOS
+ */
+- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++ /* Limit the quirk to only known vendors, as this triggers
++ * yet another BIOS bug on some other machines
++ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
++ */
++ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 6203d80..b24e2d3 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -608,6 +608,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
+ req->r_unsafe_dir = NULL;
+ }
+
++ complete_all(&req->r_safe_completion);
++
+ ceph_mdsc_put_request(req);
+ }
+
+@@ -1815,8 +1817,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
+ int mds = -1;
+ int err = -EAGAIN;
+
+- if (req->r_err || req->r_got_result)
++ if (req->r_err || req->r_got_result) {
++ if (req->r_aborted)
++ __unregister_request(mdsc, req);
+ goto out;
++ }
+
+ if (req->r_timeout &&
+ time_after_eq(jiffies, req->r_started + req->r_timeout)) {
+@@ -2124,7 +2129,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+ if (head->safe) {
+ req->r_got_safe = true;
+ __unregister_request(mdsc, req);
+- complete_all(&req->r_safe_completion);
+
+ if (req->r_got_unsafe) {
+ /*
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index bd8ac16..94b9e32 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -1448,6 +1448,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
+ sb->s_blocksize - offset : towrite;
+
+ tmp_bh.b_state = 0;
++ tmp_bh.b_size = sb->s_blocksize;
+ err = ext2_get_block(inode, blk, &tmp_bh, 1);
+ if (err < 0)
+ goto out;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 22c71b9..68b1602 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -278,6 +278,16 @@ struct ext4_io_submit {
+ /* Translate # of blks to # of clusters */
+ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
+ (sbi)->s_cluster_bits)
++/* Mask out the low bits to get the starting block of the cluster */
++#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
++ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
++ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
++/* Get the cluster offset */
++#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
++ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
++ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+
+ /*
+ * Structure of a blocks group descriptor
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index d0b8f98..9995b99 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -113,6 +113,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
++ ext4_error_inode(inode, where, line,
++ bh->b_blocknr,
++ "journal_dirty_metadata failed: "
++ "errcode %d", err);
+ }
+ } else {
+ if (inode)
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 3e8fc80..bf35fe0 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -318,8 +318,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ {
+ ext4_fsblk_t block = ext4_ext_pblock(ext);
+ int len = ext4_ext_get_actual_len(ext);
++ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
++ ext4_lblk_t last = lblock + len - 1;
+
+- if (len == 0)
++ if (lblock > last)
+ return 0;
+ return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -345,11 +347,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
+ if (depth == 0) {
+ /* leaf entries */
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
++ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
++ ext4_fsblk_t pblock = 0;
++ ext4_lblk_t lblock = 0;
++ ext4_lblk_t prev = 0;
++ int len = 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
++
++ /* Check for overlapping extents */
++ lblock = le32_to_cpu(ext->ee_block);
++ len = ext4_ext_get_actual_len(ext);
++ if ((lblock <= prev) && prev) {
++ pblock = ext4_ext_pblock(ext);
++ es->s_last_error_block = cpu_to_le64(pblock);
++ return 0;
++ }
+ ext++;
+ entries--;
++ prev = lblock + len - 1;
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
+@@ -1642,8 +1659,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ depth = ext_depth(inode);
+ if (!path[depth].p_ext)
+ goto out;
+- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
+
+ /*
+ * get the next allocated block if the extent in the path
+@@ -1653,7 +1669,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ b2 = ext4_ext_next_allocated_block(path);
+ if (b2 == EXT_MAX_BLOCKS)
+ goto out;
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, b2);
+ }
+
+ /* check for wrap through zero on extent logical start block*/
+@@ -2288,7 +2304,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
+ * truncate operation has removed all of the blocks in
+ * the cluster.
+ */
+- if (pblk & (sbi->s_cluster_ratio - 1) &&
++ if (EXT4_PBLK_COFF(sbi, pblk) &&
+ (ee_len == num))
+ *partial_cluster = EXT4_B2C(sbi, pblk);
+ else
+@@ -3491,7 +3507,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t lblk_start, lblk_end;
+- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
++ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+ lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+ return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
+@@ -3551,9 +3567,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
+ trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
+ /* Check towards left side */
+- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
+ if (c_offset) {
+- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
++ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
+ lblk_to = lblk_from + c_offset - 1;
+
+ if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+@@ -3561,7 +3577,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
+ }
+
+ /* Now check towards right. */
+- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
+ if (allocated_clusters && c_offset) {
+ lblk_from = lblk_start + num_blks;
+ lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
+@@ -3754,7 +3770,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ struct ext4_ext_path *path)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ext4_lblk_t ex_cluster_start, ex_cluster_end;
+ ext4_lblk_t rr_cluster_start, rr_cluster_end;
+ ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
+@@ -3773,8 +3789,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ (rr_cluster_start == ex_cluster_start)) {
+ if (rr_cluster_start == ex_cluster_end)
+ ee_start += ee_len - 1;
+- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
+- c_offset;
++ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
+ map->m_len = min(map->m_len,
+ (unsigned) sbi->s_cluster_ratio - c_offset);
+ /*
+@@ -4052,7 +4067,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ */
+ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
+ newex.ee_block = cpu_to_le32(map->m_lblk);
+- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk);
+
+ /*
+ * If we are doing bigalloc, check to see if the extent returned
+@@ -4120,7 +4135,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ * needed so that future calls to get_implied_cluster_alloc()
+ * work correctly.
+ */
+- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
++ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
+ ar.goal -= offset;
+ ar.logical -= offset;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9b8c131..81feb17 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3378,6 +3378,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
+ {
+ struct ext4_prealloc_space *pa;
+ pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
++
++ BUG_ON(atomic_read(&pa->pa_count));
++ BUG_ON(pa->pa_deleted == 0);
+ kmem_cache_free(ext4_pspace_cachep, pa);
+ }
+
+@@ -3391,11 +3394,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
+ ext4_group_t grp;
+ ext4_fsblk_t grp_blk;
+
+- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
+- return;
+-
+ /* in this short window concurrent discard can set pa_deleted */
+ spin_lock(&pa->pa_lock);
++ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
++ spin_unlock(&pa->pa_lock);
++ return;
++ }
++
+ if (pa->pa_deleted == 1) {
+ spin_unlock(&pa->pa_lock);
+ return;
+@@ -4062,7 +4067,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
+
+ /* set up allocation goals */
+ memset(ac, 0, sizeof(struct ext4_allocation_context));
+- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
++ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_sb = sb;
+ ac->ac_inode = ar->inode;
+@@ -4600,7 +4605,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ * blocks at the beginning or the end unless we are explicitly
+ * requested to avoid doing so.
+ */
+- overflow = block & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_PBLK_COFF(sbi, block);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
+ overflow = sbi->s_cluster_ratio - overflow;
+@@ -4614,7 +4619,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ count += overflow;
+ }
+ }
+- overflow = count & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_LBLK_COFF(sbi, count);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
+ if (count > overflow)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 84f84bf..acf2baf 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -819,7 +819,7 @@ static void ext4_put_super(struct super_block *sb)
+ ext4_abort(sb, "Couldn't clean up the journal");
+ }
+
+- del_timer(&sbi->s_err_report);
++ del_timer_sync(&sbi->s_err_report);
+ ext4_release_system_zone(sb);
+ ext4_mb_release(sb);
+ ext4_ext_release(sb);
+@@ -3961,7 +3961,7 @@ failed_mount_wq:
+ sbi->s_journal = NULL;
+ }
+ failed_mount3:
+- del_timer(&sbi->s_err_report);
++ del_timer_sync(&sbi->s_err_report);
+ if (sbi->s_flex_groups)
+ ext4_kvfree(sbi->s_flex_groups);
+ percpu_counter_destroy(&sbi->s_freeclusters_counter);
+diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
+index 89d2a58..5ecfffe 100644
+--- a/fs/hpfs/file.c
++++ b/fs/hpfs/file.c
+@@ -116,9 +116,12 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
+ hpfs_get_block,
+ &hpfs_i(mapping->host)->mmu_private);
+ if (unlikely(ret)) {
+- loff_t isize = mapping->host->i_size;
++ loff_t isize;
++ hpfs_lock(mapping->host->i_sb);
++ isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
++ hpfs_unlock(mapping->host->i_sb);
+ }
+
+ return ret;
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 233d3ed..3ceaced 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1437,17 +1437,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
+
+ nilfs_clear_logs(&sci->sc_segbufs);
+
+- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+- if (unlikely(err))
+- return err;
+-
+ if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
+ err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
+ sci->sc_freesegs,
+ sci->sc_nfreesegs,
+ NULL);
+ WARN_ON(err); /* do not happen */
++ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
+ }
++
++ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
++ if (unlikely(err))
++ return err;
++
+ nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
+ sci->sc_stage = prev_stage;
+ }
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 34817ad..10ca5e5 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -653,8 +653,9 @@ xfs_log_unmount_write(xfs_mount_t *mp)
+ .lv_iovecp = &reg,
+ };
+
+- /* remove inited flag */
++ /* remove inited flag, and account for space used */
+ tic->t_flags = 0;
++ tic->t_curr_res -= sizeof(magic);
+ error = xlog_write(log, &vec, tic, &lsn,
+ NULL, XLOG_UNMOUNT_TRANS);
+ /*
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index def807c..c37fd89 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -495,7 +495,7 @@
+ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+- {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
++ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 32697c1..4bc9445 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -24,6 +24,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+ void hugepage_put_subpool(struct hugepage_subpool *spool);
+
+ int PageHuge(struct page *page);
++int PageHeadHuge(struct page *page_head);
+
+ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+ int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+@@ -88,6 +89,11 @@ static inline int PageHuge(struct page *page)
+ return 0;
+ }
+
++static inline int PageHeadHuge(struct page *page_head)
++{
++ return 0;
++}
++
+ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+ {
+ }
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8c43fd1..4b04097 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1719,6 +1719,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
+ return dev->header_ops->parse(skb, haddr);
+ }
+
++static inline int dev_rebuild_header(struct sk_buff *skb)
++{
++ const struct net_device *dev = skb->dev;
++
++ if (!dev->header_ops || !dev->header_ops->rebuild)
++ return 0;
++ return dev->header_ops->rebuild(skb);
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+ extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+ static inline int unregister_gifconf(unsigned int family)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 7cda65b..fe76a74 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -590,6 +590,20 @@ struct pci_driver {
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+ /**
++ * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
++ * @vend: the 16 bit PCI Vendor ID
++ * @dev: the 16 bit PCI Device ID
++ * @subvend: the 16 bit PCI Subvendor ID
++ * @subdev: the 16 bit PCI Subdevice ID
++ *
++ * This macro is used to create a struct pci_device_id that matches a
++ * specific device with subsystem information.
++ */
++#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
++ .vendor = (vend), .device = (dev), \
++ .subvendor = (subvend), .subdevice = (subdev)
++
++/**
+ * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * @dev_class: the class, subclass, prog-if triple for this device
+ * @dev_class_mask: the class mask for this device
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 312d047..c17fdfb 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1200,6 +1200,7 @@ struct sched_entity {
+ struct sched_rt_entity {
+ struct list_head run_list;
+ unsigned long timeout;
++ unsigned long watchdog_stamp;
+ unsigned int time_slice;
+ int nr_cpus_allowed;
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d93369a..ea85b0d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2189,6 +2189,10 @@ static int irqtime_account_si_update(void)
+
+ #endif
+
++#ifdef CONFIG_SMP
++static void unthrottle_offline_cfs_rqs(struct rq *rq);
++#endif
++
+ #include "sched_idletask.c"
+ #include "sched_fair.c"
+ #include "sched_rt.c"
+@@ -6566,8 +6570,6 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq)
+ unthrottle_cfs_rq(cfs_rq);
+ }
+ }
+-#else
+-static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+ #endif
+
+ /*
+@@ -6595,9 +6597,6 @@ static void migrate_tasks(unsigned int dead_cpu)
+ */
+ rq->stop = NULL;
+
+- /* Ensure any throttled groups are reachable by pick_next_task */
+- unthrottle_offline_cfs_rqs(rq);
+-
+ for ( ; ; ) {
+ /*
+ * There's this thread running, bail when that's the only
+@@ -6624,6 +6623,10 @@ static void migrate_tasks(unsigned int dead_cpu)
+
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++#if !defined(CONFIG_HOTPLUG_CPU) || !defined(CONFIG_CFS_BANDWIDTH)
++static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
++#endif
++
+ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+
+ static struct ctl_table sd_ctl_dir[] = {
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 5b9e456..37f3f39 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4848,6 +4848,9 @@ static void rq_online_fair(struct rq *rq)
+ static void rq_offline_fair(struct rq *rq)
+ {
+ update_sysctl();
++
++ /* Ensure any throttled groups are reachable by pick_next_task */
++ unthrottle_offline_cfs_rqs(rq);
+ }
+
+ #else /* CONFIG_SMP */
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 6ad4fb3..f57fda7 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -509,6 +509,7 @@ balanced:
+ * runtime - in which case borrowing doesn't make sense.
+ */
+ rt_rq->rt_runtime = RUNTIME_INF;
++ rt_rq->rt_throttled = 0;
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
+ }
+@@ -587,6 +588,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+ return 1;
+
+ span = sched_rt_period_mask();
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * FIXME: isolated CPUs should really leave the root task group,
++ * whether they are isolcpus or were isolated via cpusets, lest
++ * the timer run on a CPU which does not service all runqueues,
++ * potentially leaving other CPUs indefinitely throttled. If
++ * isolation is really required, the user will turn the throttle
++ * off to kill the perturbations it causes anyway. Meanwhile,
++ * this maintains functionality for boot and/or troubleshooting.
++ */
++ if (rt_b == &root_task_group.rt_bandwidth)
++ span = cpu_online_mask;
++#endif
+ for_each_cpu(i, span) {
+ int enqueue = 0;
+ struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+@@ -719,6 +733,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && prio < prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
+ }
+@@ -728,6 +749,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+ }
+@@ -1749,7 +1777,11 @@ static void watchdog(struct rq *rq, struct task_struct *p)
+ if (soft != RLIM_INFINITY) {
+ unsigned long next;
+
+- p->rt.timeout++;
++ if (p->rt.watchdog_stamp != jiffies) {
++ p->rt.timeout++;
++ p->rt.watchdog_stamp = jiffies;
++ }
++
+ next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+ if (p->rt.timeout > next)
+ p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
+@@ -1758,6 +1790,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
+
+ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ {
++ struct sched_rt_entity *rt_se = &p->rt;
++
+ update_curr_rt(rq);
+
+ watchdog(rq, p);
+@@ -1775,12 +1809,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ p->rt.time_slice = DEF_TIMESLICE;
+
+ /*
+- * Requeue to the end of queue if we are not the only element
+- * on the queue:
++ * Requeue to the end of queue if we (and all of our ancestors) are the
++ * only element on the queue
+ */
+- if (p->rt.run_list.prev != p->rt.run_list.next) {
+- requeue_task_rt(rq, p, 0);
+- set_tsk_need_resched(p);
++ for_each_sched_rt_entity(rt_se) {
++ if (rt_se->run_list.prev != rt_se->run_list.next) {
++ requeue_task_rt(rq, p, 0);
++ set_tsk_need_resched(p);
++ return;
++ }
+ }
+ }
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index d40d7f6..cf8b439 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -618,7 +618,7 @@ static int ftrace_profile_init(void)
+ int cpu;
+ int ret = 0;
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ ret = ftrace_profile_init_cpu(cpu);
+ if (ret)
+ break;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ddf2128..3a5aae2 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -679,6 +679,23 @@ int PageHuge(struct page *page)
+ }
+ EXPORT_SYMBOL_GPL(PageHuge);
+
++/*
++ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
++ * normal or transparent huge pages.
++ */
++int PageHeadHuge(struct page *page_head)
++{
++ compound_page_dtor *dtor;
++
++ if (!PageHead(page_head))
++ return 0;
++
++ dtor = get_compound_page_dtor(page_head);
++
++ return dtor == free_huge_page;
++}
++EXPORT_SYMBOL_GPL(PageHeadHuge);
++
+ pgoff_t __basepage_index(struct page *page)
+ {
+ struct page *page_head = compound_head(page);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 1b03878..96c4bcf 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1441,10 +1441,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ return ret;
+ }
+ done:
+- if (!PageHWPoison(hpage))
+- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
+- set_page_hwpoison_huge_page(hpage);
+- dequeue_hwpoisoned_huge_page(hpage);
++ /* overcommit hugetlb page will be freed to buddy */
++ if (PageHuge(hpage)) {
++ if (!PageHWPoison(hpage))
++ atomic_long_add(1 << compound_trans_order(hpage),
++ &mce_bad_pages);
++ set_page_hwpoison_huge_page(hpage);
++ dequeue_hwpoisoned_huge_page(hpage);
++ } else {
++ SetPageHWPoison(page);
++ atomic_long_inc(&mce_bad_pages);
++ }
++
+ /* keep elevated page count for bad page */
+ return ret;
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index dff37a6..6182c8a 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1368,7 +1368,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1377,7 +1377,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+@@ -1442,9 +1442,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
++ unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
+
+ /* requested length too big for entire address space */
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1454,7 +1455,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+@@ -1469,14 +1470,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = mm->free_area_cache;
+
+ /* make sure it can fit in the remaining address space */
+- if (addr > len) {
++ if (addr >= low_limit + len) {
+ vma = find_vma(mm, addr-len);
+ if (!vma || addr <= vma->vm_start)
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+
+- if (mm->mmap_base < len)
++ if (mm->mmap_base < low_limit + len)
+ goto bottomup;
+
+ addr = mm->mmap_base-len;
+@@ -1498,7 +1499,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ } while (vma->vm_start >= low_limit + len);
+
+ bottomup:
+ /*
+diff --git a/mm/swap.c b/mm/swap.c
+index 55b266d..a4b9016 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -31,6 +31,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/hugetlb.h>
+
+ #include "internal.h"
+
+@@ -69,7 +70,8 @@ static void __put_compound_page(struct page *page)
+ {
+ compound_page_dtor *dtor;
+
+- __page_cache_release(page);
++ if (!PageHuge(page))
++ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
+ (*dtor)(page);
+ }
+@@ -83,6 +85,35 @@ static void put_compound_page(struct page *page)
+ if (likely(page != page_head &&
+ get_page_unless_zero(page_head))) {
+ unsigned long flags;
++
++ if (PageHeadHuge(page_head)) {
++ if (likely(PageTail(page))) {
++ /*
++ * __split_huge_page_refcount
++ * cannot race here.
++ */
++ VM_BUG_ON(!PageHead(page_head));
++ atomic_dec(&page->_mapcount);
++ if (put_page_testzero(page_head))
++ VM_BUG_ON(1);
++ if (put_page_testzero(page_head))
++ __put_compound_page(page_head);
++ return;
++ } else {
++ /*
++ * __split_huge_page_refcount
++ * run before us, "page" was a
++ * THP tail. The split
++ * page_head has been freed
++ * and reallocated as slab or
++ * hugetlbfs page of smaller
++ * order (only possible if
++ * reallocated as slab on
++ * x86).
++ */
++ goto skip_lock;
++ }
++ }
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+@@ -94,9 +125,29 @@ static void put_compound_page(struct page *page)
+ /* __split_huge_page_refcount run before us */
+ compound_unlock_irqrestore(page_head, flags);
+ VM_BUG_ON(PageHead(page_head));
+- if (put_page_testzero(page_head))
+- __put_single_page(page_head);
+- out_put_single:
++skip_lock:
++ if (put_page_testzero(page_head)) {
++ /*
++ * The head page may have been
++ * freed and reallocated as a
++ * compound page of smaller
++ * order and then freed again.
++ * All we know is that it
++ * cannot have become: a THP
++ * page, a compound page of
++ * higher order, a tail page.
++ * That is because we still
++ * hold the refcount of the
++ * split THP tail and
++ * page_head was the THP head
++ * before the split.
++ */
++ if (PageHead(page_head))
++ __put_compound_page(page_head);
++ else
++ __put_single_page(page_head);
++ }
++out_put_single:
+ if (put_page_testzero(page))
+ __put_single_page(page);
+ return;
+@@ -163,6 +214,31 @@ bool __get_page_tail(struct page *page)
+ struct page *page_head = compound_trans_head(page);
+
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
++ /* Ref to put_compound_page() comment. */
++ if (PageHeadHuge(page_head)) {
++ if (likely(PageTail(page))) {
++ /*
++ * This is a hugetlbfs
++ * page. __split_huge_page_refcount
++ * cannot race here.
++ */
++ VM_BUG_ON(!PageHead(page_head));
++ __get_page_tail_foll(page, false);
++ return true;
++ } else {
++ /*
++ * __split_huge_page_refcount run
++ * before us, "page" was a THP
++ * tail. The split page_head has been
++ * freed and reallocated as slab or
++ * hugetlbfs page of smaller order
++ * (only possible if reallocated as
++ * slab on x86).
++ */
++ put_page(page_head);
++ return false;
++ }
++ }
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index b40d3da..48a62d8 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -522,6 +522,22 @@ static const struct header_ops vlan_header_ops = {
+ .parse = eth_header_parse,
+ };
+
++static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
++ unsigned short type,
++ const void *daddr, const void *saddr,
++ unsigned int len)
++{
++ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
++
++ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
++}
++
++static const struct header_ops vlan_passthru_header_ops = {
++ .create = vlan_passthru_hard_header,
++ .rebuild = dev_rebuild_header,
++ .parse = eth_header_parse,
++};
++
+ static const struct net_device_ops vlan_netdev_ops;
+
+ static int vlan_dev_init(struct net_device *dev)
+@@ -561,7 +577,7 @@ static int vlan_dev_init(struct net_device *dev)
+
+ dev->needed_headroom = real_dev->needed_headroom;
+ if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+- dev->header_ops = real_dev->header_ops;
++ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+ dev->header_ops = &vlan_header_ops;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index a06deca..2157984 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1743,7 +1743,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
+ u32 old;
+ struct net_bridge_mdb_htable *mdb;
+
+- spin_lock(&br->multicast_lock);
++ spin_lock_bh(&br->multicast_lock);
+ if (!netif_running(br->dev))
+ goto unlock;
+
+@@ -1775,7 +1775,7 @@ rollback:
+ }
+
+ unlock:
+- spin_unlock(&br->multicast_lock);
++ spin_unlock_bh(&br->multicast_lock);
+
+ return err;
+ }
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index b856f87..1d9a529 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -61,7 +61,6 @@ static struct genl_family net_drop_monitor_family = {
+ .hdrsize = 0,
+ .name = "NET_DM",
+ .version = 2,
+- .maxattr = NET_DM_CMD_MAX,
+ };
+
+ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index ccee270..6be5e8e 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -119,6 +119,10 @@ static int inet_csk_diag_fill(struct sock *sk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = inet->inet_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = inet->inet_rcv_saddr;
+ r->id.idiag_dst[0] = inet->inet_daddr;
+
+@@ -209,13 +213,20 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
+
+ r->idiag_family = tw->tw_family;
+ r->idiag_retrans = 0;
++
+ r->id.idiag_if = tw->tw_bound_dev_if;
+ r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
++
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = tw->tw_rcv_saddr;
+ r->id.idiag_dst[0] = tw->tw_daddr;
++
+ r->idiag_state = tw->tw_substate;
+ r->idiag_timer = 3;
+ r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
+@@ -598,8 +609,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = ireq->rmt_port;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = ireq->loc_addr;
+ r->id.idiag_dst[0] = ireq->rmt_addr;
++
+ r->idiag_expires = jiffies_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+@@ -824,7 +840,7 @@ next_normal:
+ ++num;
+ }
+
+- if (r->idiag_states & TCPF_TIME_WAIT) {
++ if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) {
+ struct inet_timewait_sock *tw;
+
+ inet_twsk_for_each(tw, node,
+@@ -832,6 +848,8 @@ next_normal:
+
+ if (num < s_num)
+ goto next_dying;
++ if (!(r->idiag_states & (1 << tw->tw_substate)))
++ goto next_dying;
+ if (r->id.idiag_sport != tw->tw_sport &&
+ r->id.idiag_sport)
+ goto next_dying;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index b5e64e4..140d377 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -155,9 +155,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
+ struct mr_table **mrt)
+ {
+- struct ipmr_result res;
+- struct fib_lookup_arg arg = { .result = &res, };
+ int err;
++ struct ipmr_result res;
++ struct fib_lookup_arg arg = {
++ .result = &res,
++ .flags = FIB_LOOKUP_NOREF,
++ };
+
+ err = fib_rules_lookup(net->ipv4.mr_rules_ops,
+ flowi4_to_flowi(flp4), 0, &arg);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index f5af259..f96c96f 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -139,9 +139,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
+ struct mr6_table **mrt)
+ {
+- struct ip6mr_result res;
+- struct fib_lookup_arg arg = { .result = &res, };
+ int err;
++ struct ip6mr_result res;
++ struct fib_lookup_arg arg = {
++ .result = &res,
++ .flags = FIB_LOOKUP_NOREF,
++ };
+
+ err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
+ flowi6_to_flowi(flp6), 0, &arg);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1768238..9a4f437 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2058,15 +2058,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ {
+ struct net *net = dev_net(idev->dev);
+ struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
+- net->loopback_dev, 0);
++ net->loopback_dev, DST_NOCOUNT);
+ struct neighbour *neigh;
+
+- if (rt == NULL) {
+- if (net_ratelimit())
+- pr_warning("IPv6: Maximum number of routes reached,"
+- " consider increasing route/max_size.\n");
++ if (rt == NULL)
+ return ERR_PTR(-ENOMEM);
+- }
+
+ in6_dev_hold(idev);
+
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 99a60d5..f432d7b 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct llc_sock *llc = llc_sk(sk);
+ size_t copied = 0;
+ u32 peek_seq = 0;
+- u32 *seq;
++ u32 *seq, skb_len;
+ unsigned long used;
+ int target; /* Read at least this many bytes */
+ long timeo;
+@@ -813,6 +813,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+ continue;
+ found_ok_skb:
++ skb_len = skb->len;
+ /* Ok so how much can we use? */
+ used = skb->len - offset;
+ if (len < used)
+@@ -843,7 +844,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ /* Partial read */
+- if (used + offset < skb->len)
++ if (used + offset < skb_len)
+ continue;
+ } while (len > 0);
+
+diff --git a/net/rds/ib.c b/net/rds/ib.c
+index b4c8b00..ba2dffe 100644
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support iWARP devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_IB_CA)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
+index e590949..37be6e2 100644
+--- a/net/rds/ib_send.c
++++ b/net/rds/ib_send.c
+@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
+ && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ scat = &rm->data.op_sg[sg];
+- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+- return ret;
++ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
++ return sizeof(struct rds_header) + ret;
+ }
+
+ /* FIXME we may overallocate here */
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index bf76dec7..686fb1a 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (msg->msg_name) {
+ struct sockaddr_rose *srose;
++ struct full_sockaddr_rose *full_srose = msg->msg_name;
+
+ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
+ srose = msg->msg_name;
+@@ -1265,18 +1266,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+ srose->srose_ndigis = rose->dest_ndigis;
+- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
+- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
+- for (n = 0 ; n < rose->dest_ndigis ; n++)
+- full_srose->srose_digis[n] = rose->dest_digis[n];
+- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
+- } else {
+- if (rose->dest_ndigis >= 1) {
+- srose->srose_ndigis = 1;
+- srose->srose_digi = rose->dest_digis[0];
+- }
+- msg->msg_namelen = sizeof(struct sockaddr_rose);
+- }
++ for (n = 0 ; n < rose->dest_ndigis ; n++)
++ full_srose->srose_digis[n] = rose->dest_digis[n];
++ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
+ }
+
+ skb_free_datagram(sk, skb);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9338ccc..eddfdec 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -696,7 +696,9 @@ static int unix_autobind(struct socket *sock)
+ int err;
+ unsigned int retries = 0;
+
+- mutex_lock(&u->readlock);
++ err = mutex_lock_interruptible(&u->readlock);
++ if (err)
++ return err;
+
+ err = 0;
+ if (u->addr)
+@@ -829,7 +831,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ addr_len = err;
+
+- mutex_lock(&u->readlock);
++ err = mutex_lock_interruptible(&u->readlock);
++ if (err)
++ goto out;
+
+ err = -EINVAL;
+ if (u->addr)
+diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
+index 617a310..60549a4 100644
+--- a/net/wireless/radiotap.c
++++ b/net/wireless/radiotap.c
+@@ -122,6 +122,10 @@ int ieee80211_radiotap_iterator_init(
+ /* find payload start allowing for extended bitmap(s) */
+
+ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
++ if ((unsigned long)iterator->_arg -
++ (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
++ (unsigned long)iterator->_max_length)
++ return -EINVAL;
+ while (get_unaligned_le32(iterator->_arg) &
+ (1 << IEEE80211_RADIOTAP_EXT)) {
+ iterator->_arg += sizeof(uint32_t);
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 5898f34..bcf1d73 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -217,6 +217,14 @@ static int inode_alloc_security(struct inode *inode)
+ return 0;
+ }
+
++static void inode_free_rcu(struct rcu_head *head)
++{
++ struct inode_security_struct *isec;
++
++ isec = container_of(head, struct inode_security_struct, rcu);
++ kmem_cache_free(sel_inode_cache, isec);
++}
++
+ static void inode_free_security(struct inode *inode)
+ {
+ struct inode_security_struct *isec = inode->i_security;
+@@ -227,8 +235,16 @@ static void inode_free_security(struct inode *inode)
+ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+
+- inode->i_security = NULL;
+- kmem_cache_free(sel_inode_cache, isec);
++ /*
++ * The inode may still be referenced in a path walk and
++ * a call to selinux_inode_permission() can be made
++ * after inode_free_security() is called. Ideally, the VFS
++ * wouldn't do this, but fixing that is a much harder
++ * job. For now, simply free the i_security via RCU, and
++ * leave the current inode->i_security pointer intact.
++ * The inode will be freed after the RCU grace period too.
++ */
++ call_rcu(&isec->rcu, inode_free_rcu);
+ }
+
+ static int file_alloc_security(struct file *file)
+@@ -4181,8 +4197,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
+ PEER__RECV, &ad);
+- if (err)
++ if (err) {
+ selinux_netlbl_err(skb, err, 0);
++ return err;
++ }
+ }
+
+ if (secmark_active) {
+@@ -5372,11 +5390,11 @@ static int selinux_setprocattr(struct task_struct *p,
+ /* Check for ptracing, and update the task SID if ok.
+ Otherwise, leave SID unchanged and fail. */
+ ptsid = 0;
+- task_lock(p);
++ rcu_read_lock();
+ tracer = ptrace_parent(p);
+ if (tracer)
+ ptsid = task_sid(tracer);
+- task_unlock(p);
++ rcu_read_unlock();
+
+ if (tracer) {
+ error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
+index 26c7eee..7b1830b 100644
+--- a/security/selinux/include/objsec.h
++++ b/security/selinux/include/objsec.h
+@@ -38,7 +38,10 @@ struct task_security_struct {
+
+ struct inode_security_struct {
+ struct inode *inode; /* back pointer to inode object */
+- struct list_head list; /* list of inode_security_struct */
++ union {
++ struct list_head list; /* list of inode_security_struct */
++ struct rcu_head rcu; /* for freeing the inode_security_struct */
++ };
+ u32 task_sid; /* SID of creating task */
+ u32 sid; /* SID of this object */
+ u16 sclass; /* security class of this object */
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 3420bd3..cf0d46e 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1846,6 +1846,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
+ case SNDRV_PCM_STATE_DISCONNECTED:
+ err = -EBADFD;
+ goto _endloop;
++ case SNDRV_PCM_STATE_PAUSED:
++ continue;
+ }
+ if (!tout) {
+ snd_printd("%s write error (DMA or IRQ trouble?)\n",
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7ebe4b7..fea6895 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2618,6 +2618,10 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
+ * white/black-list for enable_msi
+ */
+ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
++ SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 285ef87..fafb76f 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -1714,7 +1714,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+- aif1 |= WM8904_AIF_LRCLK_INV;
++ aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
+ case SND_SOC_DAIFMT_DSP_A:
+ aif1 |= 0x3;
+ break;
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index dc4de37..bcf1d2f 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -18,9 +18,9 @@
+ #include "helpers/bitmask.h"
+
+ static struct option set_opts[] = {
+- { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
+- { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
+- { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
++ { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
++ { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
++ { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
+ { },
+ };
+
diff --git a/3.2.54/4420_grsecurity-3.0-3.2.54-201402111745.patch b/3.2.55/4420_grsecurity-3.0-3.2.55-201402152203.patch
index 21543e0..c279324 100644
--- a/3.2.54/4420_grsecurity-3.0-3.2.54-201402111745.patch
+++ b/3.2.55/4420_grsecurity-3.0-3.2.55-201402152203.patch
@@ -203,7 +203,7 @@ index dfa6fc6..ccbfbf3 100644
+zconf.lex.c
zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 2ba8272..e2a9806 100644
+index 1b196ea..ea70ff0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -859,6 +859,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
@@ -216,7 +216,7 @@ index 2ba8272..e2a9806 100644
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
-@@ -1960,6 +1963,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -1962,6 +1965,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -273,7 +273,7 @@ index 88fd7f5..b318a78 100644
==============================================================
diff --git a/Makefile b/Makefile
-index 848be26..296b92f 100644
+index 538463e..d1011ba 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2054,10 +2054,10 @@ index 90fa8b3..a3a2212 100644
return scno;
if (!(current->ptrace & PT_PTRACED))
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 7ac5dfd..0ce09c2 100644
+index d45fd22..1f2471d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
-@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+@@ -63,7 +63,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
@@ -2066,7 +2066,7 @@ index 7ac5dfd..0ce09c2 100644
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
-@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
+@@ -265,6 +265,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
static DEFINE_RAW_SPINLOCK(die_lock);
@@ -2075,7 +2075,7 @@ index 7ac5dfd..0ce09c2 100644
/*
* This function is protected against re-entrancy.
*/
-@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
+@@ -294,6 +296,9 @@ void die(const char *str, struct pt_regs *regs, int err)
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
@@ -9518,7 +9518,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index fb2e69d..200616a 100644
+index fb2e69d..205753c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,6 +75,7 @@ config X86
@@ -9538,14 +9538,14 @@ index fb2e69d..200616a 100644
config ARCH_HWEIGHT_CFLAGS
string
-@@ -999,6 +1000,7 @@ config MICROCODE_OLD_INTERFACE
+@@ -525,6 +526,7 @@ config SCHED_OMIT_FRAME_POINTER
- config X86_MSR
- tristate "/dev/cpu/*/msr - Model-specific register support"
-+ depends on !GRKERNSEC_KMEM
+ menuconfig PARAVIRT_GUEST
+ bool "Paravirtualized guest support"
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST
---help---
- This device gives privileged processes access to the x86
- Model-Specific Registers (MSRs). It is a character device with
+ Say Y here to get to see options related to running Linux under
+ various hypervisors. This option alone does not add any kernel code.
@@ -1022,7 +1024,7 @@ choice
config NOHIGHMEM
@@ -21438,10 +21438,41 @@ index 925179f..b151b74 100644
if ((s64)val != *(s32 *)loc)
goto overflow;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
-index f7d1a64..399615a 100644
+index f7d1a64..28afc4a 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
-@@ -235,7 +235,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
+@@ -37,6 +37,7 @@
+ #include <linux/notifier.h>
+ #include <linux/uaccess.h>
+ #include <linux/gfp.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/processor.h>
+ #include <asm/msr.h>
+@@ -104,6 +105,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
+ int err = 0;
+ ssize_t bytes = 0;
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_msr_write();
++ return -EPERM;
++#endif
++
+ if (count % 8)
+ return -EINVAL; /* Invalid chunk size */
+
+@@ -151,6 +157,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
+ err = -EBADF;
+ break;
+ }
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_msr_write();
++ return -EPERM;
++#endif
+ if (copy_from_user(&regs, uregs, sizeof regs)) {
+ err = -EFAULT;
+ break;
+@@ -235,7 +245,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
return notifier_from_errno(err);
}
@@ -23231,7 +23262,7 @@ index 09ff517..df19fbff 100644
.short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index e6fbb94..b372995 100644
+index 20061b9..56d1e1d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
@@ -23376,7 +23407,7 @@ index e6fbb94..b372995 100644
{
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
-@@ -576,18 +605,19 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+@@ -576,8 +605,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
void __math_state_restore(struct task_struct *tsk)
{
/* We need a safe address that is cheap to find and that is already
@@ -23387,23 +23418,6 @@ index e6fbb94..b372995 100644
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
- values. safe_address is a random variable that should be in L1 */
-- alternative_input(
-- ASM_NOP8 ASM_NOP2,
-- "emms\n\t" /* clear stack tags */
-- "fildl %P[addr]", /* set F?P to defined value */
-- X86_FEATURE_FXSAVE_LEAK,
-- [addr] "m" (safe_address));
-+ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
-+ asm volatile(
-+ "fnclex\n\t"
-+ "emms\n\t"
-+ "fildl %P[addr]" /* set F?P to defined value */
-+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
-+ }
-
- /*
- * Paranoid restore. send a SIGSEGV if we fail to restore the state.
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index b9242ba..50c5edd 100644
--- a/arch/x86/kernel/verify_cpu.S
@@ -23916,7 +23930,7 @@ index f5302da..6ee193e 100644
/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 43e7753..873f4440 100644
+index 757c716..11c70cf 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -53,7 +53,7 @@
@@ -24119,7 +24133,7 @@ index aac5ea7..266eda9 100644
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 57867e4..1d5ff81 100644
+index 7774cca..97dedc9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1341,8 +1341,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -24198,7 +24212,7 @@ index 57867e4..1d5ff81 100644
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
-@@ -5182,7 +5193,7 @@ static void kvm_set_mmio_spte_mask(void)
+@@ -5181,7 +5192,7 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask);
}
@@ -31034,6 +31048,18 @@ index 153407c..611cba9 100644
- return 0;
-}
-__setup("vdso=", vdso_setup);
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index 26c731a..fb510c7 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -8,6 +8,7 @@ config XEN
+ select PARAVIRT_CLOCK
+ depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
+ depends on X86_CMPXCHG && X86_TSC
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
+ help
+ This is the Linux Xen port. Enabling this will allow the
+ kernel to boot in a paravirtualized environment under the
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5189fe8..d937469 100644
--- a/arch/x86/xen/enlighten.c
@@ -31901,10 +31927,10 @@ index de2802c..2260da9 100644
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index a0a3987..d029614 100644
+index 72bbb5e..19f16d3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
-@@ -4746,7 +4746,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -4749,7 +4749,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
@@ -31913,7 +31939,7 @@ index a0a3987..d029614 100644
ap = qc->ap;
qc->flags = 0;
-@@ -4762,7 +4762,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -4765,7 +4765,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
@@ -31922,7 +31948,7 @@ index a0a3987..d029614 100644
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
-@@ -5767,6 +5767,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5770,6 +5770,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
return;
spin_lock(&lock);
@@ -31930,7 +31956,7 @@ index a0a3987..d029614 100644
for (cur = ops->inherits; cur; cur = cur->inherits) {
void **inherit = (void **)cur;
-@@ -5780,8 +5781,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5783,8 +5784,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
if (IS_ERR(*pp))
*pp = NULL;
@@ -37221,7 +37247,7 @@ index 93e74fb..4a1182d 100644
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 6d36695..4a3e870 100644
+index 61b708b..c8ca0e9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2215,7 +2215,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
@@ -38097,10 +38123,10 @@ index 0b5468b..74cfb87 100644
#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
-index 93bce72..00332c1 100644
+index 414a681..2b17775 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
-@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+@@ -314,9 +314,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
@@ -38608,7 +38634,7 @@ index 83d2fbd6..93017f7 100644
{
sysfs_attr_init(&attr->attr);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
-index 3d630bb..77756d7 100644
+index e6ec920..7664a6b 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -787,7 +787,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
@@ -41458,27 +41484,9 @@ index c706a7b..2cc7511 100644
"md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index 8bba438..a579e8c 100644
+index 6d05e26..a579e8c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
-@@ -997,7 +997,7 @@ read_again:
- /* Could not read all from this device, so we will
- * need another r10_bio.
- */
-- sectors_handled = (r10_bio->sectors + max_sectors
-+ sectors_handled = (r10_bio->sector + max_sectors
- - bio->bi_sector);
- r10_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
-@@ -1005,7 +1005,7 @@ read_again:
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
-- spin_unlock(&conf->device_lock);
-+ spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __generic_make_request
- * and subsequent mempool_alloc might block
@@ -1465,7 +1465,7 @@ static void end_sync_read(struct bio *bio, int error)
/* The write handler will notice the lack of
* R10BIO_Uptodate and record any errors etc
@@ -41538,30 +41546,8 @@ index 8bba438..a579e8c 100644
}
rdev_dec_pending(rdev, mddev);
-@@ -2563,10 +2563,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
- if (j == conf->copies) {
- /* Cannot recover, so abort the recovery or
- * record a bad block */
-- put_buf(r10_bio);
-- if (rb2)
-- atomic_dec(&rb2->remaining);
-- r10_bio = rb2;
- if (any_working) {
- /* problem is that there are bad blocks
- * on other device(s)
-@@ -2590,6 +2586,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
- conf->mirrors[i].recovery_disabled
- = mddev->recovery_disabled;
- }
-+ put_buf(r10_bio);
-+ if (rb2)
-+ atomic_dec(&rb2->remaining);
-+ r10_bio = rb2;
- break;
- }
- }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index 26ef63a..bd587cd 100644
+index fb67833..09c4732 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1618,19 +1618,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
@@ -43601,31 +43587,6 @@ index d4d2bc1..14b8672 100644
};
static int stmmac_init_fs(struct net_device *dev)
-diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
-index a4a3516..3b3a7e0 100644
---- a/drivers/net/hamradio/hdlcdrv.c
-+++ b/drivers/net/hamradio/hdlcdrv.c
-@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
- case HDLCDRVCTL_CALIBRATE:
- if(!capable(CAP_SYS_RAWIO))
- return -EPERM;
-+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
-+ return -EINVAL;
- s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
- return 0;
-
-diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
-index 96a98d2..e4260ab 100644
---- a/drivers/net/hamradio/yam.c
-+++ b/drivers/net/hamradio/yam.c
-@@ -1060,6 +1060,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
- break;
-
- case SIOCYAMGCFG:
-+ memset(&yi, 0, sizeof(yi));
- yi.cfg.mask = 0xffffffff;
- yi.cfg.iobase = yp->iobase;
- yi.cfg.irq = yp->irq;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index d0893e4..14b0d44 100644
--- a/drivers/net/loopback.c
@@ -44302,10 +44263,10 @@ index b346d04..04436fa 100644
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(ah, avf->bbuf);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
-index b592016..fe47870 100644
+index f4caeb3..8da6f5d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
-@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+@@ -217,8 +217,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
@@ -44316,7 +44277,7 @@ index b592016..fe47870 100644
ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
ctl6 = SM(i->keytype, AR_EncrType);
-@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+@@ -232,26 +232,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
if ((i->is_first || i->is_last) &&
i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
@@ -44350,7 +44311,7 @@ index b592016..fe47870 100644
return;
}
-@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+@@ -276,7 +276,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
break;
}
@@ -44359,7 +44320,7 @@ index b592016..fe47870 100644
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
| SM(i->txpower, AR_XmitPower)
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
-@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
+@@ -286,19 +286,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
(i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
@@ -47391,10 +47352,10 @@ index ed147c4..94fc3c6 100644
/* core tmem accessor functions */
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
-index 3effde2..dda7d46 100644
+index 45c13a6..f8c847c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
-@@ -1351,7 +1351,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+@@ -1349,7 +1349,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
* outstanding_r2ts reaches zero, go ahead and send the delayed
* TASK_ABORTED status.
*/
@@ -56068,10 +56029,10 @@ index a8cbe1b..fed04cb 100644
(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
return 0;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
-index bd8ac16..43811b9 100644
+index 94b9e32..4b85c15 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
-@@ -1494,6 +1494,7 @@ static struct file_system_type ext2_fs_type = {
+@@ -1495,6 +1495,7 @@ static struct file_system_type ext2_fs_type = {
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
@@ -56174,10 +56135,10 @@ index 2845a1f..f29de63 100644
if (free_clusters >= (nclusters + dirty_clusters))
return 1;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 22c71b9..ba28a7d 100644
+index 68b1602..830e05c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
-@@ -1206,19 +1206,19 @@ struct ext4_sb_info {
+@@ -1216,19 +1216,19 @@ struct ext4_sb_info {
unsigned long s_mb_last_start;
/* stats for buddy allocator */
@@ -56208,7 +56169,7 @@ index 22c71b9..ba28a7d 100644
/* locality groups */
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 9b8c131..d469b31 100644
+index 81feb17..d1ac883 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
@@ -56288,7 +56249,7 @@ index 9b8c131..d469b31 100644
}
if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
-@@ -3514,7 +3514,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+@@ -3519,7 +3519,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
trace_ext4_mb_new_inode_pa(ac, pa);
ext4_mb_use_inode_pa(ac, pa);
@@ -56297,7 +56258,7 @@ index 9b8c131..d469b31 100644
ei = EXT4_I(ac->ac_inode);
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
-@@ -3574,7 +3574,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+@@ -3579,7 +3579,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
trace_ext4_mb_new_group_pa(ac, pa);
ext4_mb_use_group_pa(ac, pa);
@@ -56306,7 +56267,7 @@ index 9b8c131..d469b31 100644
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
lg = ac->ac_lg;
-@@ -3663,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
+@@ -3668,7 +3668,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
* from the bitmap and continue.
*/
}
@@ -56315,7 +56276,7 @@ index 9b8c131..d469b31 100644
return err;
}
-@@ -3681,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+@@ -3686,7 +3686,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
@@ -56338,7 +56299,7 @@ index f3358ab..fbb1d90 100644
"MMP failure info: last update time: %llu, last update "
"node: %s, last update device: %s\n",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 84f84bf..a8770cd 100644
+index acf2baf..31c5131 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -92,6 +92,8 @@ static struct file_system_type ext2_fs_type = {
@@ -59491,34 +59452,6 @@ index 6a66fc0..cfdadae 100644
set_fs(oldfs);
if (host_err < 0)
-diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
-index 233d3ed..3ceaced 100644
---- a/fs/nilfs2/segment.c
-+++ b/fs/nilfs2/segment.c
-@@ -1437,17 +1437,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
-
- nilfs_clear_logs(&sci->sc_segbufs);
-
-- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-- if (unlikely(err))
-- return err;
--
- if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
- err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
- sci->sc_freesegs,
- sci->sc_nfreesegs,
- NULL);
- WARN_ON(err); /* do not happen */
-+ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
- }
-+
-+ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-+ if (unlikely(err))
-+ return err;
-+
- nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
- sci->sc_stage = prev_stage;
- }
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 97bfbdd..e7f644a 100644
--- a/fs/nilfs2/super.c
@@ -62801,10 +62734,10 @@ index 8a89949..6776861 100644
xfs_init_zones(void)
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..058b9e2
+index 0000000..849ba36
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1133 @@
+@@ -0,0 +1,1139 @@
+#
+# grecurity configuration
+#
@@ -63927,6 +63860,9 @@ index 0000000..058b9e2
+ to allow informative logs to be produced, but large enough to
+ prevent flooding.
+
++ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
++ any rate limiting on grsecurity log messages.
++
+config GRKERNSEC_FLOODBURST
+ int "Number of messages in a burst (maximum)"
+ default 6
@@ -63937,6 +63873,9 @@ index 0000000..058b9e2
+ many of your logs are being interpreted as flooding, you may want to
+ raise this value.
+
++ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
++ any rate limiting on grsecurity log messages.
++
+endmenu
diff --git a/grsecurity/Makefile b/grsecurity/Makefile
new file mode 100644
@@ -64000,7 +63939,7 @@ index 0000000..5307c8a
+endif
diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
new file mode 100644
-index 0000000..bd57acb
+index 0000000..8532ed2
--- /dev/null
+++ b/grsecurity/gracl.c
@@ -0,0 +1,2826 @@
@@ -66822,12 +66761,12 @@ index 0000000..bd57acb
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
-+EXPORT_SYMBOL(gr_acl_is_enabled);
++EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
+#endif
-+EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL_GPL(gr_learn_resource);
+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_check_user_change);
-+EXPORT_SYMBOL(gr_check_group_change);
++EXPORT_SYMBOL_GPL(gr_check_user_change);
++EXPORT_SYMBOL_GPL(gr_check_group_change);
+#endif
+
diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
@@ -70621,7 +70560,7 @@ index 0000000..bc0be01
+}
diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
new file mode 100644
-index 0000000..6b654b0
+index 0000000..12eb2bd
--- /dev/null
+++ b/grsecurity/grsec_chroot.c
@@ -0,0 +1,353 @@
@@ -70762,7 +70701,7 @@ index 0000000..6b654b0
+ return 0;
+}
+
-+EXPORT_SYMBOL(gr_pid_is_chrooted);
++EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
+
+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
@@ -70980,7 +70919,7 @@ index 0000000..6b654b0
+}
diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
new file mode 100644
-index 0000000..91cef85
+index 0000000..dcc6b9f
--- /dev/null
+++ b/grsecurity/grsec_disabled.c
@@ -0,0 +1,441 @@
@@ -71420,14 +71359,14 @@ index 0000000..91cef85
+ return;
+}
+
-+EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL_GPL(gr_learn_resource);
+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_check_user_change);
-+EXPORT_SYMBOL(gr_check_group_change);
++EXPORT_SYMBOL_GPL(gr_check_user_change);
++EXPORT_SYMBOL_GPL(gr_check_group_change);
+#endif
diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
new file mode 100644
-index 0000000..ee1f60f
+index 0000000..c6db3ee
--- /dev/null
+++ b/grsecurity/grsec_exec.c
@@ -0,0 +1,159 @@
@@ -71588,8 +71527,8 @@ index 0000000..ee1f60f
+#endif
+}
+
-+EXPORT_SYMBOL(gr_is_capable);
-+EXPORT_SYMBOL(gr_is_capable_nolog);
++EXPORT_SYMBOL_GPL(gr_is_capable);
++EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
new file mode 100644
index 0000000..d3ee748
@@ -71651,7 +71590,7 @@ index 0000000..8ca18bf
+}
diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
new file mode 100644
-index 0000000..454a98e
+index 0000000..7bcfc7a
--- /dev/null
+++ b/grsecurity/grsec_init.c
@@ -0,0 +1,272 @@
@@ -71703,7 +71642,7 @@ index 0000000..454a98e
+int grsec_tpe_gid;
+int grsec_enable_blackhole;
+#ifdef CONFIG_IPV6_MODULE
-+EXPORT_SYMBOL(grsec_enable_blackhole);
++EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
+#endif
+int grsec_lastack_retries;
+int grsec_enable_tpe_all;
@@ -72390,16 +72329,24 @@ index 0000000..56b5e9d
+}
diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
new file mode 100644
-index 0000000..f536303
+index 0000000..0e39d8c
--- /dev/null
+++ b/grsecurity/grsec_mem.c
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
++#include <linux/module.h>
+#include <linux/grinternal.h>
+
++void gr_handle_msr_write(void)
++{
++ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
++ return;
++}
++EXPORT_SYMBOL_GPL(gr_handle_msr_write);
++
+void
+gr_handle_ioperm(void)
+{
@@ -72845,7 +72792,7 @@ index 0000000..c6a07aa
+}
diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
new file mode 100644
-index 0000000..4030d57
+index 0000000..c0aef3a
--- /dev/null
+++ b/grsecurity/grsec_sock.c
@@ -0,0 +1,244 @@
@@ -72865,14 +72812,14 @@ index 0000000..4030d57
+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
+
-+EXPORT_SYMBOL(gr_search_udp_recvmsg);
-+EXPORT_SYMBOL(gr_search_udp_sendmsg);
++EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
+
+#ifdef CONFIG_UNIX_MODULE
-+EXPORT_SYMBOL(gr_acl_handle_unix);
-+EXPORT_SYMBOL(gr_acl_handle_mknod);
-+EXPORT_SYMBOL(gr_handle_chroot_unix);
-+EXPORT_SYMBOL(gr_handle_create);
++EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
++EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
++EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
++EXPORT_SYMBOL_GPL(gr_handle_create);
+#endif
+
+#ifdef CONFIG_GRKERNSEC
@@ -73578,7 +73525,7 @@ index 0000000..0d4723d
+#endif
diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
new file mode 100644
-index 0000000..0dc13c3
+index 0000000..61b514e
--- /dev/null
+++ b/grsecurity/grsec_time.c
@@ -0,0 +1,16 @@
@@ -73597,7 +73544,7 @@ index 0000000..0dc13c3
+ return;
+}
+
-+EXPORT_SYMBOL(gr_log_timechange);
++EXPORT_SYMBOL_GPL(gr_log_timechange);
diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
new file mode 100644
index 0000000..07e0dc0
@@ -76854,10 +76801,10 @@ index 0000000..7dc4203
+#endif
diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
new file mode 100644
-index 0000000..195cbe4
+index 0000000..ba93581
--- /dev/null
+++ b/include/linux/grmsg.h
-@@ -0,0 +1,115 @@
+@@ -0,0 +1,116 @@
+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
@@ -76973,12 +76920,13 @@ index 0000000..195cbe4
+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
++#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
new file mode 100644
-index 0000000..8996115
+index 0000000..f253c0e
--- /dev/null
+++ b/include/linux/grsecurity.h
-@@ -0,0 +1,224 @@
+@@ -0,0 +1,225 @@
+#ifndef GR_SECURITY_H
+#define GR_SECURITY_H
+#include <linux/fs.h>
@@ -77039,6 +76987,7 @@ index 0000000..8996115
+
+void gr_handle_ioperm(void);
+void gr_handle_iopl(void);
++void gr_handle_msr_write(void);
+
+umode_t gr_acl_umask(void);
+
@@ -78482,7 +78431,7 @@ index bd4f6c7..e9b8bb8 100644
struct iovec;
struct kvec;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 8c43fd1..782342e 100644
+index 4b04097..43bda9d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -949,6 +949,7 @@ struct net_device_ops {
@@ -78502,7 +78451,7 @@ index 8c43fd1..782342e 100644
* Do not use this in drivers.
*/
-@@ -2585,7 +2586,7 @@ static inline int netif_is_bond_slave(struct net_device *dev)
+@@ -2594,7 +2595,7 @@ static inline int netif_is_bond_slave(struct net_device *dev)
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
@@ -79219,7 +79168,7 @@ index 2148b12..519b820 100644
static inline void anon_vma_merge(struct vm_area_struct *vma,
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 312d047..a357e91 100644
+index c17fdfb..90df630 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -101,6 +101,7 @@ struct bio_list;
@@ -79335,7 +79284,7 @@ index 312d047..a357e91 100644
struct load_weight {
unsigned long weight, inv_weight;
-@@ -1305,6 +1343,8 @@ struct task_struct {
+@@ -1306,6 +1344,8 @@ struct task_struct {
* execve */
unsigned in_iowait:1;
@@ -79344,7 +79293,7 @@ index 312d047..a357e91 100644
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
-@@ -1345,8 +1385,8 @@ struct task_struct {
+@@ -1346,8 +1386,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -79355,7 +79304,7 @@ index 312d047..a357e91 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1362,13 +1402,6 @@ struct task_struct {
+@@ -1363,13 +1403,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -79369,7 +79318,7 @@ index 312d047..a357e91 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1385,8 +1418,16 @@ struct task_struct {
+@@ -1386,8 +1419,16 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -79386,7 +79335,7 @@ index 312d047..a357e91 100644
/* open file information */
struct files_struct *files;
/* namespaces */
-@@ -1409,7 +1450,7 @@ struct task_struct {
+@@ -1410,7 +1451,7 @@ struct task_struct {
uid_t loginuid;
unsigned int sessionid;
#endif
@@ -79395,7 +79344,7 @@ index 312d047..a357e91 100644
/* Thread group tracking */
u32 parent_exec_id;
-@@ -1433,6 +1474,11 @@ struct task_struct {
+@@ -1434,6 +1475,11 @@ struct task_struct {
struct rt_mutex_waiter *pi_blocked_on;
#endif
@@ -79407,7 +79356,7 @@ index 312d047..a357e91 100644
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
-@@ -1548,6 +1594,30 @@ struct task_struct {
+@@ -1549,6 +1595,30 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
@@ -79438,7 +79387,7 @@ index 312d047..a357e91 100644
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
-@@ -1580,7 +1650,54 @@ struct task_struct {
+@@ -1581,7 +1651,54 @@ struct task_struct {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
#endif
@@ -79494,7 +79443,7 @@ index 312d047..a357e91 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -2097,7 +2214,9 @@ void yield(void);
+@@ -2098,7 +2215,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -79504,7 +79453,7 @@ index 312d047..a357e91 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2130,6 +2249,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2131,6 +2250,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -79512,7 +79461,7 @@ index 312d047..a357e91 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2251,6 +2371,12 @@ static inline void mmdrop(struct mm_struct * mm)
+@@ -2252,6 +2372,12 @@ static inline void mmdrop(struct mm_struct * mm)
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -79525,7 +79474,7 @@ index 312d047..a357e91 100644
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
-@@ -2267,9 +2393,8 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2268,9 +2394,8 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -79536,7 +79485,7 @@ index 312d047..a357e91 100644
extern int allow_signal(int);
extern int disallow_signal(int);
-@@ -2432,9 +2557,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2433,9 +2558,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#endif
@@ -87037,10 +86986,10 @@ index 3d9f31c..7fefc9e 100644
default:
diff --git a/kernel/sched.c b/kernel/sched.c
-index d93369a..700af59 100644
+index ea85b0d..e0b6326 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
-@@ -5046,7 +5046,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
+@@ -5050,7 +5050,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
* The return value is -ERESTARTSYS if interrupted, 0 if timed out,
* positive (at least 1, or number of jiffies left till timeout) if completed.
*/
@@ -87049,7 +86998,7 @@ index d93369a..700af59 100644
wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
-@@ -5063,7 +5063,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+@@ -5067,7 +5067,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
*
* The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
@@ -87058,7 +87007,7 @@ index d93369a..700af59 100644
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
if (t == -ERESTARTSYS)
-@@ -5084,7 +5084,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
+@@ -5088,7 +5088,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* The return value is -ERESTARTSYS if interrupted, 0 if timed out,
* positive (at least 1, or number of jiffies left till timeout) if completed.
*/
@@ -87067,7 +87016,7 @@ index d93369a..700af59 100644
wait_for_completion_killable_timeout(struct completion *x,
unsigned long timeout)
{
-@@ -5293,6 +5293,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -5297,6 +5297,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -87076,7 +87025,7 @@ index d93369a..700af59 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -5326,7 +5328,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -5330,7 +5332,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -87086,7 +87035,7 @@ index d93369a..700af59 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -5483,6 +5486,7 @@ recheck:
+@@ -5487,6 +5490,7 @@ recheck:
unsigned long rlim_rtprio =
task_rlimit(p, RLIMIT_RTPRIO);
@@ -87094,7 +87043,7 @@ index d93369a..700af59 100644
/* can't set/change the rt policy */
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
-@@ -6626,7 +6630,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+@@ -6629,7 +6633,7 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -87103,7 +87052,7 @@ index d93369a..700af59 100644
{
.procname = "sched_domain",
.mode = 0555,
-@@ -6643,17 +6647,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -6646,17 +6650,17 @@ static struct ctl_table sd_ctl_root[] = {
{}
};
@@ -87125,7 +87074,7 @@ index d93369a..700af59 100644
/*
* In the intermediate directories, both the child directory and
-@@ -6661,22 +6665,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -6664,22 +6668,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
@@ -87157,7 +87106,7 @@ index d93369a..700af59 100644
const char *procname, void *data, int maxlen,
mode_t mode, proc_handler *proc_handler,
bool load_idx)
-@@ -6696,7 +6703,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -6699,7 +6706,7 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
@@ -87166,7 +87115,7 @@ index d93369a..700af59 100644
if (table == NULL)
return NULL;
-@@ -6731,9 +6738,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -6734,9 +6741,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
@@ -87178,7 +87127,7 @@ index d93369a..700af59 100644
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
-@@ -6760,11 +6767,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -6763,11 +6770,13 @@ static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
@@ -87193,7 +87142,7 @@ index d93369a..700af59 100644
if (entry == NULL)
return;
-@@ -6787,8 +6796,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -6790,8 +6799,12 @@ static void unregister_sched_domain_sysctl(void)
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
@@ -87208,7 +87157,7 @@ index d93369a..700af59 100644
}
#else
static void register_sched_domain_sysctl(void)
-@@ -6886,7 +6899,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+@@ -6889,7 +6902,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* happens before everything else. This has to be lower priority than
* the notifier in the perf_event subsystem, though.
*/
@@ -87240,7 +87189,7 @@ index f280df1..da1281d 100644
#ifdef CONFIG_RT_GROUP_SCHED
/*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
-index 5b9e456..03c74cd 100644
+index 37f3f39..6d3b6e1 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -4803,7 +4803,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
@@ -88946,7 +88895,7 @@ index 16fc34a..efd8bb8 100644
ret = -EIO;
bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index d40d7f6..b4e9662 100644
+index cf8b439..fe97a05 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1598,12 +1598,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
@@ -90949,10 +90898,10 @@ index ed0ed8a..cc835b9 100644
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index ddf2128..af57f40 100644
+index 3a5aae2..ef6f89a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
-@@ -1990,15 +1990,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2007,15 +2007,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
@@ -90973,7 +90922,7 @@ index ddf2128..af57f40 100644
if (ret)
goto out;
-@@ -2055,15 +2057,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2072,15 +2074,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
@@ -90994,7 +90943,7 @@ index ddf2128..af57f40 100644
if (ret)
goto out;
-@@ -2482,6 +2486,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2499,6 +2503,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
return 1;
}
@@ -91022,7 +90971,7 @@ index ddf2128..af57f40 100644
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
*/
-@@ -2584,6 +2609,11 @@ retry_avoidcopy:
+@@ -2601,6 +2626,11 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
@@ -91034,7 +90983,7 @@ index ddf2128..af57f40 100644
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
-@@ -2735,6 +2765,10 @@ retry:
+@@ -2752,6 +2782,10 @@ retry:
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
@@ -91045,7 +90994,7 @@ index ddf2128..af57f40 100644
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
-@@ -2764,6 +2798,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2781,6 +2815,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
@@ -91056,7 +91005,7 @@ index ddf2128..af57f40 100644
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
-@@ -2775,6 +2813,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2792,6 +2830,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
VM_FAULT_SET_HINDEX(h - hstates);
}
@@ -91220,7 +91169,7 @@ index 23d3a6b..e10d35a 100644
if (end == start)
goto out;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index 1b03878..d62c02b 100644
+index 96c4bcf..436254e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -91295,16 +91244,23 @@ index 1b03878..d62c02b 100644
freeit = 1;
if (PageHuge(page))
clear_page_hwpoison_huge_page(page);
-@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
+@@ -1444,13 +1444,13 @@ done:
+ /* overcommit hugetlb page will be freed to buddy */
+ if (PageHuge(hpage)) {
+ if (!PageHWPoison(hpage))
+- atomic_long_add(1 << compound_trans_order(hpage),
++ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
+ &mce_bad_pages);
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ } else {
+ SetPageHWPoison(page);
+- atomic_long_inc(&mce_bad_pages);
++ atomic_long_inc_unchecked(&mce_bad_pages);
}
- done:
- if (!PageHWPoison(hpage))
-- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
-+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
+
/* keep elevated page count for bad page */
-@@ -1581,7 +1581,7 @@ int soft_offline_page(struct page *page, int flags)
+@@ -1589,7 +1589,7 @@ int soft_offline_page(struct page *page, int flags)
return ret;
done:
@@ -92240,7 +92196,7 @@ index 1ffd97a..240aa20 100644
int mminit_loglevel;
diff --git a/mm/mmap.c b/mm/mmap.c
-index dff37a6..49e182f 100644
+index 6182c8a..7d532cf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
@@ -92761,7 +92717,7 @@ index dff37a6..49e182f 100644
unsigned long start_addr;
+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- if (len > TASK_SIZE)
+ if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
@@ -1374,18 +1623,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED)
@@ -92774,12 +92730,12 @@ index dff37a6..49e182f 100644
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
+- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
+ if (TASK_SIZE - len >= addr) {
+ vma = find_vma(mm, addr);
-+ if (check_heap_stack_gap(vma, &addr, len, offset))
++ if (addr >= mmap_min_addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
}
@@ -92853,10 +92809,10 @@ index dff37a6..49e182f 100644
- unsigned long addr = addr0;
+ unsigned long base = mm->mmap_base, addr = addr0;
+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
/* requested length too big for entire address space */
- if (len > TASK_SIZE)
-@@ -1450,13 +1711,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1451,13 +1712,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED)
return addr;
@@ -92868,10 +92824,10 @@ index dff37a6..49e182f 100644
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
+- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
-+ if (TASK_SIZE - len >= addr) {
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr) {
+ vma = find_vma(mm, addr);
+ if (check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
@@ -92879,10 +92835,10 @@ index dff37a6..49e182f 100644
}
/* check if free_area_cache is useful for us */
-@@ -1470,10 +1736,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1471,10 +1737,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* make sure it can fit in the remaining address space */
- if (addr > len) {
+ if (addr >= low_limit + len) {
- vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start)
+ addr -= len;
@@ -92893,8 +92849,8 @@ index dff37a6..49e182f 100644
+ return (mm->free_area_cache = addr);
}
- if (mm->mmap_base < len)
-@@ -1488,7 +1755,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (mm->mmap_base < low_limit + len)
+@@ -1489,7 +1756,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
* return with success:
*/
vma = find_vma(mm, addr);
@@ -92903,18 +92859,18 @@ index dff37a6..49e182f 100644
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
-@@ -1497,8 +1764,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1498,8 +1765,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
-- } while (len < vma->vm_start);
+- } while (vma->vm_start >= low_limit + len);
+ addr = skip_heap_stack_gap(vma, len, offset);
-+ } while (!IS_ERR_VALUE(addr));
++ } while (!IS_ERR_VALUE(addr) && addr >= low_limit);
bottomup:
/*
-@@ -1507,13 +1774,21 @@ bottomup:
+@@ -1508,13 +1775,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -92938,7 +92894,7 @@ index dff37a6..49e182f 100644
mm->cached_hole_size = ~0UL;
return addr;
-@@ -1522,6 +1797,12 @@ bottomup:
+@@ -1523,6 +1798,12 @@ bottomup:
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -92951,7 +92907,7 @@ index dff37a6..49e182f 100644
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1529,8 +1810,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1530,8 +1811,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -92963,7 +92919,7 @@ index dff37a6..49e182f 100644
}
unsigned long
-@@ -1603,40 +1886,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1604,40 +1887,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL(find_vma);
@@ -93039,7 +92995,7 @@ index dff37a6..49e182f 100644
/*
* Verify that the stack growth is acceptable and
-@@ -1654,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1655,6 +1948,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Stack limit test */
@@ -93047,7 +93003,7 @@ index dff37a6..49e182f 100644
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -1664,6 +1958,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1665,6 +1959,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -93055,7 +93011,7 @@ index dff37a6..49e182f 100644
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -1682,7 +1977,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1683,7 +1978,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Ok, everything looks good - let it rip */
@@ -93063,7 +93019,7 @@ index dff37a6..49e182f 100644
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
-@@ -1694,37 +1988,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1695,37 +1989,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
@@ -93121,7 +93077,7 @@ index dff37a6..49e182f 100644
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -1739,6 +2044,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1740,6 +2045,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
}
@@ -93130,7 +93086,7 @@ index dff37a6..49e182f 100644
vma_unlock_anon_vma(vma);
khugepaged_enter_vma_merge(vma);
return error;
-@@ -1752,6 +2059,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1753,6 +2060,8 @@ int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
int error;
@@ -93139,7 +93095,7 @@ index dff37a6..49e182f 100644
/*
* We must make sure the anon_vma is allocated
-@@ -1765,6 +2074,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1766,6 +2075,15 @@ int expand_downwards(struct vm_area_struct *vma,
if (error)
return error;
@@ -93155,7 +93111,7 @@ index dff37a6..49e182f 100644
vma_lock_anon_vma(vma);
/*
-@@ -1774,9 +2092,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1775,9 +2093,17 @@ int expand_downwards(struct vm_area_struct *vma,
*/
/* Somebody else might have raced and expanded it already */
@@ -93174,7 +93130,7 @@ index dff37a6..49e182f 100644
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -1786,18 +2112,48 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1787,18 +2113,48 @@ int expand_downwards(struct vm_area_struct *vma,
if (!error) {
vma->vm_start = address;
vma->vm_pgoff -= grow;
@@ -93223,7 +93179,7 @@ index dff37a6..49e182f 100644
return expand_upwards(vma, address);
}
-@@ -1820,6 +2176,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1821,6 +2177,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
@@ -93238,7 +93194,7 @@ index dff37a6..49e182f 100644
return expand_downwards(vma, address);
}
-@@ -1860,7 +2224,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1861,7 +2225,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do {
long nrpages = vma_pages(vma);
@@ -93253,7 +93209,7 @@ index dff37a6..49e182f 100644
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
} while (vma);
-@@ -1905,6 +2275,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1906,6 +2276,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -93270,7 +93226,7 @@ index dff37a6..49e182f 100644
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -1933,14 +2313,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1934,14 +2314,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -93304,7 +93260,7 @@ index dff37a6..49e182f 100644
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -1953,6 +2352,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1954,6 +2353,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -93327,7 +93283,7 @@ index dff37a6..49e182f 100644
pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
err = PTR_ERR(pol);
-@@ -1978,6 +2393,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1979,6 +2394,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -93370,7 +93326,7 @@ index dff37a6..49e182f 100644
/* Success. */
if (!err)
return 0;
-@@ -1990,10 +2441,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1991,10 +2442,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
removed_exe_file_vma(mm);
fput(new->vm_file);
}
@@ -93390,7 +93346,7 @@ index dff37a6..49e182f 100644
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2006,6 +2465,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2007,6 +2466,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -93406,7 +93362,7 @@ index dff37a6..49e182f 100644
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2017,11 +2485,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2018,11 +2486,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -93437,7 +93393,7 @@ index dff37a6..49e182f 100644
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2096,6 +2583,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2097,6 +2584,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -93446,7 +93402,7 @@ index dff37a6..49e182f 100644
return 0;
}
-@@ -2108,22 +2597,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2109,22 +2598,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
profile_munmap(addr);
@@ -93475,7 +93431,7 @@ index dff37a6..49e182f 100644
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2137,6 +2622,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2138,6 +2623,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -93483,7 +93439,7 @@ index dff37a6..49e182f 100644
len = PAGE_ALIGN(len);
if (!len)
-@@ -2148,16 +2634,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2149,16 +2635,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -93515,7 +93471,7 @@ index dff37a6..49e182f 100644
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
-@@ -2174,22 +2674,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2175,22 +2675,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
/*
* Clear old maps. this also does some error checking for us
*/
@@ -93542,7 +93498,7 @@ index dff37a6..49e182f 100644
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2203,7 +2703,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2204,7 +2704,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -93551,7 +93507,7 @@ index dff37a6..49e182f 100644
return -ENOMEM;
}
-@@ -2217,11 +2717,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2218,11 +2718,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
perf_event_mmap(vma);
@@ -93566,7 +93522,7 @@ index dff37a6..49e182f 100644
return addr;
}
-@@ -2268,8 +2769,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2269,8 +2770,10 @@ void exit_mmap(struct mm_struct *mm)
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
@@ -93578,7 +93534,7 @@ index dff37a6..49e182f 100644
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
-@@ -2283,6 +2786,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2284,6 +2787,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
@@ -93592,7 +93548,7 @@ index dff37a6..49e182f 100644
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2305,7 +2815,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2306,7 +2816,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -93615,7 +93571,7 @@ index dff37a6..49e182f 100644
return 0;
}
-@@ -2323,6 +2848,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2324,6 +2849,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
struct rb_node **rb_link, *rb_parent;
struct mempolicy *pol;
@@ -93624,7 +93580,7 @@ index dff37a6..49e182f 100644
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2373,6 +2900,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2374,6 +2901,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL;
}
@@ -93664,7 +93620,7 @@ index dff37a6..49e182f 100644
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2384,6 +2944,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2385,6 +2945,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -93672,7 +93628,7 @@ index dff37a6..49e182f 100644
if (cur + npages > lim)
return 0;
return 1;
-@@ -2454,6 +3015,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2455,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -95570,23 +95526,20 @@ index 1b7e22a..3fcd4f3 100644
return pgd;
}
diff --git a/mm/swap.c b/mm/swap.c
-index 55b266d..a532537 100644
+index a4b9016..d1a1b68 100644
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -31,6 +31,7 @@
- #include <linux/backing-dev.h>
- #include <linux/memcontrol.h>
- #include <linux/gfp.h>
-+#include <linux/hugetlb.h>
-
- #include "internal.h"
-
-@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
+@@ -70,9 +70,11 @@ static void __put_compound_page(struct page *page)
+ {
+ compound_page_dtor *dtor;
- __page_cache_release(page);
+- if (!PageHuge(page))
+- __page_cache_release(page);
dtor = get_compound_page_dtor(page);
-+ if (!PageHuge(page))
++ if (!PageHuge(page)) {
+ BUG_ON(dtor != free_compound_page);
++ __page_cache_release(page);
++ }
(*dtor)(page);
}
@@ -96008,6 +95961,22 @@ index fdfdb57..38d368c 100644
set_fs(oldfs);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 55f0c09..d5bf348 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -324,7 +324,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+- pages[index++] = kmap_to_page(data);
++ if (is_vmalloc_addr(data))
++ pages[index++] = vmalloc_to_page(data);
++ else
++ pages[index++] = kmap_to_page(data);
+ data += s;
+ nr_pages--;
+ }
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index f41f026..fe76ea8 100644
--- a/net/atm/atm_misc.c
@@ -96447,7 +96416,7 @@ index 14c4864..77ff888 100644
err = -EFAULT;
break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index a06deca..2269299 100644
+index 2157984..430c7e1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1410,7 +1410,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
@@ -97953,10 +97922,10 @@ index 907ef2c..eba7111 100644
void inet_get_local_port_range(int *low, int *high)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
-index ccee270..2b3d4de 100644
+index 6be5e8e..22df23e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
-@@ -114,11 +114,21 @@ static int inet_csk_diag_fill(struct sock *sk,
+@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
r->idiag_retrans = 0;
r->id.idiag_if = sk->sk_bound_dev_if;
@@ -97971,18 +97940,9 @@ index ccee270..2b3d4de 100644
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
-+
-+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-+
- r->id.idiag_src[0] = inet->inet_rcv_saddr;
- r->id.idiag_dst[0] = inet->inet_daddr;
-
-@@ -209,13 +219,26 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-
- r->idiag_family = tw->tw_family;
+@@ -215,8 +221,14 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_retrans = 0;
-+
+
r->id.idiag_if = tw->tw_bound_dev_if;
+
+#ifdef CONFIG_GRKERNSEC_HIDESYM
@@ -97992,20 +97952,10 @@ index ccee270..2b3d4de 100644
r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+#endif
-+
+
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
-+
-+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-+
- r->id.idiag_src[0] = tw->tw_rcv_saddr;
- r->id.idiag_dst[0] = tw->tw_daddr;
-+
- r->idiag_state = tw->tw_substate;
- r->idiag_timer = 3;
- r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
-@@ -294,12 +317,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
+@@ -305,12 +317,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
if (sk == NULL)
goto unlock;
@@ -98020,7 +97970,7 @@ index ccee270..2b3d4de 100644
err = -ENOMEM;
rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
-@@ -589,8 +614,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
+@@ -600,8 +614,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->idiag_retrans = req->retrans;
r->id.idiag_if = sk->sk_bound_dev_if;
@@ -98035,20 +97985,6 @@ index ccee270..2b3d4de 100644
tmo = req->expires - jiffies;
if (tmo < 0)
-@@ -598,8 +629,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
-
- r->id.idiag_sport = inet->inet_sport;
- r->id.idiag_dport = ireq->rmt_port;
-+
-+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
-+
- r->id.idiag_src[0] = ireq->loc_addr;
- r->id.idiag_dst[0] = ireq->rmt_addr;
-+
- r->idiag_expires = jiffies_to_msecs(tmo);
- r->idiag_rqueue = 0;
- r->idiag_wqueue = 0;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 4afcf31..392d206 100644
--- a/net/ipv4/inet_hashtables.c
@@ -98229,25 +98165,10 @@ index 99ec116..c5628fe 100644
return res;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
-index b5e64e4..69801fa 100644
+index 140d377..69801fa 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
-@@ -155,9 +155,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
- static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
- struct mr_table **mrt)
- {
-+ int err;
- struct ipmr_result res;
-- struct fib_lookup_arg arg = { .result = &res, };
-- int err;
-+ struct fib_lookup_arg arg = {
-+ .result = &res,
-+ .flags = FIB_LOOKUP_NOREF,
-+ };
-
- err = fib_rules_lookup(net->ipv4.mr_rules_ops,
- flowi4_to_flowi(flp4), 0, &arg);
-@@ -1320,6 +1323,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
+@@ -1323,6 +1323,10 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
@@ -99237,25 +99158,6 @@ index d3fde7e..f526e49 100644
}
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
-diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
-index f5af259..f96c96f 100644
---- a/net/ipv6/ip6mr.c
-+++ b/net/ipv6/ip6mr.c
-@@ -139,9 +139,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
- static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
- struct mr6_table **mrt)
- {
-+ int err;
- struct ip6mr_result res;
-- struct fib_lookup_arg arg = { .result = &res, };
-- int err;
-+ struct fib_lookup_arg arg = {
-+ .result = &res,
-+ .flags = FIB_LOOKUP_NOREF,
-+ };
-
- err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
- flowi6_to_flowi(flp6), 0, &arg);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index b204df8..8f274f4 100644
--- a/net/ipv6/ipv6_sockglue.c
@@ -99459,10 +99361,10 @@ index eba5deb..61e026f 100644
return -ENOMEM;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index 1768238..b28b21a 100644
+index 9a4f437..d13bf8b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
-@@ -2812,7 +2812,7 @@ ctl_table ipv6_route_table_template[] = {
+@@ -2808,7 +2808,7 @@ ctl_table ipv6_route_table_template[] = {
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
@@ -102867,10 +102769,10 @@ index 1983717..4d6102c 100644
sub->evt.event = htohl(event, sub->swap);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
-index 9338ccc..9bc732b 100644
+index eddfdec..e20439d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
-@@ -766,6 +766,12 @@ static struct sock *unix_find_other(struct net *net,
+@@ -768,6 +768,12 @@ static struct sock *unix_find_other(struct net *net,
err = -ECONNREFUSED;
if (!S_ISSOCK(inode->i_mode))
goto put_fail;
@@ -102883,7 +102785,7 @@ index 9338ccc..9bc732b 100644
u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
-@@ -786,6 +792,13 @@ static struct sock *unix_find_other(struct net *net,
+@@ -788,6 +794,13 @@ static struct sock *unix_find_other(struct net *net,
if (u) {
struct dentry *dentry;
dentry = unix_sk(u)->dentry;
@@ -102897,7 +102799,7 @@ index 9338ccc..9bc732b 100644
if (dentry)
touch_atime(unix_sk(u)->mnt, dentry);
} else
-@@ -868,11 +881,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+@@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = security_path_mknod(&path, dentry, mode, 0);
if (err)
goto out_mknod_drop_write;
@@ -102916,7 +102818,7 @@ index 9338ccc..9bc732b 100644
mutex_unlock(&path.dentry->d_inode->i_mutex);
dput(path.dentry);
path.dentry = dentry;
-@@ -2265,9 +2285,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+@@ -2269,9 +2289,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "Num RefCount Protocol Flags Type St "
"Inode Path\n");
else {
@@ -102931,7 +102833,7 @@ index 9338ccc..9bc732b 100644
seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
-@@ -2294,8 +2318,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+@@ -2298,8 +2322,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
}
for ( ; i < len; i++)
seq_putc(seq, u->addr->name->sun_path[i]);
@@ -106158,7 +106060,7 @@ index dca1c22..4fa4591 100644
lock = &avc_cache.slots_lock[hvalue];
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 5898f34..04f8b47 100644
+index bcf1d73..04f8b47 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -95,8 +95,6 @@
@@ -106170,41 +106072,7 @@ index 5898f34..04f8b47 100644
/* SECMARK reference count */
static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
-@@ -217,6 +215,14 @@ static int inode_alloc_security(struct inode *inode)
- return 0;
- }
-
-+static void inode_free_rcu(struct rcu_head *head)
-+{
-+ struct inode_security_struct *isec;
-+
-+ isec = container_of(head, struct inode_security_struct, rcu);
-+ kmem_cache_free(sel_inode_cache, isec);
-+}
-+
- static void inode_free_security(struct inode *inode)
- {
- struct inode_security_struct *isec = inode->i_security;
-@@ -227,8 +233,16 @@ static void inode_free_security(struct inode *inode)
- list_del_init(&isec->list);
- spin_unlock(&sbsec->isec_lock);
-
-- inode->i_security = NULL;
-- kmem_cache_free(sel_inode_cache, isec);
-+ /*
-+ * The inode may still be referenced in a path walk and
-+ * a call to selinux_inode_permission() can be made
-+ * after inode_free_security() is called. Ideally, the VFS
-+ * wouldn't do this, but fixing that is a much harder
-+ * job. For now, simply free the i_security via RCU, and
-+ * leave the current inode->i_security pointer intact.
-+ * The inode will be freed after the RCU grace period too.
-+ */
-+ call_rcu(&isec->rcu, inode_free_rcu);
- }
-
- static int file_alloc_security(struct file *file)
-@@ -2001,6 +2015,13 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
+@@ -2017,6 +2015,13 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
new_tsec->sid = old_tsec->exec_sid;
/* Reset exec SID on execve. */
new_tsec->exec_sid = 0;
@@ -106218,7 +106086,7 @@ index 5898f34..04f8b47 100644
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
-@@ -2013,7 +2034,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
+@@ -2029,7 +2034,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
COMMON_AUDIT_DATA_INIT(&ad, PATH);
ad.u.path = bprm->file->f_path;
@@ -106228,33 +106096,7 @@ index 5898f34..04f8b47 100644
new_tsec->sid = old_tsec->sid;
if (new_tsec->sid == old_tsec->sid) {
-@@ -4181,8 +4203,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
- }
- err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
- PEER__RECV, &ad);
-- if (err)
-+ if (err) {
- selinux_netlbl_err(skb, err, 0);
-+ return err;
-+ }
- }
-
- if (secmark_active) {
-@@ -5372,11 +5396,11 @@ static int selinux_setprocattr(struct task_struct *p,
- /* Check for ptracing, and update the task SID if ok.
- Otherwise, leave SID unchanged and fail. */
- ptsid = 0;
-- task_lock(p);
-+ rcu_read_lock();
- tracer = ptrace_parent(p);
- if (tracer)
- ptsid = task_sid(tracer);
-- task_unlock(p);
-+ rcu_read_unlock();
-
- if (tracer) {
- error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
-@@ -5508,7 +5532,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
+@@ -5526,7 +5532,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
#endif
@@ -106263,7 +106105,7 @@ index 5898f34..04f8b47 100644
.name = "selinux",
.ptrace_access_check = selinux_ptrace_access_check,
-@@ -5854,6 +5878,9 @@ static void selinux_nf_ip_exit(void)
+@@ -5872,6 +5878,9 @@ static void selinux_nf_ip_exit(void)
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
static int selinux_disabled;
@@ -106273,7 +106115,7 @@ index 5898f34..04f8b47 100644
int selinux_disable(void)
{
if (ss_initialized) {
-@@ -5871,7 +5898,9 @@ int selinux_disable(void)
+@@ -5889,7 +5898,9 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
@@ -106284,22 +106126,6 @@ index 5898f34..04f8b47 100644
/* Try to destroy the avc node cache */
avc_disable();
-diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
-index 26c7eee..7b1830b 100644
---- a/security/selinux/include/objsec.h
-+++ b/security/selinux/include/objsec.h
-@@ -38,7 +38,10 @@ struct task_security_struct {
-
- struct inode_security_struct {
- struct inode *inode; /* back pointer to inode object */
-- struct list_head list; /* list of inode_security_struct */
-+ union {
-+ struct list_head list; /* list of inode_security_struct */
-+ struct rcu_head rcu; /* for freeing the inode_security_struct */
-+ };
- u32 task_sid; /* SID of creating task */
- u32 sid; /* SID of this object */
- u16 sclass; /* security class of this object */
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index b43813c..74be837 100644
--- a/security/selinux/include/xfrm.h
@@ -107045,7 +106871,7 @@ index 0000000..4c2c45c
+size_overflow_hash.h
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
new file mode 100644
-index 0000000..0d1d9d3
+index 0000000..b198b6d
--- /dev/null
+++ b/tools/gcc/Makefile
@@ -0,0 +1,54 @@
@@ -107060,7 +106886,7 @@ index 0000000..0d1d9d3
+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
+else
+HOSTLIBS := hostcxxlibs
-+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter
++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -fno-rtti -ggdb -Wno-unused-parameter -Wno-narrowing
+endif
+
+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
@@ -109095,10 +108921,10 @@ index 0000000..dd73713
+}
diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
new file mode 100644
-index 0000000..592b923
+index 0000000..515d689
--- /dev/null
+++ b/tools/gcc/latent_entropy_plugin.c
-@@ -0,0 +1,325 @@
+@@ -0,0 +1,337 @@
+/*
+ * Copyright 2012-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -109127,7 +108953,7 @@ index 0000000..592b923
+static tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
-+ .version = "201401260140",
++ .version = "201402131900",
+ .help = NULL
+};
+
@@ -109324,7 +109150,7 @@ index 0000000..592b923
+ return 0;
+}
+
-+static void start_unit_callback(void *gcc_data, void *user_data)
++static void latent_entropy_start_unit(void *gcc_data, void *user_data)
+{
+ tree latent_entropy_type;
+
@@ -109411,6 +109237,16 @@ index 0000000..592b923
+ latent_entropy_pass_info.reference_pass_name = "optimized";
+ latent_entropy_pass_info.ref_pass_instance_number = 1;
+ latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
++ static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
++ {
++ .base = &latent_entropy_decl,
++ .nelt = 1,
++ .stride = sizeof(latent_entropy_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
@@ -109418,7 +109254,9 @@ index 0000000..592b923
+ }
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &latent_entropy_start_unit, NULL);
++ if (!in_lto_p)
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_latent_entropy);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
@@ -109426,10 +109264,10 @@ index 0000000..592b923
+}
diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
new file mode 100644
-index 0000000..fed12bf
+index 0000000..bc490ca
--- /dev/null
+++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,902 @@
+@@ -0,0 +1,906 @@
+/*
+ * Copyright 2014 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
+ * and PaX Team <pageexec@freemail.hu>
@@ -109446,6 +109284,10 @@ index 0000000..fed12bf
+#include "gcc-common.h"
+#include "randomize_layout_seed.h"
+
++#if BUILDING_GCC_MAJOR < 4 || BUILDING_GCC_MINOR < 6 || (BUILDING_GCC_MINOR == 6 && BUILDING_GCC_PATCHLEVEL < 4)
++#error "The RANDSTRUCT plugin requires GCC 4.6.4 or newer."
++#endif
++
+#define ORIG_TYPE_NAME(node) \
+ (TYPE_NAME(TYPE_MAIN_VARIANT(node)) != NULL_TREE ? ((const unsigned char *)IDENTIFIER_POINTER(TYPE_NAME(TYPE_MAIN_VARIANT(node)))) : (const unsigned char *)"anonymous")
+
@@ -109862,7 +109704,7 @@ index 0000000..fed12bf
+#endif
+}
+
-+static void finish_decl(void *event_data, void *data)
++static void randomize_layout_finish_decl(void *event_data, void *data)
+{
+ tree decl = (tree)event_data;
+ tree type;
@@ -110326,7 +110168,7 @@ index 0000000..fed12bf
+ register_callback(plugin_name, PLUGIN_ALL_IPA_PASSES_START, check_global_variables, NULL);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &randomize_layout_bad_cast_info);
+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
-+ register_callback(plugin_name, PLUGIN_FINISH_DECL, finish_decl, NULL);
++ register_callback(plugin_name, PLUGIN_FINISH_DECL, randomize_layout_finish_decl, NULL);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
+
@@ -116341,10 +116183,10 @@ index 0000000..7b67f2b
+selnl_msglen_65499 selnl_msglen 0 65499 NULL
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
new file mode 100644
-index 0000000..4a637ab
+index 0000000..4aab36f
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,4040 @@
+@@ -0,0 +1,4051 @@
+/*
+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
@@ -116369,7 +116211,7 @@ index 0000000..4a637ab
+int plugin_is_GPL_compatible;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140128",
++ .version = "20140213",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -116437,7 +116279,6 @@ index 0000000..4a637ab
+};
+
+static tree report_size_overflow_decl;
-+static const_tree const_char_ptr_type_node;
+
+static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
@@ -120210,8 +120051,9 @@ index 0000000..4a637ab
+}
+
+// Create the noreturn report_size_overflow() function decl.
-+static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
++static void size_overflow_start_unit(void __unused *gcc_data, void __unused *user_data)
+{
++ tree const_char_ptr_type_node;
+ tree fntype;
+
+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
@@ -120339,6 +120181,16 @@ index 0000000..4a637ab
+ struct register_pass_info __unused dump_before_pass_info;
+ struct register_pass_info __unused dump_after_pass_info;
+ struct register_pass_info ipa_pass_info;
++ static const struct ggc_root_tab gt_ggc_r_gt_size_overflow[] = {
++ {
++ .base = &report_size_overflow_decl,
++ .nelt = 1,
++ .stride = sizeof(report_size_overflow_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
+ insert_size_overflow_asm_pass_info.pass = make_insert_size_overflow_asm_pass();
+ insert_size_overflow_asm_pass_info.reference_pass_name = "ssa";
@@ -120375,7 +120227,8 @@ index 0000000..4a637ab
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
+ if (enable) {
-+ register_callback(plugin_name, PLUGIN_START_UNIT, &start_unit_callback, NULL);
++ register_callback(plugin_name, PLUGIN_START_UNIT, &size_overflow_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_size_overflow);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
+// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
@@ -120387,10 +120240,10 @@ index 0000000..4a637ab
+}
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
new file mode 100644
-index 0000000..a4f816a
+index 0000000..e684c74
--- /dev/null
+++ b/tools/gcc/stackleak_plugin.c
-@@ -0,0 +1,341 @@
+@@ -0,0 +1,373 @@
+/*
+ * Copyright 2011-2014 by the PaX Team <pageexec@freemail.hu>
+ * Licensed under the GPL v2
@@ -120418,10 +120271,11 @@ index 0000000..a4f816a
+static int track_frame_size = -1;
+static const char track_function[] = "pax_track_stack";
+static const char check_function[] = "pax_check_alloca";
++static tree track_function_decl, check_function_decl;
+static bool init_locals;
+
+static struct plugin_info stackleak_plugin_info = {
-+ .version = "201401260140",
++ .version = "201402131920",
+ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
+// "initialize-locals\t\tforcibly initialize all stack frames\n"
+};
@@ -120429,29 +120283,20 @@ index 0000000..a4f816a
+static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
+{
+ gimple check_alloca;
-+ tree fntype, fndecl, alloca_size;
-+
-+ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
-+ fndecl = build_fn_decl(check_function, fntype);
-+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
++ tree alloca_size;
+
+ // insert call to void pax_check_alloca(unsigned long size)
+ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
-+ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
++ check_alloca = gimple_build_call(check_function_decl, 1, alloca_size);
+ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
+}
+
+static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
+{
+ gimple track_stack;
-+ tree fntype, fndecl;
-+
-+ fntype = build_function_type_list(void_type_node, NULL_TREE);
-+ fndecl = build_fn_decl(track_function, fntype);
-+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
+
+ // insert call to void pax_track_stack(void)
-+ track_stack = gimple_build_call(fndecl, 0);
++ track_stack = gimple_build_call(track_function_decl, 0);
+ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
+}
+
@@ -120578,6 +120423,27 @@ index 0000000..a4f816a
+ return track_frame_size >= 0;
+}
+
++static void stackleak_start_unit(void *gcc_data, void *user_data)
++{
++ tree fntype;
++
++ // void pax_track_stack(void)
++ fntype = build_function_type_list(void_type_node, NULL_TREE);
++ track_function_decl = build_fn_decl(track_function, fntype);
++ DECL_ASSEMBLER_NAME(track_function_decl); // for LTO
++ TREE_PUBLIC(track_function_decl) = 1;
++ DECL_EXTERNAL(track_function_decl) = 1;
++ DECL_ARTIFICIAL(track_function_decl) = 1;
++
++ // void pax_check_alloca(unsigned long)
++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
++ check_function_decl = build_fn_decl(check_function, fntype);
++ DECL_ASSEMBLER_NAME(check_function_decl); // for LTO
++ TREE_PUBLIC(check_function_decl) = 1;
++ DECL_EXTERNAL(check_function_decl) = 1;
++ DECL_ARTIFICIAL(check_function_decl) = 1;
++}
++
+#if BUILDING_GCC_VERSION >= 4009
+static const struct pass_data stackleak_tree_instrument_pass_data = {
+#else
@@ -120686,6 +120552,23 @@ index 0000000..a4f816a
+ int i;
+ struct register_pass_info stackleak_tree_instrument_pass_info;
+ struct register_pass_info stackleak_final_pass_info;
++ static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
++ {
++ .base = &track_function_decl,
++ .nelt = 1,
++ .stride = sizeof(track_function_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ {
++ .base = &check_function_decl,
++ .nelt = 1,
++ .stride = sizeof(check_function_decl),
++ .cb = &gt_ggc_mx_tree_node,
++ .pchw = &gt_pch_nx_tree_node
++ },
++ LAST_GGC_ROOT_TAB
++ };
+
+ stackleak_tree_instrument_pass_info.pass = make_stackleak_tree_instrument_pass();
+// stackleak_tree_instrument_pass_info.reference_pass_name = "tree_profile";
@@ -120727,6 +120610,8 @@ index 0000000..a4f816a
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ }
+
++ register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
++ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_stackleak);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
+
diff --git a/3.2.54/4425_grsec_remove_EI_PAX.patch b/3.2.55/4425_grsec_remove_EI_PAX.patch
index cf65d90..cf65d90 100644
--- a/3.2.54/4425_grsec_remove_EI_PAX.patch
+++ b/3.2.55/4425_grsec_remove_EI_PAX.patch
diff --git a/3.2.54/4427_force_XATTR_PAX_tmpfs.patch b/3.2.55/4427_force_XATTR_PAX_tmpfs.patch
index 8c7a533..8c7a533 100644
--- a/3.2.54/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.2.55/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.2.54/4430_grsec-remove-localversion-grsec.patch b/3.2.55/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.2.54/4430_grsec-remove-localversion-grsec.patch
+++ b/3.2.55/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.2.54/4435_grsec-mute-warnings.patch b/3.2.55/4435_grsec-mute-warnings.patch
index f099757..f099757 100644
--- a/3.2.54/4435_grsec-mute-warnings.patch
+++ b/3.2.55/4435_grsec-mute-warnings.patch
diff --git a/3.2.54/4440_grsec-remove-protected-paths.patch b/3.2.55/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.2.54/4440_grsec-remove-protected-paths.patch
+++ b/3.2.55/4440_grsec-remove-protected-paths.patch
diff --git a/3.2.54/4450_grsec-kconfig-default-gids.patch b/3.2.55/4450_grsec-kconfig-default-gids.patch
index 4f345d6..4f345d6 100644
--- a/3.2.54/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.55/4450_grsec-kconfig-default-gids.patch
diff --git a/3.2.54/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.55/4465_selinux-avc_audit-log-curr_ip.patch
index 0545d51..0545d51 100644
--- a/3.2.54/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.55/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.2.54/4470_disable-compat_vdso.patch b/3.2.55/4470_disable-compat_vdso.patch
index f6eb9f7..f6eb9f7 100644
--- a/3.2.54/4470_disable-compat_vdso.patch
+++ b/3.2.55/4470_disable-compat_vdso.patch
diff --git a/3.2.54/4475_emutramp_default_on.patch b/3.2.55/4475_emutramp_default_on.patch
index cfde6f8..cfde6f8 100644
--- a/3.2.54/4475_emutramp_default_on.patch
+++ b/3.2.55/4475_emutramp_default_on.patch