-rw-r--r--  2.6.32/0000_README  |  2
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201304181846.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201304122025.patch)  |  110
-rw-r--r--  3.2.43/0000_README  |  2
-rw-r--r--  3.2.43/4420_grsecurity-2.9.1-3.2.43-201304181907.patch (renamed from 3.2.43/4420_grsecurity-2.9.1-3.2.43-201304122026.patch)  |  326
-rw-r--r--  3.8.8/0000_README (renamed from 3.8.7/0000_README)  |  6
-rw-r--r--  3.8.8/1006_linux-3.8.7.patch (renamed from 3.8.7/1006_linux-3.8.7.patch)  |  0
-rw-r--r--  3.8.8/1007_linux-3.8.8.patch  |  1471
-rw-r--r--  3.8.8/4420_grsecurity-2.9.1-3.8.8-201304181923.patch (renamed from 3.8.7/4420_grsecurity-2.9.1-3.8.7-201304122027.patch)  |  550
-rw-r--r--  3.8.8/4425_grsec_remove_EI_PAX.patch (renamed from 3.8.7/4425_grsec_remove_EI_PAX.patch)  |  0
-rw-r--r--  3.8.8/4430_grsec-remove-localversion-grsec.patch (renamed from 3.8.7/4430_grsec-remove-localversion-grsec.patch)  |  0
-rw-r--r--  3.8.8/4435_grsec-mute-warnings.patch (renamed from 3.8.7/4435_grsec-mute-warnings.patch)  |  0
-rw-r--r--  3.8.8/4440_grsec-remove-protected-paths.patch (renamed from 3.8.7/4440_grsec-remove-protected-paths.patch)  |  0
-rw-r--r--  3.8.8/4450_grsec-kconfig-default-gids.patch (renamed from 3.8.7/4450_grsec-kconfig-default-gids.patch)  |  0
-rw-r--r--  3.8.8/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.8.7/4465_selinux-avc_audit-log-curr_ip.patch)  |  0
-rw-r--r--  3.8.8/4470_disable-compat_vdso.patch (renamed from 3.8.7/4470_disable-compat_vdso.patch)  |  0
15 files changed, 2218 insertions, 249 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 6b3d14c..d04e223 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -34,7 +34,7 @@ Patch: 1059_linux-2.6.32.60.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.60-201304122025.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.60-201304181846.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201304122025.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201304181846.patch
index 5b9006d..3224566 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201304122025.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201304181846.patch
@@ -80929,6 +80929,19 @@ index f4300ff7..6ec38b2 100644
if (filp->f_pos >= inode->i_size)
return 0;
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index 0022eec..b3d234e 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -447,7 +447,7 @@ void hfsplus_file_truncate(struct inode *inode)
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+ int res;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1bcf597..905a251 100644
--- a/fs/hfsplus/inode.c
@@ -99111,6 +99124,43 @@ index 58ae8e0..8ce9617 100644
struct kobject *parent_kobj);
static inline struct kset *to_kset(struct kobject *kobj)
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index b0cb0eb..26fd888 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -16,6 +16,7 @@
+ #define _KREF_H_
+
+ #include <linux/types.h>
++#include <asm/atomic.h>
+
+ struct kref {
+ atomic_t refcount;
+@@ -26,4 +27,24 @@ void kref_init(struct kref *kref);
+ void kref_get(struct kref *kref);
+ int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+
++/**
++ * kref_get_unless_zero - Increment refcount for object unless it is zero.
++ * @kref: object.
++ *
++ * Return non-zero if the increment succeeded. Otherwise return 0.
++ *
++ * This function is intended to simplify locking around refcounting for
++ * objects that can be looked up from a lookup structure, and which are
++ * removed from that lookup structure in the object destructor.
++ * Operations on such objects require at least a read lock around
++ * lookup + kref_get, and a write lock around kref_put + remove from lookup
++ * structure. Furthermore, RCU implementations become extremely tricky.
++ * With a lookup followed by a kref_get_unless_zero *with return value check*
++ * locking in the kref_put path can be deferred to the actual removal from
++ * the lookup structure and RCU lookups become trivial.
++ */
++static inline int __must_check kref_get_unless_zero(struct kref *kref)
++{
++ return atomic_add_unless(&kref->refcount, 1, 0);
++}
+ #endif /* _KREF_H_ */
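
The kernel-doc in the hunk above describes the intended lookup/refcount pattern only in prose. The sketch below is an illustrative usage example and is not part of the grsecurity patch; the struct item, item_list, item_lock and item_* function names are hypothetical. It shows a lock-protected lookup that takes a reference only while the refcount is still non-zero, with the final kref_put() happening after the object has been unlinked.

/*
 * Minimal usage sketch for kref_get_unless_zero(), assuming a
 * hypothetical "item" object kept in a global list.  Not part of
 * the patch above.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	struct kref kref;
	int id;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

/* Release callback: runs once the last reference is dropped. */
static void item_release(struct kref *kref)
{
	struct item *it = container_of(kref, struct item, kref);

	kfree(it);
}

/*
 * Lookup takes only the lock that protects the list.  An object whose
 * refcount has already reached zero is being torn down, so it is
 * skipped rather than resurrected.
 */
static struct item *item_lookup(int id)
{
	struct item *it;

	spin_lock(&item_lock);
	list_for_each_entry(it, &item_list, node) {
		if (it->id == id && kref_get_unless_zero(&it->kref)) {
			spin_unlock(&item_lock);
			return it;
		}
	}
	spin_unlock(&item_lock);
	return NULL;
}

/*
 * Removal from the lookup structure happens under the same lock; the
 * final kref_put() may then free the object outside the lock.
 */
static void item_unlink_and_put(struct item *it)
{
	spin_lock(&item_lock);
	list_del(&it->node);
	spin_unlock(&item_lock);
	kref_put(&it->kref, item_release);
}
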
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8bfed57..07be660 100644
--- a/include/linux/kvm_host.h
@@ -107428,7 +107478,7 @@ index 0591df8..dcf3f9f 100644
if (cpu != group_first_cpu(sd->groups))
return;
diff --git a/kernel/signal.c b/kernel/signal.c
-index 2494827..3087914 100644
+index 2494827..873d447 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -41,12 +41,12 @@
@@ -107620,6 +107670,15 @@ index 2494827..3087914 100644
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, info, p);
/*
+@@ -2300,7 +2336,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+
+ static int do_tkill(pid_t tgid, pid_t pid, int sig)
+ {
+- struct siginfo info;
++ struct siginfo info = {};
+
+ info.si_signo = sig;
+ info.si_errno = 0;
diff --git a/kernel/smp.c b/kernel/smp.c
index aa9cff3..631a0de 100644
--- a/kernel/smp.c
@@ -107715,7 +107774,7 @@ index 04a0252..4ee2bbb 100644
struct tasklet_struct *list;
diff --git a/kernel/sys.c b/kernel/sys.c
-index e9512b1..dec4030 100644
+index e9512b1..b436660 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
@@ -107857,16 +107916,16 @@ index e9512b1..dec4030 100644
if (rgid != (gid_t) -1)
new->gid = rgid;
if (egid != (gid_t) -1)
-@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
- if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
- goto error;
-
-+ if (gr_check_user_change(-1, -1, uid))
-+ goto error;
-+
- if (uid == old->uid || uid == old->euid ||
+@@ -853,6 +888,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
uid == old->suid || uid == old->fsuid ||
capable(CAP_SETUID)) {
+ if (uid != old_fsuid) {
++ if (gr_check_user_change(-1, -1, uid))
++ goto error;
++
+ new->fsuid = uid;
+ if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+ goto change_okay;
@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
if (gid == old->gid || gid == old->egid ||
gid == old->sgid || gid == old->fsgid ||
@@ -109259,10 +109318,24 @@ index bd2bea9..6b3c95e 100644
return false;
diff --git a/lib/kobject.c b/lib/kobject.c
-index b512b74..8115eb1 100644
+index b512b74..ba78866 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
-@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+@@ -531,6 +531,13 @@ struct kobject *kobject_get(struct kobject *kobj)
+ return kobj;
+ }
+
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++ if (!kref_get_unless_zero(&kobj->kref))
++ kobj = NULL;
++ return kobj;
++}
++
+ /*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+@@ -700,7 +707,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
@@ -109271,7 +109344,16 @@ index b512b74..8115eb1 100644
.show = kobj_attr_show,
.store = kobj_attr_store,
};
-@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
+@@ -752,7 +759,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
+ spin_lock(&kset->list_lock);
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+- ret = kobject_get(k);
++ ret = kobject_get_unless_zero(k);
+ break;
+ }
+ }
+@@ -789,7 +796,7 @@ static struct kobj_type kset_ktype = {
* If the kset was not able to be created, NULL will be returned.
*/
static struct kset *kset_create(const char *name,
@@ -109280,7 +109362,7 @@ index b512b74..8115eb1 100644
struct kobject *parent_kobj)
{
struct kset *kset;
-@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
+@@ -832,7 +839,7 @@ static struct kset *kset_create(const char *name,
* If the kset was not able to be created, NULL will be returned.
*/
struct kset *kset_create_and_add(const char *name,
diff --git a/3.2.43/0000_README b/3.2.43/0000_README
index 72f3f88..9450604 100644
--- a/3.2.43/0000_README
+++ b/3.2.43/0000_README
@@ -90,7 +90,7 @@ Patch: 1042_linux-3.2.43.patch
From: http://www.kernel.org
Desc: Linux 3.2.43
-Patch: 4420_grsecurity-2.9.1-3.2.43-201304122026.patch
+Patch: 4420_grsecurity-2.9.1-3.2.43-201304181907.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.43/4420_grsecurity-2.9.1-3.2.43-201304122026.patch b/3.2.43/4420_grsecurity-2.9.1-3.2.43-201304181907.patch
index 48edf5c..38dc1b4 100644
--- a/3.2.43/4420_grsecurity-2.9.1-3.2.43-201304122026.patch
+++ b/3.2.43/4420_grsecurity-2.9.1-3.2.43-201304181907.patch
@@ -4310,6 +4310,19 @@ index 578e5a0..2ab6a8a 100644
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index adba970..ef0d917 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -50,7 +50,7 @@ struct smp_ops_t {
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index e30a13d..2b7d994 100644
--- a/arch/powerpc/include/asm/system.h
@@ -8497,7 +8510,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 9a42703..fd885e7 100644
+index 9a42703..79a673e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -235,7 +235,7 @@ config X86_HT
@@ -8552,19 +8565,12 @@ index 9a42703..fd885e7 100644
---help---
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
-@@ -1553,6 +1555,7 @@ config KEXEC_JUMP
- config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
- default "0x1000000"
-+ range 0x400000 0x40000000
- ---help---
- This gives the physical address where the kernel is loaded.
-
-@@ -1616,6 +1619,7 @@ config X86_NEED_RELOCS
+@@ -1616,6 +1618,8 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned" if X86_32
default "0x1000000"
-+ range 0x400000 0x1000000 if PAX_KERNEXEC
++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
range 0x2000 0x1000000
---help---
This value puts the alignment restrictions on physical address
@@ -11661,9 +11667,20 @@ index 5478825..839e88c 100644
#define flush_insn_slot(p) do { } while (0)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index b4973f4..8c1fb95 100644
+index b4973f4..fc8880d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
+@@ -393,8 +393,8 @@ struct kvm_vcpu_arch {
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+- unsigned int time_offset;
+- struct page *time_page;
++ struct gfn_to_hva_cache pv_time;
++ bool pv_time_enabled;
+
+ struct {
+ u64 msr_val;
@@ -459,7 +459,7 @@ struct kvm_arch {
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
@@ -22231,10 +22248,45 @@ index 407789b..5570a86 100644
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index f4063fd..b395ad7 100644
+index f4063fd..8ed079b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1348,8 +1348,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1105,7 +1105,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ {
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+- void *shared_kaddr;
+ unsigned long this_tsc_khz;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp;
+@@ -1141,7 +1140,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+
+ local_irq_restore(flags);
+
+- if (!vcpu->time_page)
++ if (!vcpu->pv_time_enabled)
+ return 0;
+
+ /*
+@@ -1199,14 +1198,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ */
+ vcpu->hv_clock.version += 2;
+
+- shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+-
+- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+- sizeof(vcpu->hv_clock));
+-
+- kunmap_atomic(shared_kaddr, KM_USER0);
+-
+- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
+ return 0;
+ }
+
+@@ -1348,8 +1342,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -22245,19 +22297,54 @@ index f4063fd..b395ad7 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -1603,6 +1603,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+@@ -1496,10 +1490,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
-+ /* Check that the address is 32-byte aligned. */
-+ if (vcpu->arch.time_offset &
-+ (sizeof(struct pvclock_vcpu_time_info) - 1))
+ static void kvmclock_reset(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.time_page) {
+- kvm_release_page_dirty(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ vcpu->arch.pv_time_enabled = false;
+ }
+
+ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+@@ -1591,6 +1582,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
+ kvmclock_reset(vcpu);
+
+ vcpu->arch.time = data;
+@@ -1600,16 +1592,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ if (!(data & 1))
+ break;
+
+- /* ...but clean it before doing the actual write */
+- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
++ gpa_offset = data & ~(PAGE_MASK | 1);
+
+- vcpu->arch.time_page =
+- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
++ /* Check that address+len does not cross page boundary */
++ if ((gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)
++ & PAGE_MASK)
+ break;
-+
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-@@ -2168,6 +2173,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+- if (is_error_page(vcpu->arch.time_page)) {
+- kvm_release_page_clean(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
+ break;
+ }
+ case MSR_KVM_ASYNC_PF_EN:
+@@ -2168,6 +2162,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -22266,7 +22353,7 @@ index f4063fd..b395ad7 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -2343,15 +2350,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+@@ -2343,15 +2339,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
@@ -22290,7 +22377,7 @@ index f4063fd..b395ad7 100644
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
-@@ -2366,15 +2378,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+@@ -2366,15 +2367,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
@@ -22313,7 +22400,7 @@ index f4063fd..b395ad7 100644
return 0;
out:
-@@ -2749,7 +2765,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+@@ -2749,7 +2754,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -22322,7 +22409,7 @@ index f4063fd..b395ad7 100644
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
-@@ -5191,7 +5207,7 @@ static void kvm_set_mmio_spte_mask(void)
+@@ -5191,7 +5196,7 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask);
}
@@ -22331,6 +22418,14 @@ index f4063fd..b395ad7 100644
{
int r;
struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+@@ -6549,6 +6554,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
+ goto fail_free_mce_banks;
+
++ vcpu->arch.pv_time_enabled = false;
+ kvm_async_pf_hash_reset(vcpu);
+
+ return 0;
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index cf4603b..7cdde38 100644
--- a/arch/x86/lguest/boot.c
@@ -52288,6 +52383,19 @@ index 1b55f70..bd6c289 100644
static void hfs_init_once(void *p)
{
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index 5849e3e..32b12e5 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -517,7 +517,7 @@ void hfsplus_file_truncate(struct inode *inode)
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE,
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d24a9b6..b398147 100644
--- a/fs/hfsplus/super.c
@@ -52406,9 +52514,18 @@ index 0aa424a..332097d8 100644
static int can_do_hugetlb_shm(void)
{
diff --git a/fs/inode.c b/fs/inode.c
-index ee4e66b..9a39f9c 100644
+index ee4e66b..e6f3833 100644
--- a/fs/inode.c
+++ b/fs/inode.c
+@@ -634,7 +634,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
+ * inode to the back of the list so we don't spin on it.
+ */
+ if (!spin_trylock(&inode->i_lock)) {
+- list_move_tail(&inode->i_lru, &sb->s_inode_lru);
++ list_move(&inode->i_lru, &sb->s_inode_lru);
+ continue;
+ }
+
@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
#ifdef CONFIG_SMP
@@ -69480,6 +69597,19 @@ index a64b00e..2ef3855f 100644
extern void softirq_init(void);
static inline void __raise_softirq_irqoff(unsigned int nr)
{
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 9d57a71..8d0f701 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -166,7 +166,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
+-static inline resource_size_t resource_size(const struct resource *res)
++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+ }
diff --git a/include/linux/irq.h b/include/linux/irq.h
index bff29c5..7437762 100644
--- a/include/linux/irq.h
@@ -69615,6 +69745,43 @@ index f66b065..c2c29b4 100644
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index d4a62ab..d064502 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -16,6 +16,7 @@
+ #define _KREF_H_
+
+ #include <linux/types.h>
++#include <linux/atomic.h>
+
+ struct kref {
+ atomic_t refcount;
+@@ -27,4 +28,24 @@ int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+ int kref_sub(struct kref *kref, unsigned int count,
+ void (*release) (struct kref *kref));
+
++/**
++ * kref_get_unless_zero - Increment refcount for object unless it is zero.
++ * @kref: object.
++ *
++ * Return non-zero if the increment succeeded. Otherwise return 0.
++ *
++ * This function is intended to simplify locking around refcounting for
++ * objects that can be looked up from a lookup structure, and which are
++ * removed from that lookup structure in the object destructor.
++ * Operations on such objects require at least a read lock around
++ * lookup + kref_get, and a write lock around kref_put + remove from lookup
++ * structure. Furthermore, RCU implementations become extremely tricky.
++ * With a lookup followed by a kref_get_unless_zero *with return value check*
++ * locking in the kref_put path can be deferred to the actual removal from
++ * the lookup structure and RCU lookups become trivial.
++ */
++static inline int __must_check kref_get_unless_zero(struct kref *kref)
++{
++ return atomic_add_unless(&kref->refcount, 1, 0);
++}
+ #endif /* _KREF_H_ */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6136821..da8ced0 100644
--- a/include/linux/kvm_host.h
@@ -77723,7 +77890,7 @@ index 66e4576..d05c6d5 100644
int this_cpu = smp_processor_id();
struct rq *this_rq = cpu_rq(this_cpu);
diff --git a/kernel/signal.c b/kernel/signal.c
-index ea76d30..353258a 100644
+index ea76d30..7b5d245 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
@@ -77849,6 +78016,15 @@ index ea76d30..353258a 100644
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, info, p);
/*
+@@ -2790,7 +2821,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+
+ static int do_tkill(pid_t tgid, pid_t pid, int sig)
+ {
+- struct siginfo info;
++ struct siginfo info = {};
+
+ info.si_signo = sig;
+ info.si_errno = 0;
diff --git a/kernel/smp.c b/kernel/smp.c
index 9e800b2..1533ba5 100644
--- a/kernel/smp.c
@@ -77975,7 +78151,7 @@ index 2f194e9..2c05ea9 100644
.priority = 10,
};
diff --git a/kernel/sys.c b/kernel/sys.c
-index f5939c2..110dc5d 100644
+index f5939c2..3084a61 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
@@ -78064,17 +78240,16 @@ index f5939c2..110dc5d 100644
if (rgid != (gid_t) -1)
new->gid = rgid;
if (egid != (gid_t) -1)
-@@ -897,6 +925,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
- old = current_cred();
- old_fsuid = old->fsuid;
-
-+ if (gr_check_user_change(-1, -1, uid))
-+ goto error;
-+
- if (uid == old->uid || uid == old->euid ||
+@@ -901,12 +929,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
uid == old->suid || uid == old->fsuid ||
nsown_capable(CAP_SETUID)) {
-@@ -907,6 +938,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ if (uid != old_fsuid) {
++ if (gr_check_user_change(-1, -1, uid))
++ goto error;
++
+ new->fsuid = uid;
+ if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+ goto change_okay;
}
}
@@ -79714,10 +79889,33 @@ index bd2bea9..6b3c95e 100644
return false;
diff --git a/lib/kobject.c b/lib/kobject.c
-index 640bd98..306b7ab 100644
+index 640bd98..a0de35f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
-@@ -891,9 +891,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+@@ -531,6 +531,13 @@ struct kobject *kobject_get(struct kobject *kobj)
+ return kobj;
+ }
+
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++ if (!kref_get_unless_zero(&kobj->kref))
++ kobj = NULL;
++ return kobj;
++}
++
+ /*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+@@ -785,7 +792,7 @@ struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+ slow_search:
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+- ret = kobject_get(k);
++ ret = kobject_get_unless_zero(k);
+ break;
+ }
+ }
+@@ -891,9 +898,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
@@ -86672,6 +86870,31 @@ index e41c40f..fbed7a7 100644
.init = devinet_init_net,
.exit = devinet_exit_net,
};
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 530787b..238fc3b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -137,8 +137,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ /* skb is pure payload to encrypt */
+
+- err = -ENOMEM;
+-
+ esp = x->data;
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+@@ -174,8 +172,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
+- if (!tmp)
++ if (!tmp) {
++ err = -ENOMEM;
+ goto error;
++ }
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 92fc5f6..b790d91 100644
--- a/net/ipv4/fib_frontend.c
@@ -87300,6 +87523,21 @@ index 94cdbc5..01d3a77 100644
.init = rt_genid_init,
};
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 769c0e9..8a1bed2 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -347,8 +347,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ * hasn't changed since we received the original syn, but I see
+ * no easy way to do this.
+ */
+- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
++ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
++ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+ inet_sk_flowi_flags(sk),
+ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+ ireq->loc_addr, th->source, th->dest);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5485077..7e37374 100644
--- a/net/ipv4/sysctl_net_ipv4.c
@@ -91632,7 +91870,7 @@ index 38f6617..e70b72b 100755
exuberant()
diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..14ae375 100644
+index 51bd5a0..7b71be9 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,6 +4,944 @@
@@ -92511,7 +92749,7 @@ index 51bd5a0..14ae375 100644
+config PAX_CONSTIFY_PLUGIN
+ bool "Automatically constify eligible structures"
+ default y if GRKERNSEC_CONFIG_AUTO
-+ depends on !UML
++ depends on !UML && PAX_KERNEXEC
+ help
+ By saying Y here the compiler will automatically constify a class
+ of types that contain only function pointers. This reduces the
diff --git a/3.8.7/0000_README b/3.8.8/0000_README
index 3317c66..94354d0 100644
--- a/3.8.7/0000_README
+++ b/3.8.8/0000_README
@@ -6,7 +6,11 @@ Patch: 1006_linux-3.8.7.patch
From: http://www.kernel.org
Desc: Linux 3.8.7
-Patch: 4420_grsecurity-2.9.1-3.8.7-201304122027.patch
+Patch: 1007_linux-3.8.8.patch
+From: http://www.kernel.org
+Desc: Linux 3.8.8
+
+Patch: 4420_grsecurity-2.9.1-3.8.8-201304181923.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.8.7/1006_linux-3.8.7.patch b/3.8.8/1006_linux-3.8.7.patch
index 99e0538..99e0538 100644
--- a/3.8.7/1006_linux-3.8.7.patch
+++ b/3.8.8/1006_linux-3.8.7.patch
diff --git a/3.8.8/1007_linux-3.8.8.patch b/3.8.8/1007_linux-3.8.8.patch
new file mode 100644
index 0000000..8db28d7
--- /dev/null
+++ b/3.8.8/1007_linux-3.8.8.patch
@@ -0,0 +1,1471 @@
+diff --git a/Makefile b/Makefile
+index 85204da..7684f95 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 8
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Displaced Humerus Anterior
+
+diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+index 93c3afb..3694e94 100644
+--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
++++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+@@ -96,11 +96,11 @@
+ marvell,function = "gpio";
+ };
+ pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
+- marvell,pins = "mpp44";
++ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
+- marvell,pins = "mpp45";
++ marvell,pins = "mpp47";
+ marvell,function = "gpio";
+ };
+
+@@ -157,14 +157,14 @@
+ gpios = <&gpio0 16 0>;
+ linux,default-trigger = "default-on";
+ };
+- health_led1 {
++ rebuild_led {
++ label = "status:white:rebuild_led";
++ gpios = <&gpio1 4 0>;
++ };
++ health_led {
+ label = "status:red:health_led";
+ gpios = <&gpio1 5 0>;
+ };
+- health_led2 {
+- label = "status:white:health_led";
+- gpios = <&gpio1 4 0>;
+- };
+ backup_led {
+ label = "status:blue:backup_led";
+ gpios = <&gpio0 15 0>;
+diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
+index f0727e8..0edce4b 100644
+--- a/arch/arm/mach-imx/clk-imx35.c
++++ b/arch/arm/mach-imx/clk-imx35.c
+@@ -257,6 +257,7 @@ int __init mx35_clocks_init()
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
+ clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
++ clk_register_clkdev(clk[admux_gate], "audmux", NULL);
+
+ clk_prepare_enable(clk[spba_gate]);
+ clk_prepare_enable(clk[gpio1_gate]);
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 5edd174..7361e47 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+ }
+
+-void arch_flush_lazy_mmu_mode(void);
++static inline void arch_flush_lazy_mmu_mode(void)
++{
++ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
++}
+
+ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 142236e..b3b0ec1 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -91,6 +91,7 @@ struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
++ void (*flush)(void);
+ };
+
+ struct pv_time_ops {
+@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
+
+ void paravirt_enter_lazy_mmu(void);
+ void paravirt_leave_lazy_mmu(void);
++void paravirt_flush_lazy_mmu(void);
+
+ void _paravirt_nop(void);
+ u32 _paravirt_ident_32(u32);
+diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
+index 4fef207..c779730 100644
+--- a/arch/x86/include/asm/tlb.h
++++ b/arch/x86/include/asm/tlb.h
+@@ -7,7 +7,7 @@
+
+ #define tlb_flush(tlb) \
+ { \
+- if (tlb->fullmm == 0) \
++ if (!tlb->fullmm && !tlb->need_flush_all) \
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
+ else \
+ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 17fff18..8bfb335 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
+ leave_lazy(PARAVIRT_LAZY_MMU);
+ }
+
++void paravirt_flush_lazy_mmu(void)
++{
++ preempt_disable();
++
++ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
++ arch_leave_lazy_mmu_mode();
++ arch_enter_lazy_mmu_mode();
++ }
++
++ preempt_enable();
++}
++
+ void paravirt_start_context_switch(struct task_struct *prev)
+ {
+ BUG_ON(preemptible());
+@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+ return this_cpu_read(paravirt_lazy_mode);
+ }
+
+-void arch_flush_lazy_mmu_mode(void)
+-{
+- preempt_disable();
+-
+- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+- arch_leave_lazy_mmu_mode();
+- arch_enter_lazy_mmu_mode();
+- }
+-
+- preempt_enable();
+-}
+-
+ struct pv_info pv_info = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
++ .flush = paravirt_nop,
+ },
+
+ .set_fixmap = native_set_fixmap,
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index df4176c..20a4fd4 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1333,6 +1333,7 @@ __init void lguest_init(void)
+ pv_mmu_ops.read_cr3 = lguest_read_cr3;
+ pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
+ pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
++ pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
+ pv_mmu_ops.pte_update = lguest_pte_update;
+ pv_mmu_ops.pte_update_defer = lguest_pte_update;
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index fb674fd..4f7d793 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+ if (pgd_none(*pgd_ref))
+ return -1;
+
+- if (pgd_none(*pgd))
++ if (pgd_none(*pgd)) {
+ set_pgd(pgd, *pgd_ref);
+- else
++ arch_flush_lazy_mmu_mode();
++ } else {
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++ }
+
+ /*
+ * Below here mismatches are bugs because these lower tables
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index e27fbf8..395b3b4 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+ {
+ paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
++ /*
++ * NOTE! For PAE, any changes to the top page-directory-pointer-table
++ * entries need a full cr3 reload to flush.
++ */
++#ifdef CONFIG_X86_PAE
++ tlb->need_flush_all = 1;
++#endif
+ tlb_remove_page(tlb, virt_to_page(pmd));
+ }
+
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 01de35c..cab96b6 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -2190,6 +2190,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+ .lazy_mode = {
+ .enter = paravirt_enter_lazy_mmu,
+ .leave = xen_leave_lazy_mmu,
++ .flush = paravirt_flush_lazy_mmu,
+ },
+
+ .set_fixmap = xen_set_fixmap,
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index 5a31264..8607724 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
+- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+- spin_lock(&d->lock);
+- if (list_empty(&c->node))
+- list_add_tail(&c->node, &d->pending);
+- spin_unlock(&d->lock);
+- tasklet_schedule(&d->task);
++ /*
++ * c->cyclic is used only by audio and in this case the DMA need
++ * to be started without delay.
++ */
++ if (!c->cyclic) {
++ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
++ spin_lock(&d->lock);
++ if (list_empty(&c->node))
++ list_add_tail(&c->node, &d->pending);
++ spin_unlock(&d->lock);
++ tasklet_schedule(&d->task);
++ } else {
++ omap_dma_start_desc(c);
++ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index d542a14..ea537fa 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -228,7 +228,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
+ if (!np)
+ return;
+
+- do {
++ for (;; index++) {
+ ret = of_parse_phandle_with_args(np, "gpio-ranges",
+ "#gpio-range-cells", index, &pinspec);
+ if (ret)
+@@ -257,8 +257,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
+
+ if (ret)
+ break;
+-
+- } while (index++);
++ }
+ }
+
+ #else
+diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
+index fe5cdbc..b44d548 100644
+--- a/drivers/gpu/drm/udl/udl_connector.c
++++ b/drivers/gpu/drm/udl/udl_connector.c
+@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
+ int ret;
+
+ edid = (struct edid *)udl_get_edid(udl);
++ if (!edid) {
++ drm_mode_connector_update_edid_property(connector, NULL);
++ return 0;
++ }
+
+ /*
+ * We only read the main block, but if the monitor reports extension
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+index 21a8242..18d3764 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
+ gain0_15 = ((biq1 & 0xf) << 12) |
+ ((tia & 0xf) << 8) |
+ ((lna2 & 0x3) << 6) |
+- ((lna2 & 0x3) << 4) |
+- ((lna1 & 0x3) << 2) |
+- ((lna1 & 0x3) << 0);
++ ((lna2 &
++ 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
+
+ mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
+ mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
+@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
+ }
+
+ mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
+- mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
+- mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
+
+ }
+
+@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
+ return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
+ }
+
+-static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
+- u16 tia_gain, u16 lna2_gain)
+-{
+- u32 i_thresh_l, q_thresh_l;
+- u32 i_thresh_h, q_thresh_h;
+- struct lcnphy_iq_est iq_est_h, iq_est_l;
+-
+- wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
+- lna2_gain, 0);
+-
+- wlc_lcnphy_rx_gain_override_enable(pi, true);
+- wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
+- udelay(500);
+- write_radio_reg(pi, RADIO_2064_REG112, 0);
+- if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
+- return false;
+-
+- wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
+- udelay(500);
+- write_radio_reg(pi, RADIO_2064_REG112, 0);
+- if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
+- return false;
+-
+- i_thresh_l = (iq_est_l.i_pwr << 1);
+- i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
+-
+- q_thresh_l = (iq_est_l.q_pwr << 1);
+- q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
+- if ((iq_est_h.i_pwr > i_thresh_l) &&
+- (iq_est_h.i_pwr < i_thresh_h) &&
+- (iq_est_h.q_pwr > q_thresh_l) &&
+- (iq_est_h.q_pwr < q_thresh_h))
+- return true;
+-
+- return false;
+-}
+-
+ static bool
+ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
+ const struct lcnphy_rx_iqcomp *iqcomp,
+@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
+ RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
+ rfoverride3_old, rfoverride3val_old, rfoverride4_old,
+ rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
+- int tia_gain, lna2_gain, biq1_gain;
+- bool set_gain;
++ int tia_gain;
++ u32 received_power, rx_pwr_threshold;
+ u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
+ u16 values_to_save[11];
+ s16 *ptr;
+@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
+ goto cal_done;
+ }
+
+- WARN_ON(module != 1);
+- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+-
+- for (i = 0; i < 11; i++)
+- values_to_save[i] =
+- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+- Core1TxControl_old = read_phy_reg(pi, 0x631);
+-
+- or_phy_reg(pi, 0x631, 0x0015);
+-
+- RFOverride0_old = read_phy_reg(pi, 0x44c);
+- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+- rfoverride2_old = read_phy_reg(pi, 0x4b0);
+- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+- rfoverride3_old = read_phy_reg(pi, 0x4f9);
+- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+- rfoverride4_old = read_phy_reg(pi, 0x938);
+- rfoverride4val_old = read_phy_reg(pi, 0x939);
+- afectrlovr_old = read_phy_reg(pi, 0x43b);
+- afectrlovrval_old = read_phy_reg(pi, 0x43c);
+- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
+-
+- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+- if (tx_gain_override_old) {
+- wlc_lcnphy_get_tx_gain(pi, &old_gains);
+- tx_gain_index_old = pi_lcn->lcnphy_current_index;
+- }
+-
+- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
++ if (module == 1) {
+
+- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
++ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
++ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+
+- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
++ for (i = 0; i < 11; i++)
++ values_to_save[i] =
++ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
++ Core1TxControl_old = read_phy_reg(pi, 0x631);
++
++ or_phy_reg(pi, 0x631, 0x0015);
++
++ RFOverride0_old = read_phy_reg(pi, 0x44c);
++ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
++ rfoverride2_old = read_phy_reg(pi, 0x4b0);
++ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
++ rfoverride3_old = read_phy_reg(pi, 0x4f9);
++ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
++ rfoverride4_old = read_phy_reg(pi, 0x938);
++ rfoverride4val_old = read_phy_reg(pi, 0x939);
++ afectrlovr_old = read_phy_reg(pi, 0x43b);
++ afectrlovrval_old = read_phy_reg(pi, 0x43c);
++ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
++ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
++
++ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
++ if (tx_gain_override_old) {
++ wlc_lcnphy_get_tx_gain(pi, &old_gains);
++ tx_gain_index_old = pi_lcn->lcnphy_current_index;
++ }
+
+- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+-
+- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
++ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+
+- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
++ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
++ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+
+- write_phy_reg(pi, 0x6da, 0xffff);
+- or_phy_reg(pi, 0x6db, 0x3);
++ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
++ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+
+- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+- set_gain = false;
+-
+- lna2_gain = 3;
+- while ((lna2_gain >= 0) && !set_gain) {
+- tia_gain = 4;
+-
+- while ((tia_gain >= 0) && !set_gain) {
+- biq1_gain = 6;
+-
+- while ((biq1_gain >= 0) && !set_gain) {
+- set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
+- (u16)
+- biq1_gain,
+- (u16)
+- tia_gain,
+- (u16)
+- lna2_gain);
+- biq1_gain -= 1;
+- }
++ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
++ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
++ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
++ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
++ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
++ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
++ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
++ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
++ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
++ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
++
++ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
++ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
++ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
++ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
++ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
++ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
++ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
++ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
++ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
++ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
++
++ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
++ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
++
++ wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
++ write_phy_reg(pi, 0x6da, 0xffff);
++ or_phy_reg(pi, 0x6db, 0x3);
++ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
++ wlc_lcnphy_rx_gain_override_enable(pi, true);
++
++ tia_gain = 8;
++ rx_pwr_threshold = 950;
++ while (tia_gain > 0) {
+ tia_gain -= 1;
++ wlc_lcnphy_set_rx_gain_by_distribution(pi,
++ 0, 0, 2, 2,
++ (u16)
++ tia_gain, 1, 0);
++ udelay(500);
++
++ received_power =
++ wlc_lcnphy_measure_digital_power(pi, 2000);
++ if (received_power < rx_pwr_threshold)
++ break;
+ }
+- lna2_gain -= 1;
+- }
++ result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
+
+- if (set_gain)
+- result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
+- else
+- result = false;
++ wlc_lcnphy_stop_tx_tone(pi);
+
+- wlc_lcnphy_stop_tx_tone(pi);
++ write_phy_reg(pi, 0x631, Core1TxControl_old);
+
+- write_phy_reg(pi, 0x631, Core1TxControl_old);
+-
+- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+- write_phy_reg(pi, 0x4b0, rfoverride2_old);
+- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+- write_phy_reg(pi, 0x4f9, rfoverride3_old);
+- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+- write_phy_reg(pi, 0x938, rfoverride4_old);
+- write_phy_reg(pi, 0x939, rfoverride4val_old);
+- write_phy_reg(pi, 0x43b, afectrlovr_old);
+- write_phy_reg(pi, 0x43c, afectrlovrval_old);
+- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
++ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
++ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
++ write_phy_reg(pi, 0x4b0, rfoverride2_old);
++ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
++ write_phy_reg(pi, 0x4f9, rfoverride3_old);
++ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
++ write_phy_reg(pi, 0x938, rfoverride4_old);
++ write_phy_reg(pi, 0x939, rfoverride4val_old);
++ write_phy_reg(pi, 0x43b, afectrlovr_old);
++ write_phy_reg(pi, 0x43c, afectrlovrval_old);
++ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
++ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+
+- wlc_lcnphy_clear_trsw_override(pi);
++ wlc_lcnphy_clear_trsw_override(pi);
+
+- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
++ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+
+- for (i = 0; i < 11; i++)
+- write_radio_reg(pi, rxiq_cal_rf_reg[i],
+- values_to_save[i]);
++ for (i = 0; i < 11; i++)
++ write_radio_reg(pi, rxiq_cal_rf_reg[i],
++ values_to_save[i]);
+
+- if (tx_gain_override_old)
+- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+- else
+- wlc_lcnphy_disable_tx_gain_override(pi);
++ if (tx_gain_override_old)
++ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
++ else
++ wlc_lcnphy_disable_tx_gain_override(pi);
+
+- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+- wlc_lcnphy_rx_gain_override_enable(pi, false);
++ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
++ wlc_lcnphy_rx_gain_override_enable(pi, false);
++ }
+
+ cal_done:
+ kfree(ptr);
+@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
+ write_radio_reg(pi, RADIO_2064_REG038, 3);
+ write_radio_reg(pi, RADIO_2064_REG091, 7);
+ }
+-
+- if (!(pi->sh->boardflags & BFL_FEM)) {
+- u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
+- 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
+-
+- write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
+- write_radio_reg(pi, RADIO_2064_REG091, 0x3);
+- write_radio_reg(pi, RADIO_2064_REG038, 0x3);
+-
+- write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
+- }
+ }
+
+ static int
+@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
+ } else {
+ mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+- mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
+- mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
+- mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
+- mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
+- mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+- mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
+- mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
+- mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
+- mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
+- mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
+ }
+ } else {
+ mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
+@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
+ (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
+
+ mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
+- mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
+ }
+
+ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
+ {
+ struct phytbl_info tab;
+ u32 rfseq, ind;
+- u8 tssi_sel;
+
+ tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
+ tab.tbl_width = 32;
+@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
+
+ mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
+
+- if (pi->sh->boardflags & BFL_FEM) {
+- tssi_sel = 0x1;
+- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+- } else {
+- tssi_sel = 0xe;
+- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
+- }
++ wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
+
+ mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
+@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
+ mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
+- mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
++ mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
+ mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
+ } else {
+- mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
+ mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
+ }
+@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
+
+ mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
+
+- mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
+- mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+- mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+-
+ wlc_lcnphy_pwrctrl_rssiparams(pi);
+ }
+
+@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
+ read_radio_reg(pi, RADIO_2064_REG007) & 1;
+ u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
+ u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
+- u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
+-
+ idleTssi = read_phy_reg(pi, 0x4ab);
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
+@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
+ mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
+ mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
+ wlc_lcnphy_tssi_setup(pi);
+-
+- mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
+- mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
+-
+- wlc_lcnphy_set_bbmult(pi, 0x0);
+-
+ wlc_phy_do_dummy_tx(pi, true, OFF);
+ idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
+ >> 0);
+@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
+
+ mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
+
+- wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
+ wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
+ wlc_lcnphy_set_tx_gain(pi, &old_gains);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
+@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
+ wlc_lcnphy_write_table(pi, &tab);
+ tab.tbl_offset++;
+ }
+- mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
+- mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
+- mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
+- mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
+- mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
+
+ mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
+
+@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
+ target_gains.pad_gain = 21;
+ target_gains.dac_gain = 0;
+ wlc_lcnphy_set_tx_gain(pi, &target_gains);
++ wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
+
+@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
+ lcnphy_recal ? LCNPHY_CAL_RECAL :
+ LCNPHY_CAL_FULL), false);
+ } else {
+- wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
+ wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
+ }
+
+@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ pa_gain = 0x70;
+ else
+- pa_gain = 0x60;
++ pa_gain = 0x70;
+
+ if (pi->sh->boardflags & BFL_FEM)
+ pa_gain = 0x10;
+-
+ tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
+ tab.tbl_width = 32;
+ tab.tbl_len = 1;
+ tab.tbl_ptr = &val;
+
+ for (j = 0; j < 128; j++) {
+- if (pi->sh->boardflags & BFL_FEM)
+- gm_gain = gain_table[j].gm;
+- else
+- gm_gain = 15;
+-
++ gm_gain = gain_table[j].gm;
+ val = (((u32) pa_gain << 24) |
+ (gain_table[j].pad << 16) |
+ (gain_table[j].pga << 8) | gm_gain);
+@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
+
+ write_phy_reg(pi, 0x4ea, 0x4688);
+
+- if (pi->sh->boardflags & BFL_FEM)
+- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+- else
+- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
++ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+
+ mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
+
+@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
+ wlc_lcnphy_rcal(pi);
+
+ wlc_lcnphy_rc_cal(pi);
+-
+- if (!(pi->sh->boardflags & BFL_FEM)) {
+- write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
+- write_radio_reg(pi, RADIO_2064_REG033, 0x19);
+- write_radio_reg(pi, RADIO_2064_REG039, 0xe);
+- }
+-
+ }
+
+ static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
+@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
+ wlc_lcnphy_write_table(pi, &tab);
+ }
+
+- if (!(pi->sh->boardflags & BFL_FEM)) {
+- tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+- tab.tbl_width = 16;
+- tab.tbl_ptr = &val;
+- tab.tbl_len = 1;
++ tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
++ tab.tbl_width = 16;
++ tab.tbl_ptr = &val;
++ tab.tbl_len = 1;
+
+- val = 150;
+- tab.tbl_offset = 0;
+- wlc_lcnphy_write_table(pi, &tab);
++ val = 114;
++ tab.tbl_offset = 0;
++ wlc_lcnphy_write_table(pi, &tab);
+
+- val = 220;
+- tab.tbl_offset = 1;
+- wlc_lcnphy_write_table(pi, &tab);
+- }
++ val = 130;
++ tab.tbl_offset = 1;
++ wlc_lcnphy_write_table(pi, &tab);
++
++ val = 6;
++ tab.tbl_offset = 8;
++ wlc_lcnphy_write_table(pi, &tab);
+
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if (pi->sh->boardflags & BFL_FEM)
+@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
+ wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
+
+ mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
+- wlc_lcnphy_tssi_setup(pi);
+ }
+
+ void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
+@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
+ if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
+ return false;
+
+- if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
++ if ((pi->sh->boardflags & BFL_FEM) &&
++ (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
+ if (pi_lcn->lcnphy_tempsense_option == 3) {
+ pi->hwpwrctrl = true;
+ pi->hwpwrctrl_capable = true;
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+index b7e95ac..622c01c 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
+ };
+
+ static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
+- 0x0009,
+ 0x000a,
+- 0x0005,
+- 0x0006,
+ 0x0009,
+- 0x000a,
+- 0x0005,
+ 0x0006,
+- 0x0009,
+- 0x000a,
+ 0x0005,
+- 0x0006,
+- 0x0009,
+ 0x000a,
+- 0x0005,
+- 0x0006,
+ 0x0009,
+- 0x000a,
+- 0x0005,
+ 0x0006,
+- 0x0009,
+- 0x000a,
+ 0x0005,
+- 0x0006,
+- 0x0009,
+ 0x000a,
+- 0x0005,
+- 0x0006,
+ 0x0009,
+- 0x000a,
+- 0x0005,
+ 0x0006,
+- 0x0009,
+- 0x000a,
+ 0x0005,
+- 0x0006,
+- 0x0009,
+ 0x000a,
+- 0x0005,
+- 0x0006,
+ 0x0009,
+- 0x000a,
+- 0x0005,
+ 0x0006,
+- 0x0009,
+- 0x000a,
+ 0x0005,
+- 0x0006,
++ 0x000a,
+ 0x0009,
++ 0x0006,
++ 0x0005,
+ 0x000a,
++ 0x0009,
++ 0x0006,
+ 0x0005,
++ 0x000a,
++ 0x0009,
+ 0x0006,
++ 0x0005,
++ 0x000a,
+ 0x0009,
++ 0x0006,
++ 0x0005,
+ 0x000a,
++ 0x0009,
++ 0x0006,
+ 0x0005,
++ 0x000a,
++ 0x0009,
+ 0x0006,
++ 0x0005,
++ 0x000a,
+ 0x0009,
++ 0x0006,
++ 0x0005,
+ 0x000a,
++ 0x0009,
++ 0x0006,
+ 0x0005,
++ 0x000a,
++ 0x0009,
+ 0x0006,
++ 0x0005,
++ 0x000a,
+ 0x0009,
++ 0x0006,
++ 0x0005,
+ 0x000a,
++ 0x0009,
++ 0x0006,
+ 0x0005,
++ 0x000a,
++ 0x0009,
+ 0x0006,
++ 0x0005,
+ };
+
+ static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index aec2e0d..1924d8b 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+ linkrate = phy->linkrate;
+ memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
+
++ /* Handle vacant phy - rest of dr data is not valid so skip it */
++ if (phy->phy_state == PHY_VACANT) {
++ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
++ phy->attached_dev_type = NO_DEVICE;
++ if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
++ phy->phy_id = phy_id;
++ goto skip;
++ } else
++ goto out;
++ }
++
+ phy->attached_dev_type = to_dev_type(dr);
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+ goto out;
+@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+ phy->phy->maximum_linkrate = dr->pmax_linkrate;
+ phy->phy->negotiated_linkrate = phy->linkrate;
+
++ skip:
+ if (new_phy)
+ if (sas_phy_add(phy->phy)) {
+ sas_phy_free(phy->phy);
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 7d4ec02..fea564c 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -408,6 +408,7 @@ static inline int core_alua_state_standby(
+ case REPORT_LUNS:
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
++ return 0;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+@@ -450,6 +451,7 @@ static inline int core_alua_state_unavailable(
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
++ return 0;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+@@ -490,6 +492,7 @@ static inline int core_alua_state_transition(
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
++ return 0;
+ case MAINTENANCE_IN:
+ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index c578229..78f1be2 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -934,17 +934,17 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
+ * race with the set_ldisc code path.
+ */
+
+- tty_lock_pair(tty, o_tty);
+ tty_ldisc_halt(tty);
+- tty_ldisc_flush_works(tty);
+- if (o_tty) {
++ if (o_tty)
+ tty_ldisc_halt(o_tty);
++
++ tty_ldisc_flush_works(tty);
++ if (o_tty)
+ tty_ldisc_flush_works(o_tty);
+- }
+
++ tty_lock_pair(tty, o_tty);
+ /* This will need doing differently if we need to lock */
+ tty_ldisc_kill(tty);
+-
+ if (o_tty)
+ tty_ldisc_kill(o_tty);
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 12b3da3..f7199b9 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1546,14 +1546,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ }
+ break;
+ case Opt_blank_pass:
+- vol->password = NULL;
+- break;
+- case Opt_pass:
+ /* passwords have to be handled differently
+ * to allow the character used for deliminator
+ * to be passed within them
+ */
+
++ /*
++ * Check if this is a case where the password
++ * starts with a delimiter
++ */
++ tmp_end = strchr(data, '=');
++ tmp_end++;
++ if (!(tmp_end < end && tmp_end[1] == delim)) {
++ /* No it is not. Set the password to NULL */
++ vol->password = NULL;
++ break;
++ }
++ /* Yes it is. Drop down to Opt_pass below.*/
++ case Opt_pass:
+ /* Obtain the value string */
+ value = strchr(data, '=');
+ value++;
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 991ab2d..7af426b 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -924,8 +924,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
+ cmd = F_SETLK;
+ fl->fl_type = F_UNLCK;
+ }
+- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
++ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
++ if (fl->fl_type == F_UNLCK)
++ posix_lock_file_wait(file, fl);
+ return -EIO;
++ }
+ if (IS_GETLK(cmd))
+ return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
+ else if (fl->fl_type == F_UNLCK)
+diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
+index b7eff07..9afba3d6 100644
+--- a/fs/gfs2/rgrp.c
++++ b/fs/gfs2/rgrp.c
+@@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip)
+ RB_CLEAR_NODE(&ip->i_res->rs_node);
+ out:
+ up_write(&ip->i_rw_mutex);
+- return 0;
++ return error;
+ }
+
+ static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
+diff --git a/fs/inode.c b/fs/inode.c
+index 14084b7..b98540e 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
+ * inode to the back of the list so we don't spin on it.
+ */
+ if (!spin_trylock(&inode->i_lock)) {
+- list_move_tail(&inode->i_lru, &sb->s_inode_lru);
++ list_move(&inode->i_lru, &sb->s_inode_lru);
+ continue;
+ }
+
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index 25f01d0..b1b1fa6 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -99,7 +99,12 @@ struct mmu_gather {
+ unsigned int need_flush : 1, /* Did free PTEs */
+ fast_mode : 1; /* No batching */
+
+- unsigned int fullmm;
++ /* we are in the middle of an operation to clear
++ * a full mm and can make some optimizations */
++ unsigned int fullmm : 1,
++ /* we have performed an operation which
++ * requires a complete flush of the tlb */
++ need_flush_all : 1;
+
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 92691d8..616603d 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -394,7 +394,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+ ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+-loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
+ int ftrace_regex_release(struct inode *inode, struct file *file);
+
+ void __init
+@@ -567,6 +566,8 @@ static inline int
+ ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+
++loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
++
+ /* totally disable ftrace - can not re-enable after this */
+ void ftrace_kill(void);
+
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 31cd1bf..fede1d0 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
+ goto out_unlock;
+ break;
+ }
++ msg = ERR_PTR(-EAGAIN);
+ } else
+ break;
+ msg_counter++;
+diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
+index c685e31..c3ae144 100644
+--- a/kernel/sched/clock.c
++++ b/kernel/sched/clock.c
+@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
+ u64 this_clock, remote_clock;
+ u64 *ptr, old_val, val;
+
++#if BITS_PER_LONG != 64
++again:
++ /*
++ * Careful here: The local and the remote clock values need to
++ * be read out atomic as we need to compare the values and
++ * then update either the local or the remote side. So the
++ * cmpxchg64 below only protects one readout.
++ *
++ * We must reread via sched_clock_local() in the retry case on
++ * 32bit as an NMI could use sched_clock_local() via the
++ * tracer and hit between the readout of
++ * the low32bit and the high 32bit portion.
++ */
++ this_clock = sched_clock_local(my_scd);
++ /*
++ * We must enforce atomic readout on 32bit, otherwise the
++ * update on the remote cpu can hit inbetween the readout of
++ * the low32bit and the high 32bit portion.
++ */
++ remote_clock = cmpxchg64(&scd->clock, 0, 0);
++#else
++ /*
++ * On 64bit the read of [my]scd->clock is atomic versus the
++ * update, so we can avoid the above 32bit dance.
++ */
+ sched_clock_local(my_scd);
+ again:
+ this_clock = my_scd->clock;
+ remote_clock = scd->clock;
++#endif
+
+ /*
+ * Use the opportunity that we have both locks
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 265b376..47f1d1b 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -323,7 +323,6 @@ void kernel_restart_prepare(char *cmd)
+ system_state = SYSTEM_RESTART;
+ usermodehelper_disable();
+ device_shutdown();
+- syscore_shutdown();
+ }
+
+ /**
+@@ -369,6 +368,7 @@ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
+ disable_nonboot_cpus();
++ syscore_shutdown();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+@@ -394,6 +394,7 @@ static void kernel_shutdown_prepare(enum system_states state)
+ void kernel_halt(void)
+ {
+ kernel_shutdown_prepare(SYSTEM_HALT);
++ disable_nonboot_cpus();
+ syscore_shutdown();
+ printk(KERN_EMERG "System halted.\n");
+ kmsg_dump(KMSG_DUMP_HALT);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 64bc5d8..35cc3a8 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -668,7 +668,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+ free_page(tmp);
+ }
+
+- free_page((unsigned long)stat->pages);
+ stat->pages = NULL;
+ stat->start = NULL;
+
+@@ -1028,6 +1027,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+
+ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
++loff_t
++ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
++{
++ loff_t ret;
++
++ if (file->f_mode & FMODE_READ)
++ ret = seq_lseek(file, offset, whence);
++ else
++ file->f_pos = ret = 1;
++
++ return ret;
++}
++
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+ #ifndef CONFIG_FTRACE_MCOUNT_RECORD
+@@ -2590,7 +2602,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
+ * routine, you can use ftrace_filter_write() for the write
+ * routine if @flag has FTRACE_ITER_FILTER set, or
+ * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
+- * ftrace_regex_lseek() should be used as the lseek routine, and
++ * ftrace_filter_lseek() should be used as the lseek routine, and
+ * release must call ftrace_regex_release().
+ */
+ int
+@@ -2674,19 +2686,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
+ inode, file);
+ }
+
+-loff_t
+-ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
+-{
+- loff_t ret;
+-
+- if (file->f_mode & FMODE_READ)
+- ret = seq_lseek(file, offset, whence);
+- else
+- file->f_pos = ret = 1;
+-
+- return ret;
+-}
+-
+ static int ftrace_match(char *str, char *regex, int len, int type)
+ {
+ int matched = 0;
+@@ -3549,7 +3548,7 @@ static const struct file_operations ftrace_filter_fops = {
+ .open = ftrace_filter_open,
+ .read = seq_read,
+ .write = ftrace_filter_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+@@ -3557,7 +3556,7 @@ static const struct file_operations ftrace_notrace_fops = {
+ .open = ftrace_notrace_open,
+ .read = seq_read,
+ .write = ftrace_notrace_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+@@ -3762,8 +3761,8 @@ static const struct file_operations ftrace_graph_fops = {
+ .open = ftrace_graph_open,
+ .read = seq_read,
+ .write = ftrace_graph_write,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_graph_release,
+- .llseek = seq_lseek,
+ };
+ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+@@ -4421,7 +4420,7 @@ static const struct file_operations ftrace_pid_fops = {
+ .open = ftrace_pid_open,
+ .write = ftrace_pid_write,
+ .read = seq_read,
+- .llseek = seq_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_pid_release,
+ };
+
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index 42ca822..83a8b5b 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
+ .open = stack_trace_filter_open,
+ .read = seq_read,
+ .write = ftrace_filter_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+diff --git a/lib/kobject.c b/lib/kobject.c
+index e07ee1f..a654866 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
+ return kobj;
+ }
+
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++ if (!kref_get_unless_zero(&kobj->kref))
++ kobj = NULL;
++ return kobj;
++}
++
+ /*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
+
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+- ret = kobject_get(k);
++ ret = kobject_get_unless_zero(k);
+ break;
+ }
+ }
+diff --git a/mm/memory.c b/mm/memory.c
+index bb1369f..f8b734a 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -212,6 +212,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+ tlb->mm = mm;
+
+ tlb->fullmm = fullmm;
++ tlb->need_flush_all = 0;
+ tlb->start = -1UL;
+ tlb->end = 0;
+ tlb->need_flush = 0;
+diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
+index 1440b3f..b54c2e8 100644
+--- a/sound/soc/codecs/wm5102.c
++++ b/sound/soc/codecs/wm5102.c
+@@ -576,7 +576,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_codec *codec = w->codec;
+- struct arizona *arizona = dev_get_drvdata(codec->dev);
++ struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ struct regmap *regmap = codec->control_data;
+ const struct reg_default *patch = NULL;
+ int i, patch_size;
+diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
+index 134e41c..f8a31ad 100644
+--- a/sound/soc/codecs/wm8903.c
++++ b/sound/soc/codecs/wm8903.c
+@@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
+ { "ROP", NULL, "Right Speaker PGA" },
+ { "RON", NULL, "Right Speaker PGA" },
+
++ { "Charge Pump", NULL, "CLK_DSP" },
++
+ { "Left Headphone Output PGA", NULL, "Charge Pump" },
+ { "Right Headphone Output PGA", NULL, "Charge Pump" },
+ { "Left Line Output PGA", NULL, "Charge Pump" },
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 2370063..f3ab918 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -2959,7 +2959,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ val = val << shift;
+
+ ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+- if (ret != 0)
++ if (ret < 0)
+ return ret;
+
+ if (snd_soc_volsw_is_stereo(mc)) {
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 15520de..190f434 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ else
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+- 0, cpu_to_le16(wIndex),
++ 0, wIndex,
+ &tmp, sizeof(tmp), 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
+
+@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ else
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+- cpu_to_le16(wValue), cpu_to_le16(wIndex),
++ wValue, wIndex,
+ NULL, 0, 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0115289..b9ca776 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
+ {
+ int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- cpu_to_le16(1), 0, NULL, 0, 1000);
++ 1, 0, NULL, 0, 1000);
+
+ if (ret < 0)
+ return ret;
diff --git a/3.8.7/4420_grsecurity-2.9.1-3.8.7-201304122027.patch b/3.8.8/4420_grsecurity-2.9.1-3.8.8-201304181923.patch
index a493e4d..18ab857 100644
--- a/3.8.7/4420_grsecurity-2.9.1-3.8.7-201304122027.patch
+++ b/3.8.8/4420_grsecurity-2.9.1-3.8.8-201304181923.patch
@@ -259,7 +259,7 @@ index 986614d..e8bfedc 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 85204da..9d99250 100644
+index 7684f95..12f2f86 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -3306,7 +3306,7 @@ index b0179b8..829510e 100644
+
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
-index 11c1785..c67d54c 100644
+index 11c1785..1b209f4 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,7 +8,11 @@
@@ -3334,17 +3334,26 @@ index 11c1785..c67d54c 100644
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
-@@ -144,6 +153,10 @@ SECTIONS
+@@ -112,6 +121,8 @@ SECTIONS
+ ARM_CPU_KEEP(PROC_INFO)
+ }
+
++ _etext = .; /* End of text section */
++
+ RO_DATA(PAGE_SIZE)
- _etext = .; /* End of text and rodata section */
+ . = ALIGN(4);
+@@ -142,7 +153,9 @@ SECTIONS
+ NOTES
+
+- _etext = .; /* End of text and rodata section */
+#ifdef CONFIG_PAX_KERNEXEC
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
-+
+
#ifndef CONFIG_XIP_KERNEL
. = ALIGN(PAGE_SIZE);
- __init_begin = .;
@@ -203,6 +216,11 @@ SECTIONS
. = PAGE_OFFSET + TEXT_OFFSET;
#else
@@ -6672,6 +6681,19 @@ index 3d5c9dc..62f8414 100644
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 195ce2a..ab5c614 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -50,7 +50,7 @@ struct smp_ops_t {
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 406b7b9..af63426 100644
--- a/arch/powerpc/include/asm/thread_info.h
@@ -10493,7 +10515,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 0694d09..b58b3aa 100644
+index 0694d09..58ea1a1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -238,7 +238,7 @@ config X86_HT
@@ -10548,19 +10570,12 @@ index 0694d09..b58b3aa 100644
---help---
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
-@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
- config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
- default "0x1000000"
-+ range 0x400000 0x40000000
- ---help---
- This gives the physical address where the kernel is loaded.
-
-@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
+@@ -1662,6 +1664,8 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned" if X86_32
default "0x1000000"
-+ range 0x400000 0x1000000 if PAX_KERNEXEC
++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
range 0x2000 0x1000000
---help---
This value puts the alignment restrictions on physical address
@@ -14375,7 +14390,7 @@ index 320f7bb..e89f8f8 100644
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index 5edd174..c395822 100644
+index 7361e47..16dc226 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -564,7 +564,7 @@ static inline pmd_t __pmd(pmdval_t val)
@@ -14406,7 +14421,7 @@ index 5edd174..c395822 100644
static inline void pgd_clear(pgd_t *pgdp)
{
set_pgd(pgdp, __pgd(0));
-@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+@@ -714,6 +726,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
@@ -14428,7 +14443,7 @@ index 5edd174..c395822 100644
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int arch_spin_is_locked(struct arch_spinlock *lock)
-@@ -927,7 +954,7 @@ extern void default_banner(void);
+@@ -930,7 +957,7 @@ extern void default_banner(void);
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
@@ -14437,7 +14452,7 @@ index 5edd174..c395822 100644
#endif
#define INTERRUPT_RETURN \
-@@ -1002,6 +1029,21 @@ extern void default_banner(void);
+@@ -1005,6 +1032,21 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
@@ -14460,7 +14475,7 @@ index 5edd174..c395822 100644
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index 142236e..5446ffbc 100644
+index b3b0ec1..b1cd3eb 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
@@ -14472,7 +14487,7 @@ index 142236e..5446ffbc 100644
struct pv_lazy_ops {
-@@ -97,7 +97,7 @@ struct pv_time_ops {
+@@ -98,7 +98,7 @@ struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
unsigned long (*get_tsc_khz)(void);
@@ -14481,7 +14496,7 @@ index 142236e..5446ffbc 100644
struct pv_cpu_ops {
/* hooks for various privileged instructions */
-@@ -191,7 +191,7 @@ struct pv_cpu_ops {
+@@ -192,7 +192,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
@@ -14490,7 +14505,7 @@ index 142236e..5446ffbc 100644
struct pv_irq_ops {
/*
-@@ -222,7 +222,7 @@ struct pv_apic_ops {
+@@ -223,7 +223,7 @@ struct pv_apic_ops {
unsigned long start_eip,
unsigned long start_esp);
#endif
@@ -14499,7 +14514,7 @@ index 142236e..5446ffbc 100644
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
-@@ -312,6 +312,7 @@ struct pv_mmu_ops {
+@@ -313,6 +313,7 @@ struct pv_mmu_ops {
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
@@ -14507,7 +14522,7 @@ index 142236e..5446ffbc 100644
#endif /* PAGETABLE_LEVELS == 4 */
#endif /* PAGETABLE_LEVELS >= 3 */
-@@ -323,6 +324,12 @@ struct pv_mmu_ops {
+@@ -324,6 +325,12 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
@@ -14520,7 +14535,7 @@ index 142236e..5446ffbc 100644
};
struct arch_spinlock;
-@@ -333,7 +340,7 @@ struct pv_lock_ops {
+@@ -334,7 +341,7 @@ struct pv_lock_ops {
void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
int (*spin_trylock)(struct arch_spinlock *lock);
void (*spin_unlock)(struct arch_spinlock *lock);
@@ -19237,7 +19252,7 @@ index 6ed91d9..6cc365b 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index cb3c591..bc63707 100644
+index cb3c591..7ba137c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -59,6 +59,8 @@
@@ -19379,7 +19394,7 @@ index cb3c591..bc63707 100644
+ pax_force_retaddr
+ retq
+
-+2: ljmpq __KERNEL_CS,1f
++2: ljmpq __KERNEL_CS,1b
+3: ljmpq __KERNEXEC_KERNEL_CS,4f
+4: SET_RDI_INTO_CR0
+ jmp 1b
@@ -20180,6 +20195,31 @@ index cb3c591..bc63707 100644
/*
* Check the special variable on the stack to see if NMIs are
* executing.
+@@ -1712,14 +2102,13 @@ nested_nmi:
+
+ 1:
+ /* Set up the interrupted NMIs stack to jump to repeat_nmi */
+- leaq -1*8(%rsp), %rdx
+- movq %rdx, %rsp
++ subq $8, %rsp
+ CFI_ADJUST_CFA_OFFSET 1*8
+ leaq -10*8(%rsp), %rdx
+ pushq_cfi $__KERNEL_DS
+ pushq_cfi %rdx
+ pushfq_cfi
+- pushq_cfi $__KERNEL_CS
++ pushq_cfi 6*8(%rsp)
+ pushq_cfi $repeat_nmi
+
+ /* Put stack back */
+@@ -1731,6 +2120,7 @@ nested_nmi_out:
+ CFI_RESTORE rdx
+
+ /* No need to check faults here */
++ pax_force_retaddr_bts
+ INTERRUPT_RETURN
+
+ CFI_RESTORE_STATE
@@ -1847,6 +2237,17 @@ end_repeat_nmi:
*/
movq %cr2, %r12
@@ -22050,7 +22090,7 @@ index 676b8c7..870ba04 100644
.spin_is_locked = __ticket_spin_is_locked,
.spin_is_contended = __ticket_spin_is_contended,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 17fff18..5cfa0f4 100644
+index 8bfb335..c1463c6 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
@@ -22095,8 +22135,8 @@ index 17fff18..5cfa0f4 100644
return insn_len;
}
-@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
- preempt_enable();
+@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+ return this_cpu_read(paravirt_lazy_mode);
}
-struct pv_info pv_info = {
@@ -22171,7 +22211,7 @@ index 17fff18..5cfa0f4 100644
#endif
#endif /* PAGETABLE_LEVELS >= 3 */
-@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
},
.set_fixmap = native_set_fixmap,
@@ -24530,7 +24570,7 @@ index c243b81..b692af3 100644
kvm_pmu_init(vcpu);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index df4176c..23ce092 100644
+index 20a4fd4..d806083 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
@@ -27471,7 +27511,7 @@ index 903ec1e..c4166b2 100644
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index fb674fd..223a693 100644
+index 4f7d793..165a8be 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,12 +13,19 @@
@@ -27628,7 +27668,7 @@ index fb674fd..223a693 100644
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
-@@ -541,7 +612,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+@@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
@@ -27637,7 +27677,7 @@ index fb674fd..223a693 100644
return 1;
#endif
return 0;
-@@ -568,7 +639,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+@@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
}
static const char nx_warning[] = KERN_CRIT
@@ -27646,7 +27686,7 @@ index fb674fd..223a693 100644
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-@@ -577,15 +648,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
if (!oops_may_print())
return;
@@ -27676,7 +27716,7 @@ index fb674fd..223a693 100644
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
-@@ -748,6 +831,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+@@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
@@ -27699,7 +27739,7 @@ index fb674fd..223a693 100644
/* Kernel addresses are always protection faults: */
if (address >= TASK_SIZE)
error_code |= PF_PROT;
-@@ -833,7 +932,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+@@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
@@ -27708,7 +27748,7 @@ index fb674fd..223a693 100644
code = BUS_MCEERR_AR;
}
#endif
-@@ -896,6 +995,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+@@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
return 1;
}
@@ -27808,7 +27848,7 @@ index fb674fd..223a693 100644
/*
* Handle a spurious fault caused by a stale TLB entry.
*
-@@ -968,6 +1160,9 @@ int show_unhandled_signals = 1;
+@@ -970,6 +1162,9 @@ int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
@@ -27818,7 +27858,7 @@ index fb674fd..223a693 100644
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -996,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
+@@ -998,7 +1193,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
if (error_code & PF_USER)
return false;
@@ -27827,7 +27867,7 @@ index fb674fd..223a693 100644
return false;
return true;
-@@ -1012,18 +1207,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1014,18 +1209,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -27866,7 +27906,7 @@ index fb674fd..223a693 100644
/*
* Detect and handle instructions that would cause a page fault for
-@@ -1084,7 +1294,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1086,7 +1296,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
@@ -27875,7 +27915,7 @@ index fb674fd..223a693 100644
local_irq_enable();
error_code |= PF_USER;
} else {
-@@ -1146,6 +1356,11 @@ retry:
+@@ -1148,6 +1358,11 @@ retry:
might_sleep();
}
@@ -27887,7 +27927,7 @@ index fb674fd..223a693 100644
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
-@@ -1157,18 +1372,24 @@ retry:
+@@ -1159,18 +1374,24 @@ retry:
bad_area(regs, error_code, address);
return;
}
@@ -27923,7 +27963,7 @@ index fb674fd..223a693 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1232,3 +1453,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1234,3 +1455,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
__do_page_fault(regs, error_code);
exception_exit(regs);
}
@@ -29393,10 +29433,10 @@ index 9f0614d..92ae64a 100644
p += get_opcode(p, &opcode);
for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index e27fbf8..8b56dc9 100644
+index 395b3b4a..213e72b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
-@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
+@@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
list_del(&page->lru);
}
@@ -29463,7 +29503,7 @@ index e27fbf8..8b56dc9 100644
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
-@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
pgd_list_del(pgd);
spin_unlock(&pgd_lock);
}
@@ -29471,7 +29511,7 @@ index e27fbf8..8b56dc9 100644
/*
* List of all pgd's needed for non-PAE so it can invalidate entries
-@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
* -- nyc
*/
@@ -29480,7 +29520,7 @@ index e27fbf8..8b56dc9 100644
/*
* In PAE mode, we need to do a cr3 reload (=tlb flush) when
* updating the top-level pagetable entries to guarantee the
-@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
+@@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
* not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
* and initialize the kernel pmds here.
*/
@@ -29489,7 +29529,7 @@ index e27fbf8..8b56dc9 100644
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
-@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
+@@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
*/
flush_tlb_mm(mm);
}
@@ -29539,7 +29579,7 @@ index e27fbf8..8b56dc9 100644
return -ENOMEM;
}
-@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
+@@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
@@ -29612,7 +29652,7 @@ index e27fbf8..8b56dc9 100644
pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
-@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
mm->pgd = pgd;
@@ -29626,7 +29666,7 @@ index e27fbf8..8b56dc9 100644
/*
* Make sure that pre-populating the pmds is atomic with
-@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
spin_lock(&pgd_lock);
pgd_ctor(mm, pgd);
@@ -29644,7 +29684,7 @@ index e27fbf8..8b56dc9 100644
out_free_pgd:
free_page((unsigned long)pgd);
out:
-@@ -295,7 +356,7 @@ out:
+@@ -302,7 +363,7 @@ out:
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
@@ -31562,7 +31602,7 @@ index 2262003..3ee61cf 100644
};
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 01de35c..692023f 100644
+index cab96b6..8c629ba 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1739,14 +1739,18 @@ static void *m2v(phys_addr_t maddr)
@@ -39861,7 +39901,7 @@ index 4c83003..2a2a5b9 100644
break;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
-index 4d6f3c5..6169e60 100644
+index 4d6f3c5..449bc5c 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
@@ -39873,6 +39913,18 @@ index 4d6f3c5..6169e60 100644
}
void be_parse_stats(struct be_adapter *adapter)
+@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+- __vlan_put_tag(skb, vlan_tag);
+- skb->vlan_tci = 0;
++ skb = __vlan_put_tag(skb, vlan_tag);
++ if (skb)
++ skb->vlan_tci = 0;
+ }
+
+ return skb;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 74d749e..eefb1bd 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
@@ -40290,9 +40342,18 @@ index 8efe47a..a8075c5 100644
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index cb95fe5..a5bdab5 100644
+index cb95fe5..16909e2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
+@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+
+ if (tun->flags & TUN_TAP_MQ &&
+ (tun->numqueues + tun->numdisabled > 1))
+- return err;
++ return -EBUSY;
+ }
+ else {
+ char *name;
@@ -1838,7 +1838,7 @@ unlock:
}
@@ -40312,6 +40373,19 @@ index cb95fe5..a5bdab5 100644
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index 16c8429..6bd9167 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
+ goto error;
+
+ if (skb) {
+- if (skb->len <= sizeof(ETH_HLEN))
++ if (skb->len <= ETH_HLEN)
+ goto error;
+
+ /* mapping VLANs to MBIM sessions:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cd8ccb2..cff5144 100644
--- a/drivers/net/usb/hso.c
@@ -44281,7 +44355,7 @@ index da9fde8..c07975f 100644
/*
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
-index c578229..45aa9ee 100644
+index 78f1be2..3e98910 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
@@ -50731,10 +50805,10 @@ index b2a34a1..162fa69 100644
return rc;
}
diff --git a/fs/exec.c b/fs/exec.c
-index 20df02c..1b1d946 100644
+index 20df02c..9a87617 100644
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -55,6 +55,17 @@
+@@ -55,8 +55,20 @@
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
@@ -50751,8 +50825,11 @@ index 20df02c..1b1d946 100644
+#include <trace/events/fs.h>
#include <asm/uaccess.h>
++#include <asm/sections.h>
#include <asm/mmu_context.h>
-@@ -66,6 +77,18 @@
+ #include <asm/tlb.h>
+
+@@ -66,6 +78,18 @@
#include <trace/events/sched.h>
@@ -50771,7 +50848,7 @@ index 20df02c..1b1d946 100644
int suid_dumpable = 0;
static LIST_HEAD(formats);
-@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
+@@ -75,8 +99,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
BUG_ON(!fmt);
write_lock(&binfmt_lock);
@@ -50782,7 +50859,7 @@ index 20df02c..1b1d946 100644
write_unlock(&binfmt_lock);
}
-@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
+@@ -85,7 +109,7 @@ EXPORT_SYMBOL(__register_binfmt);
void unregister_binfmt(struct linux_binfmt * fmt)
{
write_lock(&binfmt_lock);
@@ -50791,7 +50868,7 @@ index 20df02c..1b1d946 100644
write_unlock(&binfmt_lock);
}
-@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+@@ -180,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
@@ -50813,7 +50890,7 @@ index 20df02c..1b1d946 100644
return NULL;
if (write) {
-@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+@@ -207,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (size <= ARG_MAX)
return page;
@@ -50831,7 +50908,7 @@ index 20df02c..1b1d946 100644
/*
* Limit to 1/4-th the stack size for the argv+env strings.
* This ensures that:
-@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+@@ -266,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
vma->vm_end = STACK_TOP_MAX;
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
@@ -50843,7 +50920,7 @@ index 20df02c..1b1d946 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
INIT_LIST_HEAD(&vma->anon_vma_chain);
-@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
+@@ -276,6 +308,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
mm->stack_vm = mm->total_vm = 1;
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
@@ -50856,7 +50933,7 @@ index 20df02c..1b1d946 100644
return 0;
err:
up_write(&mm->mmap_sem);
-@@ -396,7 +433,7 @@ struct user_arg_ptr {
+@@ -396,7 +434,7 @@ struct user_arg_ptr {
} ptr;
};
@@ -50865,7 +50942,7 @@ index 20df02c..1b1d946 100644
{
const char __user *native;
-@@ -405,14 +442,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
+@@ -405,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
compat_uptr_t compat;
if (get_user(compat, argv.ptr.compat + nr))
@@ -50882,7 +50959,7 @@ index 20df02c..1b1d946 100644
return native;
}
-@@ -431,7 +468,7 @@ static int count(struct user_arg_ptr argv, int max)
+@@ -431,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
if (!p)
break;
@@ -50891,7 +50968,7 @@ index 20df02c..1b1d946 100644
return -EFAULT;
if (i >= max)
-@@ -466,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
+@@ -466,7 +504,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
ret = -EFAULT;
str = get_user_arg_ptr(argv, argc);
@@ -50900,7 +50977,7 @@ index 20df02c..1b1d946 100644
goto out;
len = strnlen_user(str, MAX_ARG_STRLEN);
-@@ -548,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
+@@ -548,7 +586,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
int r;
mm_segment_t oldfs = get_fs();
struct user_arg_ptr argv = {
@@ -50909,7 +50986,7 @@ index 20df02c..1b1d946 100644
};
set_fs(KERNEL_DS);
-@@ -583,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -583,7 +621,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
unsigned long new_end = old_end - shift;
struct mmu_gather tlb;
@@ -50919,7 +50996,7 @@ index 20df02c..1b1d946 100644
/*
* ensure there are no vmas between where we want to go
-@@ -592,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+@@ -592,6 +631,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
if (vma != find_vma(mm, new_start))
return -EFAULT;
@@ -50930,7 +51007,7 @@ index 20df02c..1b1d946 100644
/*
* cover the whole range: [new_start, old_end)
*/
-@@ -672,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -672,10 +715,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
stack_top = arch_align_stack(stack_top);
stack_top = PAGE_ALIGN(stack_top);
@@ -50941,7 +51018,7 @@ index 20df02c..1b1d946 100644
stack_shift = vma->vm_end - stack_top;
bprm->p -= stack_shift;
-@@ -687,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -687,8 +726,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->exec -= stack_shift;
down_write(&mm->mmap_sem);
@@ -50970,7 +51047,7 @@ index 20df02c..1b1d946 100644
/*
* Adjust stack execute permissions; explicitly enable for
* EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
-@@ -707,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -707,13 +766,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
goto out_unlock;
BUG_ON(prev != vma);
@@ -50984,7 +51061,7 @@ index 20df02c..1b1d946 100644
/* mprotect_fixup is overkill to remove the temporary stack flags */
vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
-@@ -737,6 +788,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
+@@ -737,6 +789,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
#endif
current->mm->start_stack = bprm->p;
ret = expand_stack(vma, stack_base);
@@ -51012,7 +51089,7 @@ index 20df02c..1b1d946 100644
if (ret)
ret = -EFAULT;
-@@ -772,6 +844,8 @@ struct file *open_exec(const char *name)
+@@ -772,6 +845,8 @@ struct file *open_exec(const char *name)
fsnotify_open(file);
@@ -51021,7 +51098,7 @@ index 20df02c..1b1d946 100644
err = deny_write_access(file);
if (err)
goto exit;
-@@ -795,7 +869,7 @@ int kernel_read(struct file *file, loff_t offset,
+@@ -795,7 +870,7 @@ int kernel_read(struct file *file, loff_t offset,
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
@@ -51030,7 +51107,7 @@ index 20df02c..1b1d946 100644
set_fs(old_fs);
return result;
}
-@@ -1247,7 +1321,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1247,7 +1322,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
@@ -51039,7 +51116,7 @@ index 20df02c..1b1d946 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
} else {
res = -EAGAIN;
-@@ -1447,6 +1521,31 @@ int search_binary_handler(struct linux_binprm *bprm)
+@@ -1447,6 +1522,31 @@ int search_binary_handler(struct linux_binprm *bprm)
EXPORT_SYMBOL(search_binary_handler);
@@ -51071,7 +51148,7 @@ index 20df02c..1b1d946 100644
/*
* sys_execve() executes a new program.
*/
-@@ -1454,6 +1553,11 @@ static int do_execve_common(const char *filename,
+@@ -1454,6 +1554,11 @@ static int do_execve_common(const char *filename,
struct user_arg_ptr argv,
struct user_arg_ptr envp)
{
@@ -51083,7 +51160,7 @@ index 20df02c..1b1d946 100644
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
-@@ -1461,6 +1565,8 @@ static int do_execve_common(const char *filename,
+@@ -1461,6 +1566,8 @@ static int do_execve_common(const char *filename,
int retval;
const struct cred *cred = current_cred();
@@ -51092,7 +51169,7 @@ index 20df02c..1b1d946 100644
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
-@@ -1501,12 +1607,27 @@ static int do_execve_common(const char *filename,
+@@ -1501,12 +1608,27 @@ static int do_execve_common(const char *filename,
if (IS_ERR(file))
goto out_unmark;
@@ -51120,7 +51197,7 @@ index 20df02c..1b1d946 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_file;
-@@ -1523,24 +1644,65 @@ static int do_execve_common(const char *filename,
+@@ -1523,24 +1645,65 @@ static int do_execve_common(const char *filename,
if (retval < 0)
goto out;
@@ -51190,7 +51267,7 @@ index 20df02c..1b1d946 100644
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
-@@ -1549,6 +1711,14 @@ static int do_execve_common(const char *filename,
+@@ -1549,6 +1712,14 @@ static int do_execve_common(const char *filename,
put_files_struct(displaced);
return retval;
@@ -51205,7 +51282,7 @@ index 20df02c..1b1d946 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1697,3 +1867,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+@@ -1697,3 +1868,278 @@ asmlinkage long compat_sys_execve(const char __user * filename,
return error;
}
#endif
@@ -51417,6 +51494,24 @@ index 20df02c..1b1d946 100644
+}
+#endif
+
++#ifdef CONFIG_PAX_USERCOPY
++static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
++{
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ unsigned long textlow = ktla_ktva((unsigned long)_stext);
++ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
++#else
++ unsigned long textlow = _stext;
++ unsigned long texthigh = _etext;
++#endif
++
++ if (high < textlow || low > texthigh)
++ return false;
++ else
++ return true;
++}
++#endif
++
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+
@@ -51428,9 +51523,16 @@ index 20df02c..1b1d946 100644
+
+ type = check_heap_object(ptr, n);
+ if (!type) {
-+ if (check_stack_object(ptr, n) != -1)
++ int ret = check_stack_object(ptr, n);
++ if (ret == 1 || ret == 2)
+ return;
-+ type = "<process stack>";
++ if (ret == 0) {
++ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
++ type = "<kernel text>";
++ else
++ return;
++ } else
++ type = "<process stack>";
+ }
+
+ pax_report_usercopy(ptr, n, to_user, type);
@@ -53368,6 +53470,19 @@ index 2b6f569..fcb4d1f 100644
if (!IS_ERR(s))
kfree(s);
}
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index eba76eab..fc8ddc1 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode)
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 78bde32..767e906 100644
--- a/fs/hugetlbfs/inode.c
@@ -53419,7 +53534,7 @@ index 78bde32..767e906 100644
static int can_do_hugetlb_shm(void)
{
diff --git a/fs/inode.c b/fs/inode.c
-index 14084b7..29af1d9 100644
+index b98540e..6a439ea 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
@@ -69826,6 +69941,19 @@ index f3b99e1..9b73cee 100644
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 85ac9b9b..e5759ab 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
+-static inline resource_size_t resource_size(const struct resource *res)
++static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
+ }
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fdf2c4a..5332486 100644
--- a/include/linux/irq.h
@@ -74139,7 +74267,7 @@ index f3f40dc..ffe5a3a 100644
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
diff --git a/ipc/msg.c b/ipc/msg.c
-index 31cd1bf..9778e0f8 100644
+index fede1d0..9778e0f8 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
@@ -74167,14 +74295,6 @@ index 31cd1bf..9778e0f8 100644
msg_params.key = key;
msg_params.flg = msgflg;
-@@ -872,6 +873,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
- goto out_unlock;
- break;
- }
-+ msg = ERR_PTR(-EAGAIN);
- } else
- break;
- msg_counter++;
diff --git a/ipc/sem.c b/ipc/sem.c
index 58d31f1..cce7a55 100644
--- a/ipc/sem.c
@@ -78376,7 +78496,7 @@ index 81fa536..6ccf96a 100644
int this_cpu = smp_processor_id();
struct rq *this_rq = cpu_rq(this_cpu);
diff --git a/kernel/signal.c b/kernel/signal.c
-index dec9c30..d1da15b 100644
+index dec9c30..92c8f65 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
@@ -78502,6 +78622,15 @@ index dec9c30..d1da15b 100644
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, info, p);
/*
+@@ -2880,7 +2911,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+
+ static int do_tkill(pid_t tgid, pid_t pid, int sig)
+ {
+- struct siginfo info;
++ struct siginfo info = {};
+
+ info.si_signo = sig;
+ info.si_errno = 0;
@@ -3138,8 +3169,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
}
seg = get_fs();
@@ -78664,7 +78793,7 @@ index 2f194e9..2c05ea9 100644
.priority = 10,
};
diff --git a/kernel/sys.c b/kernel/sys.c
-index 265b376..4e42ef5 100644
+index 47f1d1b..04c769e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
@@ -78680,7 +78809,7 @@ index 265b376..4e42ef5 100644
no_nice = security_task_setnice(p, niceval);
if (no_nice) {
error = no_nice;
-@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
+@@ -596,6 +602,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
goto error;
}
@@ -78690,7 +78819,7 @@ index 265b376..4e42ef5 100644
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
new->sgid = new->egid;
-@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
+@@ -631,6 +640,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
old = current_cred();
retval = -EPERM;
@@ -78701,7 +78830,7 @@ index 265b376..4e42ef5 100644
if (nsown_capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = kgid;
else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
-@@ -647,7 +660,7 @@ error:
+@@ -648,7 +661,7 @@ error:
/*
* change the user struct in a credentials set to match the new UID
*/
@@ -78710,7 +78839,7 @@ index 265b376..4e42ef5 100644
{
struct user_struct *new_user;
-@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
+@@ -728,6 +741,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
goto error;
}
@@ -78720,7 +78849,7 @@ index 265b376..4e42ef5 100644
if (!uid_eq(new->uid, old->uid)) {
retval = set_user(new);
if (retval < 0)
-@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
+@@ -778,6 +794,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
old = current_cred();
retval = -EPERM;
@@ -78733,7 +78862,7 @@ index 265b376..4e42ef5 100644
if (nsown_capable(CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
-@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
+@@ -847,6 +869,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
goto error;
}
@@ -78743,7 +78872,7 @@ index 265b376..4e42ef5 100644
if (ruid != (uid_t) -1) {
new->uid = kruid;
if (!uid_eq(kruid, old->uid)) {
-@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
+@@ -929,6 +954,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
goto error;
}
@@ -78753,17 +78882,16 @@ index 265b376..4e42ef5 100644
if (rgid != (gid_t) -1)
new->gid = krgid;
if (egid != (gid_t) -1)
-@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
- if (!uid_valid(kuid))
- return old_fsuid;
-
-+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
-+ goto error;
+@@ -990,12 +1018,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
+ nsown_capable(CAP_SETUID)) {
+ if (!uid_eq(kuid, old->fsuid)) {
++ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
++ goto error;
+
- new = prepare_creds();
- if (!new)
- return old_fsuid;
-@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
+ new->fsuid = kuid;
+ if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
+ goto change_okay;
}
}
@@ -78771,7 +78899,7 @@ index 265b376..4e42ef5 100644
abort_creds(new);
return old_fsuid;
-@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
+@@ -1028,12 +1060,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
nsown_capable(CAP_SETGID)) {
@@ -78788,7 +78916,7 @@ index 265b376..4e42ef5 100644
abort_creds(new);
return old_fsgid;
-@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+@@ -1341,19 +1377,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
return -EFAULT;
down_read(&uts_sem);
@@ -78813,7 +78941,7 @@ index 265b376..4e42ef5 100644
__OLD_UTS_LEN);
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
-@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+@@ -2027,7 +2063,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = get_dumpable(me->mm);
break;
case PR_SET_DUMPABLE:
@@ -79374,18 +79502,10 @@ index c0bd030..62a1927 100644
ret = -EIO;
bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 64bc5d8..1ed69e2 100644
+index 35cc3a8..2a47da3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -668,7 +668,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
- free_page(tmp);
- }
-
-- free_page((unsigned long)stat->pages);
- stat->pages = NULL;
- stat->start = NULL;
-
-@@ -1874,12 +1873,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -1886,12 +1886,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
@@ -79405,7 +79525,7 @@ index 64bc5d8..1ed69e2 100644
}
/*
-@@ -2965,7 +2969,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+@@ -2964,7 +2969,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
@@ -79414,7 +79534,7 @@ index 64bc5d8..1ed69e2 100644
{
struct ftrace_func_probe *entry;
struct ftrace_page *pg;
-@@ -3832,8 +3836,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -3831,8 +3836,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
@@ -79425,7 +79545,7 @@ index 64bc5d8..1ed69e2 100644
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
-@@ -4555,8 +4561,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+@@ -4554,8 +4561,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
@@ -79434,7 +79554,7 @@ index 64bc5d8..1ed69e2 100644
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
return 0;
-@@ -4700,6 +4704,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+@@ -4699,6 +4704,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
return NOTIFY_DONE;
}
@@ -79445,7 +79565,7 @@ index 64bc5d8..1ed69e2 100644
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
-@@ -4713,7 +4721,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+@@ -4712,7 +4721,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
goto out;
}
@@ -79884,7 +80004,7 @@ index 194d796..76edb8f 100644
key = event->type & (EVENT_HASHSIZE - 1);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
-index 42ca822..cdcacc6 100644
+index 83a8b5b..0bf39a9 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -52,7 +52,7 @@ static inline void check_stack(void)
@@ -80235,10 +80355,10 @@ index bd2bea9..6b3c95e 100644
return false;
diff --git a/lib/kobject.c b/lib/kobject.c
-index e07ee1f..998489d 100644
+index a654866..a4fd13d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
-@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
@@ -81071,10 +81191,10 @@ index c6e4dd3..1f41988 100644
/* keep elevated page count for bad page */
return ret;
diff --git a/mm/memory.c b/mm/memory.c
-index bb1369f..b9631d2 100644
+index f8b734a..38014f5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -434,6 +434,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
free_pte_range(tlb, pmd, addr);
} while (pmd++, addr = next, addr != end);
@@ -81082,7 +81202,7 @@ index bb1369f..b9631d2 100644
start &= PUD_MASK;
if (start < floor)
return;
-@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -448,6 +449,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
@@ -81091,7 +81211,7 @@ index bb1369f..b9631d2 100644
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -467,6 +470,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
free_pmd_range(tlb, pud, addr, next, floor, ceiling);
} while (pud++, addr = next, addr != end);
@@ -81099,7 +81219,7 @@ index bb1369f..b9631d2 100644
start &= PGDIR_MASK;
if (start < floor)
return;
-@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -481,6 +485,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
@@ -81108,7 +81228,7 @@ index bb1369f..b9631d2 100644
}
/*
-@@ -1618,12 +1624,6 @@ no_page_table:
+@@ -1619,12 +1625,6 @@ no_page_table:
return page;
}
@@ -81121,7 +81241,7 @@ index bb1369f..b9631d2 100644
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
-@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1710,10 +1710,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
i = 0;
@@ -81134,7 +81254,7 @@ index bb1369f..b9631d2 100644
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
pgd_t *pgd;
-@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1761,7 +1761,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
goto next_page;
}
@@ -81143,7 +81263,7 @@ index bb1369f..b9631d2 100644
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1788,11 +1788,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
@@ -81155,7 +81275,7 @@ index bb1369f..b9631d2 100644
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
-@@ -1865,7 +1860,7 @@ next_page:
+@@ -1866,7 +1861,7 @@ next_page:
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -81164,7 +81284,7 @@ index bb1369f..b9631d2 100644
return i;
}
EXPORT_SYMBOL(__get_user_pages);
-@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2073,6 +2068,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -81175,7 +81295,7 @@ index bb1369f..b9631d2 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2117,9 +2116,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -81197,7 +81317,7 @@ index bb1369f..b9631d2 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2202,6 +2213,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -81205,7 +81325,7 @@ index bb1369f..b9631d2 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2402,7 +2414,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -81216,7 +81336,7 @@ index bb1369f..b9631d2 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2422,7 +2436,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -81227,7 +81347,7 @@ index bb1369f..b9631d2 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2510,6 +2526,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
copy_user_highpage(dst, src, va, vma);
}
@@ -81414,7 +81534,7 @@ index bb1369f..b9631d2 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2725,6 +2921,12 @@ gotten:
+@@ -2726,6 +2922,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -81427,7 +81547,7 @@ index bb1369f..b9631d2 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2776,6 +2978,10 @@ gotten:
+@@ -2777,6 +2979,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -81438,7 +81558,7 @@ index bb1369f..b9631d2 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3052,6 +3258,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -81450,7 +81570,7 @@ index bb1369f..b9631d2 100644
unlock_page(page);
if (swapcache) {
/*
-@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3075,6 +3286,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -81462,7 +81582,7 @@ index bb1369f..b9631d2 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -3093,40 +3309,6 @@ out_release:
+@@ -3094,40 +3310,6 @@ out_release:
}
/*
@@ -81503,7 +81623,7 @@ index bb1369f..b9631d2 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3136,27 +3318,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -81536,7 +81656,7 @@ index bb1369f..b9631d2 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3175,6 +3353,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -81548,7 +81668,7 @@ index bb1369f..b9631d2 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -3181,6 +3364,12 @@ setpte:
+@@ -3182,6 +3365,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -81561,7 +81681,7 @@ index bb1369f..b9631d2 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3325,6 +3514,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -81574,7 +81694,7 @@ index bb1369f..b9631d2 100644
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3344,6 +3539,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -81589,7 +81709,7 @@ index bb1369f..b9631d2 100644
} else {
if (cow_page)
mem_cgroup_uncharge_page(cow_page);
-@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3665,6 +3868,12 @@ int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -81602,7 +81722,7 @@ index bb1369f..b9631d2 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3681,6 +3890,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -81613,7 +81733,7 @@ index bb1369f..b9631d2 100644
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3692,6 +3905,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -81648,7 +81768,7 @@ index bb1369f..b9631d2 100644
retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
-@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3790,6 +4031,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -81672,7 +81792,7 @@ index bb1369f..b9631d2 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3819,11 +4077,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3820,11 +4078,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -81710,7 +81830,7 @@ index bb1369f..b9631d2 100644
struct vm_area_struct * vma;
vma = find_vma(current->mm, addr);
-@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
+@@ -3857,7 +4139,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -81719,7 +81839,7 @@ index bb1369f..b9631d2 100644
return 0;
}
-@@ -3990,8 +4272,8 @@ out:
+@@ -3991,8 +4273,8 @@ out:
return ret;
}
@@ -81730,7 +81850,7 @@ index bb1369f..b9631d2 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -4016,8 +4298,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4017,8 +4299,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -81741,7 +81861,7 @@ index bb1369f..b9631d2 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -4025,7 +4307,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4026,7 +4308,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -81750,7 +81870,7 @@ index bb1369f..b9631d2 100644
void *maddr;
struct page *page = NULL;
-@@ -4084,8 +4366,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4085,8 +4367,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -81761,7 +81881,7 @@ index bb1369f..b9631d2 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -4095,11 +4377,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4096,11 +4378,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -86649,9 +86769,30 @@ index a8e4f26..25e5f40 100644
#endif
if (dflt != &ipv4_devconf_dflt)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
-index 3b4f0cd..8cb864c 100644
+index 3b4f0cd..a6ba66e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
+@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ /* skb is pure payload to encrypt */
+
+- err = -ENOMEM;
+-
+ esp = x->data;
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
+- if (!tmp)
++ if (!tmp) {
++ err = -ENOMEM;
+ goto error;
++ }
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
return;
@@ -87174,6 +87315,21 @@ index a0fcc47..32e2c89 100644
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
return 0;
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index b236ef04..f962f19 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -348,8 +348,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ * hasn't changed since we received the original syn, but I see
+ * no easy way to do this.
+ */
+- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
++ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
++ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+ inet_sk_flowi_flags(sk),
+ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+ ireq->loc_addr, th->source, th->dest);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d84400b..62e066e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
@@ -87438,6 +87594,25 @@ index f35f2df..ccb5ca6 100644
} else if (fastopen) { /* received a valid RST pkt */
reqsk_fastopen_remove(sk, req, true);
tcp_reset(sk);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 17d659e..a9f50ee 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ */
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
+- /* make sure skb->data is aligned on arches that require it */
+- if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
++ /* make sure skb->data is aligned on arches that require it
++ * and check if ack-trimming & collapsing extended the headroom
++ * beyond what csum_start can cover.
++ */
++ if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
++ skb_headroom(skb) >= 0xFFFF)) {
+ struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 4526fe6..1a34e43 100644
--- a/net/ipv4/tcp_probe.c
@@ -91324,10 +91499,10 @@ index e4fd45b..2eeb5c4 100644
shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
diff --git a/security/Kconfig b/security/Kconfig
-index e9c6ac7..20df9f1 100644
+index e9c6ac7..4cb4ecc 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,944 @@
+@@ -4,6 +4,943 @@
menu "Security options"
@@ -92167,7 +92342,7 @@ index e9c6ac7..20df9f1 100644
+config PAX_CONSTIFY_PLUGIN
+ bool "Automatically constify eligible structures"
+ default y
-+ depends on !UML
++ depends on !UML && PAX_KERNEXEC
+ help
+ By saying Y here the compiler will automatically constify a class
+ of types that contain only function pointers. This reduces the
@@ -92213,7 +92388,6 @@ index e9c6ac7..20df9f1 100644
+ Since this has a negligible performance impact, you should enable
+ this feature.
+
-+
+config PAX_USERCOPY_DEBUG
+ bool
+ depends on X86 && PAX_USERCOPY
@@ -92272,7 +92446,7 @@ index e9c6ac7..20df9f1 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -103,7 +1041,7 @@ config INTEL_TXT
+@@ -103,7 +1040,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
diff --git a/3.8.7/4425_grsec_remove_EI_PAX.patch b/3.8.8/4425_grsec_remove_EI_PAX.patch
index 7d06ac2..7d06ac2 100644
--- a/3.8.7/4425_grsec_remove_EI_PAX.patch
+++ b/3.8.8/4425_grsec_remove_EI_PAX.patch
diff --git a/3.8.7/4430_grsec-remove-localversion-grsec.patch b/3.8.8/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.8.7/4430_grsec-remove-localversion-grsec.patch
+++ b/3.8.8/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.8.7/4435_grsec-mute-warnings.patch b/3.8.8/4435_grsec-mute-warnings.patch
index ed941d5..ed941d5 100644
--- a/3.8.7/4435_grsec-mute-warnings.patch
+++ b/3.8.8/4435_grsec-mute-warnings.patch
diff --git a/3.8.7/4440_grsec-remove-protected-paths.patch b/3.8.8/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.8.7/4440_grsec-remove-protected-paths.patch
+++ b/3.8.8/4440_grsec-remove-protected-paths.patch
diff --git a/3.8.7/4450_grsec-kconfig-default-gids.patch b/3.8.8/4450_grsec-kconfig-default-gids.patch
index 7c20c40..7c20c40 100644
--- a/3.8.7/4450_grsec-kconfig-default-gids.patch
+++ b/3.8.8/4450_grsec-kconfig-default-gids.patch
diff --git a/3.8.7/4465_selinux-avc_audit-log-curr_ip.patch b/3.8.8/4465_selinux-avc_audit-log-curr_ip.patch
index 0a309c8..0a309c8 100644
--- a/3.8.7/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.8.8/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.8.7/4470_disable-compat_vdso.patch b/3.8.8/4470_disable-compat_vdso.patch
index 3ef36aa..3ef36aa 100644
--- a/3.8.7/4470_disable-compat_vdso.patch
+++ b/3.8.8/4470_disable-compat_vdso.patch