author     Anthony G. Basile <blueness@gentoo.org>   2016-11-28 14:23:36 -0500
committer  Anthony G. Basile <blueness@gentoo.org>   2016-11-28 14:23:36 -0500
commit     58e7d4d2f3ab534cfb4a714a2654b406abdab3a1 (patch)
tree       8e7a4f62c30af2f3030abc096f0795dfceee68e5
parent     grsecurity-3.1-4.8.10-201611232213 (diff)
download   hardened-patchset-20161127.tar.gz
           hardened-patchset-20161127.tar.bz2
           hardened-patchset-20161127.zip
grsecurity-3.1-4.8.11-201611271225 (tag: 20161127)
-rw-r--r--  4.8.11/0000_README (renamed from 4.8.10/0000_README) | 6
-rw-r--r--  4.8.11/1008_linux-4.8.9.patch (renamed from 4.8.10/1008_linux-4.8.9.patch) | 0
-rw-r--r--  4.8.11/1009_linux-4.8.10.patch (renamed from 4.8.10/1009_linux-4.8.10.patch) | 0
-rw-r--r--  4.8.11/1010_linux-4.8.11.patch | 2351
-rw-r--r--  4.8.11/4420_grsecurity-3.1-4.8.11-201611271225.patch (renamed from 4.8.10/4420_grsecurity-3.1-4.8.10-201611232213.patch) | 594
-rw-r--r--  4.8.11/4425_grsec_remove_EI_PAX.patch (renamed from 4.8.10/4425_grsec_remove_EI_PAX.patch) | 0
-rw-r--r--  4.8.11/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.8.10/4427_force_XATTR_PAX_tmpfs.patch) | 0
-rw-r--r--  4.8.11/4430_grsec-remove-localversion-grsec.patch (renamed from 4.8.10/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r--  4.8.11/4435_grsec-mute-warnings.patch (renamed from 4.8.10/4435_grsec-mute-warnings.patch) | 0
-rw-r--r--  4.8.11/4440_grsec-remove-protected-paths.patch (renamed from 4.8.10/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r--  4.8.11/4450_grsec-kconfig-default-gids.patch (renamed from 4.8.10/4450_grsec-kconfig-default-gids.patch) | 0
-rw-r--r--  4.8.11/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.8.10/4465_selinux-avc_audit-log-curr_ip.patch) | 0
-rw-r--r--  4.8.11/4470_disable-compat_vdso.patch (renamed from 4.8.10/4470_disable-compat_vdso.patch) | 0
-rw-r--r--  4.8.11/4475_emutramp_default_on.patch (renamed from 4.8.10/4475_emutramp_default_on.patch) | 0
14 files changed, 2848 insertions(+), 103 deletions(-)
diff --git a/4.8.10/0000_README b/4.8.11/0000_README
index c32fdff..a5a0953 100644
--- a/4.8.10/0000_README
+++ b/4.8.11/0000_README
@@ -10,7 +10,11 @@ Patch: 1009_linux-4.8.10.patch
From: http://www.kernel.org
Desc: Linux 4.8.10
-Patch: 4420_grsecurity-3.1-4.8.10-201611232213.patch
+Patch: 1010_linux-4.8.11.patch
+From: http://www.kernel.org
+Desc: Linux 4.8.11
+
+Patch: 4420_grsecurity-3.1-4.8.11-201611271225.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.8.10/1008_linux-4.8.9.patch b/4.8.11/1008_linux-4.8.9.patch
index 2f909eb..2f909eb 100644
--- a/4.8.10/1008_linux-4.8.9.patch
+++ b/4.8.11/1008_linux-4.8.9.patch
diff --git a/4.8.10/1009_linux-4.8.10.patch b/4.8.11/1009_linux-4.8.10.patch
index 1e751e5..1e751e5 100644
--- a/4.8.10/1009_linux-4.8.10.patch
+++ b/4.8.11/1009_linux-4.8.10.patch
diff --git a/4.8.11/1010_linux-4.8.11.patch b/4.8.11/1010_linux-4.8.11.patch
new file mode 100644
index 0000000..5c67d71
--- /dev/null
+++ b/4.8.11/1010_linux-4.8.11.patch
@@ -0,0 +1,2351 @@
+diff --git a/Makefile b/Makefile
+index 7cf2b49..2b1bcba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+- -std=gnu89
++ -std=gnu89 $(call cc-option,-fno-PIE)
++
+
+ KBUILD_AFLAGS_KERNEL :=
+ KBUILD_CFLAGS_KERNEL :=
+-KBUILD_AFLAGS := -D__ASSEMBLY__
++KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS_MODULE := -DMODULE
+ KBUILD_CFLAGS_MODULE := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+@@ -621,6 +622,7 @@ include arch/$(SRCARCH)/Makefile
+
+ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
+ KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS += -Os
+diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
+index dec4b07..3799396 100644
+--- a/arch/arm/boot/dts/imx53-qsb.dts
++++ b/arch/arm/boot/dts/imx53-qsb.dts
+@@ -64,8 +64,8 @@
+ };
+
+ ldo3_reg: ldo3 {
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1800000>;
++ regulator-min-microvolt = <1725000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+@@ -76,8 +76,8 @@
+ };
+
+ ldo5_reg: ldo5 {
+- regulator-min-microvolt = <1725000>;
+- regulator-max-microvolt = <3300000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+@@ -100,14 +100,14 @@
+ };
+
+ ldo9_reg: ldo9 {
+- regulator-min-microvolt = <1200000>;
++ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+ ldo10_reg: ldo10 {
+- regulator-min-microvolt = <1250000>;
+- regulator-max-microvolt = <3650000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+ };
+diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
+index 2065f46..38b6a2b 100644
+--- a/arch/arm64/include/asm/perf_event.h
++++ b/arch/arm64/include/asm/perf_event.h
+@@ -46,7 +46,15 @@
+ #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
+ #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+
+-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
++/*
++ * PMUv3 event types: required events
++ */
++#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
++#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
++#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
++#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
++#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
++#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+
+ /*
+ * Event filters for PMUv3
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 838ccf1..2c4df15 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -30,17 +30,9 @@
+
+ /*
+ * ARMv8 PMUv3 Performance Events handling code.
+- * Common event types.
++ * Common event types (some are defined in asm/perf_event.h).
+ */
+
+-/* Required events. */
+-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
+-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
+-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
+-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
+-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
+-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+-
+ /* At least one of the following is required. */
+ #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
+ #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index e51367d..31c144f 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -602,8 +602,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+
+ idx = ARMV8_PMU_CYCLE_IDX;
+ } else {
+- BUG();
++ return false;
+ }
++ } else if (r->CRn == 0 && r->CRm == 9) {
++ /* PMCCNTR */
++ if (pmu_access_event_counter_el0_disabled(vcpu))
++ return false;
++
++ idx = ARMV8_PMU_CYCLE_IDX;
+ } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+ /* PMEVCNTRn_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+@@ -611,7 +617,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+ } else {
+- BUG();
++ return false;
+ }
+
+ if (!pmu_counter_idx_valid(vcpu, idx))
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 7ac8e6e..8d586cf 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ opal_configure_cores();
+
+- /* Enable AIL if supported, and we are in hypervisor mode */
+- if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+- early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+- unsigned long lpcr = mfspr(SPRN_LPCR);
+- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+- }
++ /* AIL on native is done in cpu_ready_for_interrupts() */
+ }
+ }
+
+ static void cpu_ready_for_interrupts(void)
+ {
++ /*
++ * Enable AIL if supported, and we are in hypervisor mode. This
++ * is called once for every processor.
++ *
++ * If we are not in hypervisor mode the job is done once for
++ * the whole partition in configure_exceptions().
++ */
++ if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
++ early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
++ unsigned long lpcr = mfspr(SPRN_LPCR);
++ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
++ }
++
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+ }
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index b81fe2d..1e81a37 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_SMP
+ unsigned bits;
+ int cpu = smp_processor_id();
+- unsigned int socket_id, core_complex_id;
+
+ bits = c->x86_coreid_bits;
+ /* Low order bits define the core id (index of core in socket) */
+@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
+ if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
+ return;
+
+- socket_id = (c->apicid >> bits) - 1;
+- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
+-
+- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
++ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+ #endif
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 46f74d4..2fff657 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ struct kvm_shared_msrs *locals
+ = container_of(urn, struct kvm_shared_msrs, urn);
+ struct kvm_shared_msr_values *values;
++ unsigned long flags;
+
++ /*
++ * Disabling irqs at this point since the following code could be
++ * interrupted and executed through kvm_arch_hardware_disable()
++ */
++ local_irq_save(flags);
++ if (locals->registered) {
++ locals->registered = false;
++ user_return_notifier_unregister(urn);
++ }
++ local_irq_restore(flags);
+ for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
+ values = &locals->values[slot];
+ if (values->host != values->curr) {
+@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ values->curr = values->host;
+ }
+ }
+- locals->registered = false;
+- user_return_notifier_unregister(urn);
+ }
+
+ static void shared_msr_update(unsigned slot, u32 msr)
+@@ -3372,6 +3381,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ };
+ case KVM_SET_VAPIC_ADDR: {
+ struct kvm_vapic_addr va;
++ int idx;
+
+ r = -EINVAL;
+ if (!lapic_in_kernel(vcpu))
+@@ -3379,7 +3389,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index ac58c16..555b9fa 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
+
+ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
+ KBUILD_CFLAGS += -m$(BITS)
++KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index e44944f..2932a5b 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
++ dpm_wait_for_children(dev, async);
++
+ if (async_error)
+ goto Complete;
+
+@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+- dpm_wait_for_children(dev, async);
+-
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
+
+ __pm_runtime_disable(dev, false);
+
++ dpm_wait_for_children(dev, async);
++
+ if (async_error)
+ goto Complete;
+
+@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+- dpm_wait_for_children(dev, async);
+-
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
+index 19f9b62..7a6acc3 100644
+--- a/drivers/clk/imx/clk-pllv3.c
++++ b/drivers/clk/imx/clk-pllv3.c
+@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+- return (parent_rate * div) + (u32)temp64;
++ return parent_rate * div + (unsigned long)temp64;
+ }
+
+ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+- return parent_rate * div + parent_rate * mfn / mfd;
++ temp64 = (u64)parent_rate;
++ temp64 *= mfn;
++ do_div(temp64, mfd);
++
++ return parent_rate * div + (unsigned long)temp64;
+ }
+
+ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
+index 3a51fff..9adaf48 100644
+--- a/drivers/clk/mmp/clk-of-mmp2.c
++++ b/drivers/clk/mmp/clk-of-mmp2.c
+@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
+index 87f2317..f110c02 100644
+--- a/drivers/clk/mmp/clk-of-pxa168.c
++++ b/drivers/clk/mmp/clk-of-pxa168.c
+@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
+index e22a67f..64d1ef4 100644
+--- a/drivers/clk/mmp/clk-of-pxa910.c
++++ b/drivers/clk/mmp/clk-of-pxa910.c
+@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apbcp_base = of_iomap(np, 3);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apbcp_base) {
+ pr_err("failed to map apbcp registers\n");
+ return;
+ }
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index b304421..2cde379 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void)
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /*
++ * Check support for AES modes not available
++ * on LP devices.
++ */
++ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
++ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_XTS)
++ continue;
++
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 02f2a56..47d08b9 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ break;
+ }
+
+- memcpy(reg_val, chip->reg_output, NBANK(chip));
+ mutex_lock(&chip->i2c_lock);
++ memcpy(reg_val, chip->reg_output, NBANK(chip));
+ for(bank=0; bank<NBANK(chip); bank++) {
+ unsigned bankmask = mask[bank / sizeof(*mask)] >>
+ ((bank % sizeof(*mask)) * 8);
+ if(bankmask) {
+ unsigned bankval = bits[bank / sizeof(*bits)] >>
+ ((bank % sizeof(*bits)) * 8);
++ bankval &= bankmask;
+ reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
+ }
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index b2dee10..15704aa 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -2667,8 +2667,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+- /* Flush direction if something changed behind our back */
+- if (chip->get_direction) {
++ /*
++ * If it's fast: flush the direction setting if something changed
++ * behind our back
++ */
++ if (!chip->can_sleep && chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
+
+ if (dir)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 700c56b..e443073 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -492,6 +492,7 @@ struct amdgpu_bo {
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
++ unsigned prime_shared_count;
+ /* list of all virtual address to which this bo
+ * is associated to
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 651115d..c02db01f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ entry->priority = min(info[i].bo_priority,
+ AMDGPU_BO_LIST_MAX_PRIORITY);
+ entry->tv.bo = &entry->robj->tbo;
+- entry->tv.shared = true;
++ entry->tv.shared = !entry->robj->prime_shared_count;
+
+ if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+ gds_obj = entry->robj;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 7700dc2..3826d5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ if (ret)
+ return ERR_PTR(ret);
+
++ bo->prime_shared_count = 1;
+ return &bo->gem_base;
+ }
+
+ int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+- int ret = 0;
++ long ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
++ /*
++ * Wait for all shared fences to complete before we switch to future
++ * use of exclusive fence on this prime shared bo.
++ */
++ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
++ MAX_SCHEDULE_TIMEOUT);
++ if (unlikely(ret < 0)) {
++ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
++ amdgpu_bo_unreserve(bo);
++ return ret;
++ }
++
+ /* pin buffer into GTT */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
++ if (likely(ret == 0))
++ bo->prime_shared_count++;
++
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+ return;
+
+ amdgpu_bo_unpin(bo);
++ if (bo->prime_shared_count)
++ bo->prime_shared_count--;
+ amdgpu_bo_unreserve(bo);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 1f8af87..cf25607 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ if (!child)
+ return;
+
+- aux_channel = child->raw[25];
++ aux_channel = child->common.aux_channel;
+ ddc_pin = child->common.ddc_pin;
+
+ is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+@@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+ return false;
+ }
+
+-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
++static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
++ enum port port)
+ {
+ static const struct {
+ u16 dp, hdmi;
+@@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+- int i;
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+- if (!dev_priv->vbt.child_dev_num)
++ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
++ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ return false;
+
++ if (p_child->common.dvo_port == port_mapping[port].dp)
++ return true;
++
++ /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
++ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
++ p_child->common.aux_channel != 0)
++ return true;
++
++ return false;
++}
++
++bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
++ enum port port)
++{
++ int i;
++
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+
+- if ((p_child->common.dvo_port == port_mapping[port].dp ||
+- p_child->common.dvo_port == port_mapping[port].hdmi) &&
+- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
++ if (child_dev_is_dp_dual_mode(p_child, port))
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 3051182..b8aeb28 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4323,21 +4323,11 @@ static enum drm_connector_status
+ intel_dp_detect(struct drm_connector *connector, bool force)
+ {
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+- struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ enum drm_connector_status status = connector->status;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+- if (intel_dp->is_mst) {
+- /* MST devices are disconnected from a monitor POV */
+- intel_dp_unset_edid(intel_dp);
+- if (intel_encoder->type != INTEL_OUTPUT_EDP)
+- intel_encoder->type = INTEL_OUTPUT_DP;
+- return connector_status_disconnected;
+- }
+-
+ /* If full detect is not performed yet, do a full detect */
+ if (!intel_dp->detect_done)
+ status = intel_dp_long_pulse(intel_dp->attached_connector);
+diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
+index 68db962..8886cab1 100644
+--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
++++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
+@@ -280,7 +280,8 @@ struct common_child_dev_config {
+ u8 dp_support:1;
+ u8 tmds_support:1;
+ u8 support_reserved:5;
+- u8 not_common3[12];
++ u8 aux_channel;
++ u8 not_common3[11];
+ u8 iboost_level;
+ } __packed;
+
+diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
+index d223650..11edabf 100644
+--- a/drivers/i2c/Kconfig
++++ b/drivers/i2c/Kconfig
+@@ -59,7 +59,6 @@ config I2C_CHARDEV
+
+ config I2C_MUX
+ tristate "I2C bus multiplexing support"
+- depends on HAS_IOMEM
+ help
+ Say Y here if you want the I2C core to support the ability to
+ handle multiplexed I2C bus topologies, by presenting each
+diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
+index e280c8e..96de9ce 100644
+--- a/drivers/i2c/muxes/Kconfig
++++ b/drivers/i2c/muxes/Kconfig
+@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
+
+ config I2C_MUX_REG
+ tristate "Register-based I2C multiplexer"
++ depends on HAS_IOMEM
+ help
+ If you say yes to this option, support will be included for a
+ register based I2C multiplexer. This driver provides access to
+diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
+index 3278ebf..7e6f300 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
+@@ -247,9 +247,9 @@ static int pca954x_probe(struct i2c_client *client,
+ /* discard unconfigured channels */
+ break;
+ idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
+- data->deselect |= (idle_disconnect_pd
+- || idle_disconnect_dt) << num;
+ }
++ data->deselect |= (idle_disconnect_pd ||
++ idle_disconnect_dt) << num;
+
+ ret = i2c_mux_add_adapter(muxc, force, num, class);
+
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index c995255..71c7c4c 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -80,6 +80,8 @@ static struct ib_cm {
+ __be32 random_id_operand;
+ struct list_head timewait_list;
+ struct workqueue_struct *wq;
++ /* Sync on cm change port state */
++ spinlock_t state_lock;
+ } cm;
+
+ /* Counter indexes ordered by attribute ID */
+@@ -161,6 +163,8 @@ struct cm_port {
+ struct ib_mad_agent *mad_agent;
+ struct kobject port_obj;
+ u8 port_num;
++ struct list_head cm_priv_prim_list;
++ struct list_head cm_priv_altr_list;
+ struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
+ };
+
+@@ -241,6 +245,12 @@ struct cm_id_private {
+ u8 service_timeout;
+ u8 target_ack_delay;
+
++ struct list_head prim_list;
++ struct list_head altr_list;
++ /* Indicates that the send port mad is registered and av is set */
++ int prim_send_port_not_ready;
++ int altr_send_port_not_ready;
++
+ struct list_head work_list;
+ atomic_t work_count;
+ };
+@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+ struct ib_mad_agent *mad_agent;
+ struct ib_mad_send_buf *m;
+ struct ib_ah *ah;
++ struct cm_av *av;
++ unsigned long flags, flags2;
++ int ret = 0;
+
++ /* don't let the port to be released till the agent is down */
++ spin_lock_irqsave(&cm.state_lock, flags2);
++ spin_lock_irqsave(&cm.lock, flags);
++ if (!cm_id_priv->prim_send_port_not_ready)
++ av = &cm_id_priv->av;
++ else if (!cm_id_priv->altr_send_port_not_ready &&
++ (cm_id_priv->alt_av.port))
++ av = &cm_id_priv->alt_av;
++ else {
++ pr_info("%s: not valid CM id\n", __func__);
++ ret = -ENODEV;
++ spin_unlock_irqrestore(&cm.lock, flags);
++ goto out;
++ }
++ spin_unlock_irqrestore(&cm.lock, flags);
++ /* Make sure the port haven't released the mad yet */
+ mad_agent = cm_id_priv->av.port->mad_agent;
+- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
+- if (IS_ERR(ah))
+- return PTR_ERR(ah);
++ if (!mad_agent) {
++ pr_info("%s: not a valid MAD agent\n", __func__);
++ ret = -ENODEV;
++ goto out;
++ }
++ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
++ if (IS_ERR(ah)) {
++ ret = PTR_ERR(ah);
++ goto out;
++ }
+
+ m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
+- cm_id_priv->av.pkey_index,
++ av->pkey_index,
+ 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
+ if (IS_ERR(m)) {
+ ib_destroy_ah(ah);
+- return PTR_ERR(m);
++ ret = PTR_ERR(m);
++ goto out;
+ }
+
+ /* Timeout set by caller if response is expected. */
+@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+ atomic_inc(&cm_id_priv->refcount);
+ m->context[0] = cm_id_priv;
+ *msg = m;
+- return 0;
++
++out:
++ spin_unlock_irqrestore(&cm.state_lock, flags2);
++ return ret;
+ }
+
+ static int cm_alloc_response_msg(struct cm_port *port,
+@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ grh, &av->ah_attr);
+ }
+
+-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
++static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
++ struct cm_id_private *cm_id_priv)
+ {
+ struct cm_device *cm_dev;
+ struct cm_port *port = NULL;
+@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+ &av->ah_attr);
+ av->timeout = path->packet_life_time + 1;
+
+- return 0;
++ spin_lock_irqsave(&cm.lock, flags);
++ if (&cm_id_priv->av == av)
++ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
++ else if (&cm_id_priv->alt_av == av)
++ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
++ else
++ ret = -EINVAL;
++
++ spin_unlock_irqrestore(&cm.lock, flags);
++
++ return ret;
+ }
+
+ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
+@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ spin_lock_init(&cm_id_priv->lock);
+ init_completion(&cm_id_priv->comp);
+ INIT_LIST_HEAD(&cm_id_priv->work_list);
++ INIT_LIST_HEAD(&cm_id_priv->prim_list);
++ INIT_LIST_HEAD(&cm_id_priv->altr_list);
+ atomic_set(&cm_id_priv->work_count, -1);
+ atomic_set(&cm_id_priv->refcount, 1);
+ return &cm_id_priv->id;
+@@ -892,6 +945,15 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ break;
+ }
+
++ spin_lock_irq(&cm.lock);
++ if (!list_empty(&cm_id_priv->altr_list) &&
++ (!cm_id_priv->altr_send_port_not_ready))
++ list_del(&cm_id_priv->altr_list);
++ if (!list_empty(&cm_id_priv->prim_list) &&
++ (!cm_id_priv->prim_send_port_not_ready))
++ list_del(&cm_id_priv->prim_list);
++ spin_unlock_irq(&cm.lock);
++
+ cm_free_id(cm_id->local_id);
+ cm_deref_id(cm_id_priv);
+ wait_for_completion(&cm_id_priv->comp);
+@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ goto out;
+ }
+
+- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
++ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
++ cm_id_priv);
+ if (ret)
+ goto error1;
+ if (param->alternate_path) {
+ ret = cm_init_av_by_path(param->alternate_path,
+- &cm_id_priv->alt_av);
++ &cm_id_priv->alt_av, cm_id_priv);
+ if (ret)
+ goto error1;
+ }
+@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
+ dev_put(gid_attr.ndev);
+ }
+ work->path[0].gid_type = gid_attr.gid_type;
+- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
++ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
++ cm_id_priv);
+ }
+ if (ret) {
+ int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
+@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
+ goto rejected;
+ }
+ if (req_msg->alt_local_lid) {
+- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
++ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
++ cm_id_priv);
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ &work->path[0].sgid,
+@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
+ goto out;
+ }
+
+- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
++ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
++ cm_id_priv);
+ if (ret)
+ goto out;
+ cm_id_priv->alt_av.timeout =
+@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
+ cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
++ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
++ cm_id_priv);
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
+ return -EINVAL;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
++ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
+ if (ret)
+ goto out;
+
+@@ -3468,7 +3535,9 @@ static int cm_establish(struct ib_cm_id *cm_id)
+ static int cm_migrate(struct ib_cm_id *cm_id)
+ {
+ struct cm_id_private *cm_id_priv;
++ struct cm_av tmp_av;
+ unsigned long flags;
++ int tmp_send_port_not_ready;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
+ (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+ cm_id->lap_state == IB_CM_LAP_IDLE)) {
+ cm_id->lap_state = IB_CM_LAP_IDLE;
++ /* Swap address vector */
++ tmp_av = cm_id_priv->av;
+ cm_id_priv->av = cm_id_priv->alt_av;
++ cm_id_priv->alt_av = tmp_av;
++ /* Swap port send ready state */
++ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
++ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
++ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
+ } else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
+ port->cm_dev = cm_dev;
+ port->port_num = i;
+
++ INIT_LIST_HEAD(&port->cm_priv_prim_list);
++ INIT_LIST_HEAD(&port->cm_priv_altr_list);
++
+ ret = cm_create_port_fs(port);
+ if (ret)
+ goto error1;
+@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ {
+ struct cm_device *cm_dev = client_data;
+ struct cm_port *port;
++ struct cm_id_private *cm_id_priv;
++ struct ib_mad_agent *cur_mad_agent;
+ struct ib_port_modify port_modify = {
+ .clr_port_cap_mask = IB_PORT_CM_SUP
+ };
+@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+
+ port = cm_dev->port[i-1];
+ ib_modify_port(ib_device, port->port_num, 0, &port_modify);
++ /* Mark all the cm_id's as not valid */
++ spin_lock_irq(&cm.lock);
++ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
++ cm_id_priv->altr_send_port_not_ready = 1;
++ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
++ cm_id_priv->prim_send_port_not_ready = 1;
++ spin_unlock_irq(&cm.lock);
+ /*
+ * We flush the queue here after the going_down set, this
+ * verify that no new works will be queued in the recv handler,
+ * after that we can call the unregister_mad_agent
+ */
+ flush_workqueue(cm.wq);
+- ib_unregister_mad_agent(port->mad_agent);
++ spin_lock_irq(&cm.state_lock);
++ cur_mad_agent = port->mad_agent;
++ port->mad_agent = NULL;
++ spin_unlock_irq(&cm.state_lock);
++ ib_unregister_mad_agent(cur_mad_agent);
+ cm_remove_port_fs(port);
+ }
++
+ device_unregister(cm_dev->device);
+ kfree(cm_dev);
+ }
+@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
+ INIT_LIST_HEAD(&cm.device_list);
+ rwlock_init(&cm.device_lock);
+ spin_lock_init(&cm.lock);
++ spin_lock_init(&cm.state_lock);
+ cm.listen_service_table = RB_ROOT;
+ cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+ cm.remote_id_table = RB_ROOT;
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index c68746c..bdab61d 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -174,7 +174,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+
+ cur_base = addr & PAGE_MASK;
+
+- if (npages == 0) {
++ if (npages == 0 || npages > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 0012fa5..44b1104 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ container_of(uobj, struct ib_uqp_object, uevent.uobject);
+
+ idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
+- if (qp != qp->real_qp) {
+- ib_close_qp(qp);
+- } else {
++ if (qp == qp->real_qp)
+ ib_uverbs_detach_umcast(qp, uqp);
+- ib_destroy_qp(qp);
+- }
++ ib_destroy_qp(qp);
+ ib_uverbs_release_uevent(file, &uqp->uevent);
+ kfree(uqp);
+ }
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index bcf76c3..e362998 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -87,7 +87,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ qp->s_flags |= RVT_S_WAIT_RNR;
+- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
++ priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
+ add_timer(&priv->s_rnr_timer);
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index 1694037..8f59a4f 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -1152,7 +1152,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
+ rb_node = hfi1_mmu_rb_extract(pq->handler,
+ (unsigned long)iovec->iov.iov_base,
+ iovec->iov.iov_len);
+- if (rb_node && !IS_ERR(rb_node))
++ if (rb_node)
+ node = container_of(rb_node, struct sdma_mmu_node, rb);
+ else
+ rb_node = NULL;
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index 5fc6233..b9bf075 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (ah_attr->sl & 7) << 13;
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
++ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
++ if (ret < 0)
++ return ERR_PTR(ret);
++ ah->av.eth.gid_index = ret;
+ ah->av.eth.vlan = cpu_to_be16(vlan_tag);
+ ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
+ if (ah_attr->static_rate) {
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 5df63da..efb6414 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
+ if (context)
+ if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
+ err = -EFAULT;
+- goto err_dbmap;
++ goto err_cq_free;
+ }
+
+ return &cq->ibcq;
+
++err_cq_free:
++ mlx4_cq_free(dev->dev, &cq->mcq);
++
+ err_dbmap:
+ if (context)
+ mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index e4fac929..ebe43cb 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -917,8 +917,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
+ if (err)
+ goto err_create;
+ } else {
+- /* for now choose 64 bytes till we have a proper interface */
+- cqe_size = 64;
++ cqe_size = cache_line_size() == 128 ? 128 : 64;
+ err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
+ &index, &inlen);
+ if (err)
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index bff8707a..19f8820 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2100,14 +2100,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ {
+ struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct ib_event ibev;
+-
++ bool fatal = false;
+ u8 port = 0;
+
+ switch (event) {
+ case MLX5_DEV_EVENT_SYS_ERROR:
+- ibdev->ib_active = false;
+ ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
++ fatal = true;
+ break;
+
+ case MLX5_DEV_EVENT_PORT_UP:
+@@ -2154,6 +2154,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+
+ if (ibdev->ib_active)
+ ib_dispatch_event(&ibev);
++
++ if (fatal)
++ ibdev->ib_active = false;
+ }
+
+ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
+@@ -2835,7 +2838,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+ }
+ err = init_node_data(dev);
+ if (err)
+- goto err_dealloc;
++ goto err_free_port;
+
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+@@ -2845,7 +2848,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ err = mlx5_enable_roce(dev);
+ if (err)
+- goto err_dealloc;
++ goto err_free_port;
+ }
+
+ err = create_dev_resources(&dev->devr);
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index affc3f6..19d590d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2037,8 +2037,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
+
+ mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
+ qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+- to_mcq(init_attr->recv_cq)->mcq.cqn,
+- to_mcq(init_attr->send_cq)->mcq.cqn);
++ init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
++ init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
+
+ qp->trans_qp.xrcdn = xrcdn;
+
+@@ -4702,6 +4702,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+ udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
++ if (init_attr->log_ind_tbl_size >
++ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
++ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
++ init_attr->log_ind_tbl_size,
++ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
++ return ERR_PTR(-EINVAL);
++ }
++
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
+index 33076a5..04ebbb5 100644
+--- a/drivers/infiniband/sw/rdmavt/dma.c
++++ b/drivers/infiniband/sw/rdmavt/dma.c
+@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+- if (offset + size > PAGE_SIZE)
+- return BAD_DMA_ADDRESS;
+-
+ addr = (u64)page_address(page);
+ if (addr)
+ addr += offset;
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index eedf2f1..7f5d735 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ {
+ int err;
+ struct socket *sock;
+- struct udp_port_cfg udp_cfg;
+- struct udp_tunnel_sock_cfg tnl_cfg;
+-
+- memset(&udp_cfg, 0, sizeof(udp_cfg));
++ struct udp_port_cfg udp_cfg = {0};
++ struct udp_tunnel_sock_cfg tnl_cfg = {0};
+
+ if (ipv6) {
+ udp_cfg.family = AF_INET6;
+@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ return ERR_PTR(err);
+ }
+
+- tnl_cfg.sk_user_data = NULL;
+ tnl_cfg.encap_type = 1;
+ tnl_cfg.encap_rcv = rxe_udp_encap_recv;
+- tnl_cfg.encap_destroy = NULL;
+
+ /* Setup UDP tunnel */
+ setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 22ba24f..f724a7e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
+ if (qp->sq.queue) {
+ __rxe_do_task(&qp->comp.task);
+ __rxe_do_task(&qp->req.task);
++ rxe_queue_reset(qp->sq.queue);
+ }
+
+ /* cleanup attributes */
+@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
+ {
+ qp->req.state = QP_STATE_ERROR;
+ qp->resp.state = QP_STATE_ERROR;
++ qp->attr.qp_state = IB_QPS_ERR;
+
+ /* drain work and packet queues */
+ rxe_run_task(&qp->resp.task, 1);
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
+index 0827425..d14bf49 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.c
++++ b/drivers/infiniband/sw/rxe/rxe_queue.c
+@@ -84,6 +84,15 @@ int do_mmap_info(struct rxe_dev *rxe,
+ return -EINVAL;
+ }
+
++inline void rxe_queue_reset(struct rxe_queue *q)
++{
++ /* queue is comprised from header and the memory
++ * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h
++ * reset only the queue itself and not the management header
++ */
++ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
++}
++
+ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size)
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
+index 239fd60..8c8641c 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.h
++++ b/drivers/infiniband/sw/rxe/rxe_queue.h
+@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
+ size_t buf_size,
+ struct rxe_mmap_info **ip_p);
+
++void rxe_queue_reset(struct rxe_queue *q);
++
+ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size);
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 13a848a..43bb166 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -695,7 +695,8 @@ int rxe_requester(void *arg)
+ qp->req.wqe_index);
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+- goto complete;
++ __rxe_do_task(&qp->comp.task);
++ return 0;
+ }
+ payload = mtu;
+ }
+@@ -744,13 +745,17 @@ int rxe_requester(void *arg)
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ wqe->state = wqe_state_error;
+
+-complete:
+- if (qp_type(qp) != IB_QPT_RC) {
+- while (rxe_completer(qp) == 0)
+- ;
+- }
+-
+- return 0;
++ /*
++ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
++ * ---------8<---------8<-------------
++ * ...Note that if a completion error occurs, a Work Completion
++ * will always be generated, even if the signaling
++ * indicator requests an Unsignaled Completion.
++ * ---------8<---------8<-------------
++ */
++ wqe->wr.send_flags |= IB_SEND_SIGNALED;
++ __rxe_do_task(&qp->comp.task);
++ return -EAGAIN;
+
+ exit:
+ return -EAGAIN;
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 41b1138..70c646b 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
+ for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
+ lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
+
+- /* Put the device into reset state */
+- writel(0, lpss->priv + LPSS_PRIV_RESETS);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 3ac486a..c57e407 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
+ clones[i]);
+ }
+
++ put_device(dev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(mfd_clone_cell);
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 94c7cc0..00dd7ff 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -761,6 +761,8 @@ static int stmpe1801_reset(struct stmpe *stmpe)
+ if (ret < 0)
+ return ret;
+
++ msleep(10);
++
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (time_before(jiffies, timeout)) {
+ ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 1b5f531..bf3fd34 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2010,23 +2010,33 @@ static struct virtio_device_id id_table[] = {
+ { 0 },
+ };
+
++#define VIRTNET_FEATURES \
++ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
++ VIRTIO_NET_F_MAC, \
++ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
++ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
++ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
++ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
++ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
++ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
++ VIRTIO_NET_F_CTRL_MAC_ADDR, \
++ VIRTIO_NET_F_MTU
++
+ static unsigned int features[] = {
+- VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+- VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+- VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
+- VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+- VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+- VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+- VIRTIO_NET_F_CTRL_MAC_ADDR,
++ VIRTNET_FEATURES,
++};
++
++static unsigned int features_legacy[] = {
++ VIRTNET_FEATURES,
++ VIRTIO_NET_F_GSO,
+ VIRTIO_F_ANY_LAYOUT,
+- VIRTIO_NET_F_MTU,
+ };
+
+ static struct virtio_driver virtio_net_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
++ .feature_table_legacy = features_legacy,
++ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 4fdc3da..ea67ae9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
++ } else {
++ /* In theory, we wouldn't have to stop a running sched
++ * scan in order to start another one (for
++ * net-detect). But in practice this doesn't seem to
++ * work properly, so stop any running sched_scan now.
++ */
++ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
++ if (ret)
++ return ret;
+ }
+
+ /* rfkill release can be either for wowlan or netdetect */
+@@ -2088,6 +2097,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+ iwl_mvm_update_changed_regdom(mvm);
+
+ if (mvm->net_detect) {
++ /* If this is a non-unified image, we restart the FW,
++ * so no need to stop the netdetect scan. If that
++ * fails, continue and try to get the wake-up reasons,
++ * but trigger a HW restart by keeping a failure code
++ * in ret.
++ */
++ if (unified_image)
++ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
++ false);
++
+ iwl_mvm_query_netdetect_reasons(mvm, vif);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+@@ -2271,7 +2290,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+ {
+ struct iwl_mvm *mvm = inode->i_private;
+- int remaining_time = 10;
++ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
++ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ mvm->d3_test_active = false;
+
+@@ -2282,17 +2302,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+- ieee80211_restart_hw(mvm->hw);
++ if (!unified_image) {
++ int remaining_time = 10;
+
+- /* wait for restart and disconnect all interfaces */
+- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+- remaining_time > 0) {
+- remaining_time--;
+- msleep(1000);
+- }
++ ieee80211_restart_hw(mvm->hw);
+
+- if (remaining_time == 0)
+- IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
++ /* wait for restart and disconnect all interfaces */
++ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
++ remaining_time > 0) {
++ remaining_time--;
++ msleep(1000);
++ }
++
++ if (remaining_time == 0)
++ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
++ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 5dd77e3..90a1f4a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -4097,7 +4097,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
+ u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+ int ret;
+
+@@ -4119,7 +4118,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ }
+
+ if (notif->sync)
+- ret = wait_event_timeout(notif_waitq,
++ ret = wait_event_timeout(mvm->rx_sync_waitq,
+ atomic_read(&mvm->queue_sync_counter) == 0,
+ HZ);
+ WARN_ON_ONCE(!ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 6a615bb..e9cb970 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -932,6 +932,7 @@ struct iwl_mvm {
+ /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+ spinlock_t d0i3_tx_lock;
+ wait_queue_head_t d0i3_exit_waitq;
++ wait_queue_head_t rx_sync_waitq;
+
+ /* BT-Coex */
+ struct iwl_bt_coex_profile_notif last_bt_notif;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 55d9096..30bbdec 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -618,6 +618,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ spin_lock_init(&mvm->refs_lock);
+ skb_queue_head_init(&mvm->d0i3_tx);
+ init_waitqueue_head(&mvm->d0i3_exit_waitq);
++ init_waitqueue_head(&mvm->rx_sync_waitq);
+
+ atomic_set(&mvm->queue_sync_counter, 0);
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index afb7eb6..2b994be 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -545,7 +545,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ "Received expired RX queue sync message\n");
+ return;
+ }
+- atomic_dec(&mvm->queue_sync_counter);
++ if (!atomic_dec_return(&mvm->queue_sync_counter))
++ wake_up(&mvm->rx_sync_waitq);
+ }
+
+ switch (internal_notif->type) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index dac120f..3707ec6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1185,6 +1185,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+
+ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+ {
++ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
++ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
++
+ /* This looks a bit arbitrary, but the idea is that if we run
+ * out of possible simultaneous scans and the userspace is
+ * trying to run a scan type that is already running, we
+@@ -1211,12 +1214,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+ case IWL_MVM_SCAN_NETDETECT:
+- /* No need to stop anything for net-detect since the
+- * firmware is restarted anyway. This way, any sched
+- * scans that were running will be restarted when we
+- * resume.
+- */
+- return 0;
++ /* For non-unified images, there's no need to stop
++ * anything for net-detect since the firmware is
++ * restarted anyway. This way, any sched scans that
++ * were running will be restarted when we resume.
++ */
++ if (!unified_image)
++ return 0;
++
++ /* If this is a unified image and we ran out of scans,
++ * we need to stop something. Prefer stopping regular
++ * scans, because the results are useless at this
++ * point, and we should be able to keep running
++ * another scheduled scan while suspended.
++ */
++ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
++ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
++ true);
++ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
++ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
++ true);
++
++ /* fall through, something is wrong if no scan was
++ * running but we ran out of scans.
++ */
+ default:
+ WARN_ON(1);
+ break;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 78cf9a7..13842ca 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -526,48 +526,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+
+ #ifdef CONFIG_ACPI
+-#define SPL_METHOD "SPLC"
+-#define SPL_DOMAINTYPE_MODULE BIT(0)
+-#define SPL_DOMAINTYPE_WIFI BIT(1)
+-#define SPL_DOMAINTYPE_WIGIG BIT(2)
+-#define SPL_DOMAINTYPE_RFEM BIT(3)
++#define ACPI_SPLC_METHOD "SPLC"
++#define ACPI_SPLC_DOMAIN_WIFI (0x07)
+
+-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
++static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
+ {
+- union acpi_object *limits, *domain_type, *power_limit;
+-
+- if (splx->type != ACPI_TYPE_PACKAGE ||
+- splx->package.count != 2 ||
+- splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+- splx->package.elements[0].integer.value != 0) {
+- IWL_ERR(trans, "Unsupported splx structure\n");
++ union acpi_object *data_pkg, *dflt_pwr_limit;
++ int i;
++
++ /* We need at least two elements, one for the revision and one
++ * for the data itself. Also check that the revision is
++ * supported (currently only revision 0).
++ */
++ if (splc->type != ACPI_TYPE_PACKAGE ||
++ splc->package.count < 2 ||
++ splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
++ splc->package.elements[0].integer.value != 0) {
++ IWL_DEBUG_INFO(trans,
++ "Unsupported structure returned by the SPLC method. Ignoring.\n");
+ return 0;
+ }
+
+- limits = &splx->package.elements[1];
+- if (limits->type != ACPI_TYPE_PACKAGE ||
+- limits->package.count < 2 ||
+- limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+- limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+- IWL_ERR(trans, "Invalid limits element\n");
+- return 0;
++ /* loop through all the packages to find the one for WiFi */
++ for (i = 1; i < splc->package.count; i++) {
++ union acpi_object *domain;
++
++ data_pkg = &splc->package.elements[i];
++
++ /* Skip anything that is not a package with the right
++ * amount of elements (i.e. at least 2 integers).
++ */
++ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
++ data_pkg->package.count < 2 ||
++ data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
++ data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
++ continue;
++
++ domain = &data_pkg->package.elements[0];
++ if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
++ break;
++
++ data_pkg = NULL;
+ }
+
+- domain_type = &limits->package.elements[0];
+- power_limit = &limits->package.elements[1];
+- if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+- IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
++ if (!data_pkg) {
++ IWL_DEBUG_INFO(trans,
++ "No element for the WiFi domain returned by the SPLC method.\n");
+ return 0;
+ }
+
+- return power_limit->integer.value;
++ dflt_pwr_limit = &data_pkg->package.elements[1];
++ return dflt_pwr_limit->integer.value;
+ }
+
+ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+ {
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+- struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
++ struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+@@ -578,23 +594,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+ }
+
+ /* Get the method's handle */
+- status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
++ status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
++ &handle);
+ if (ACPI_FAILURE(status)) {
+- IWL_DEBUG_INFO(trans, "SPL method not found\n");
++ IWL_DEBUG_INFO(trans, "SPLC method not found\n");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+- status = acpi_evaluate_object(handle, NULL, NULL, &splx);
++ status = acpi_evaluate_object(handle, NULL, NULL, &splc);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
+ return;
+ }
+
+- trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
++ trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
+ trans->dflt_pwr_limit);
+- kfree(splx.pointer);
++ kfree(splc.pointer);
+ }
+
+ #else /* CONFIG_ACPI */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 18650dc..478bba5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -522,6 +522,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, u32 txq_id)
+ {
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ txq->need_update = false;
+@@ -536,6 +537,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ return ret;
+
+ spin_lock_init(&txq->lock);
++
++ if (txq_id == trans_pcie->cmd_queue) {
++ static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
++
++ lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
++ }
++
+ __skb_queue_head_init(&txq->overflow_q);
+
+ /*
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
+index ec2e9c5..22394fe 100644
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -109,6 +109,7 @@
+ /* OMAP_RTC_OSC_REG bit fields: */
+ #define OMAP_RTC_OSC_32KCLK_EN BIT(6)
+ #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
++#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
+
+ /* OMAP_RTC_IRQWAKEEN bit fields: */
+ #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
+@@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
+ */
+ if (rtc->has_ext_clk) {
+ reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
+- rtc_write(rtc, OMAP_RTC_OSC_REG,
+- reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
++ reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
++ reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
++ rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
+ }
+
+ rtc->type->lock(rtc);
+diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
+index d059ad4..97ee1b4 100644
+--- a/drivers/uwb/lc-rc.c
++++ b/drivers/uwb/lc-rc.c
+@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
+- if (dev)
++ if (dev) {
+ rc = dev_get_drvdata(dev);
++ put_device(dev);
++ }
++
+ return rc;
+ }
+
+@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
+ if (dev) {
+ rc = dev_get_drvdata(dev);
+ __uwb_rc_get(rc);
++ put_device(dev);
+ }
++
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
+@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
+
+ dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
+ find_rc_grandpa);
+- if (dev)
++ if (dev) {
+ rc = dev_get_drvdata(dev);
++ put_device(dev);
++ }
++
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
+@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
+- if (dev)
++ if (dev) {
+ rc = dev_get_drvdata(dev);
++ put_device(dev);
++ }
+
+ return rc;
+ }
+diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
+index c1304b8..678e937 100644
+--- a/drivers/uwb/pal.c
++++ b/drivers/uwb/pal.c
+@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
+
+ dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+
++ put_device(dev);
++
+ return (dev != NULL);
+ }
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index ea31931..7bd21aa 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -235,6 +235,7 @@ struct ext4_io_submit {
+ #define EXT4_MAX_BLOCK_SIZE 65536
+ #define EXT4_MIN_BLOCK_LOG_SIZE 10
+ #define EXT4_MAX_BLOCK_LOG_SIZE 16
++#define EXT4_MAX_CLUSTER_LOG_SIZE 30
+ #ifdef __KERNEL__
+ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
+ #else
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3ec8708..ec89f50 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3518,7 +3518,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+- "Unsupported filesystem blocksize %d", blocksize);
++ "Unsupported filesystem blocksize %d (%d log_block_size)",
++ blocksize, le32_to_cpu(es->s_log_block_size));
++ goto failed_mount;
++ }
++ if (le32_to_cpu(es->s_log_block_size) >
++ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++ ext4_msg(sb, KERN_ERR,
++ "Invalid log block size: %u",
++ le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+
+@@ -3650,6 +3658,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ "block size (%d)", clustersize, blocksize);
+ goto failed_mount;
+ }
++ if (le32_to_cpu(es->s_log_cluster_size) >
++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++ ext4_msg(sb, KERN_ERR,
++ "Invalid log cluster size: %u",
++ le32_to_cpu(es->s_log_cluster_size));
++ goto failed_mount;
++ }
+ sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+ le32_to_cpu(es->s_log_block_size);
+ sbi->s_clusters_per_group =
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 3988b43..a621dd9 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
+ {
+ struct inode *inode = page->mapping->host;
+
++ /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
++ if (!copied)
++ goto unlock;
++
+ if (!PageUptodate(page)) {
+ /* Zero any unwritten bytes at the end of the page */
+ size_t endoff = (pos + copied) & ~PAGE_MASK;
+@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
+
+ fuse_write_update_size(inode, pos + copied);
+ set_page_dirty(page);
++
++unlock:
+ unlock_page(page);
+ put_page(page);
+
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index ab02a45..e5d1934 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -25,6 +25,7 @@ struct svc_xprt_ops {
+ void (*xpo_detach)(struct svc_xprt *);
+ void (*xpo_free)(struct svc_xprt *);
+ int (*xpo_secure_port)(struct svc_rqst *);
++ void (*xpo_kill_temp_xprt)(struct svc_xprt *);
+ };
+
+ struct svc_xprt_class {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 9530fcd..9d592c6 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+
+ } else if (new->flags & IRQF_TRIGGER_MASK) {
+ unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+- unsigned int omsk = irq_settings_get_trigger_mask(desc);
++ unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
+
+ if (nmsk != omsk)
+ /* hope the handler works with current trigger mode */
+ pr_warn("irq %d uses trigger mode %u; requested %u\n",
+- irq, nmsk, omsk);
++ irq, omsk, nmsk);
+ }
+
+ *old_ptr = new;
+diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
+index 084452e..bdff5ed 100644
+--- a/kernel/power/suspend_test.c
++++ b/kernel/power/suspend_test.c
+@@ -203,8 +203,10 @@ static int __init test_suspend(void)
+
+ /* RTCs have initialized by now too ... can we use one? */
+ dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
+- if (dev)
++ if (dev) {
+ rtc = rtc_class_open(dev_name(dev));
++ put_device(dev);
++ }
+ if (!rtc) {
+ printk(warn_no_rtc);
+ return 0;
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index d0a1617..979e7bf 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -1,8 +1,4 @@
+
+-# We are fully aware of the dangers of __builtin_return_address()
+-FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
+-KBUILD_CFLAGS += $(FRAME_CFLAGS)
+-
+ # Do not instrument the tracer itself:
+
+ ifdef CONFIG_FUNCTION_TRACER
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 84752c8..b1d7f1b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1856,6 +1856,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+
+ /* Update rec->flags */
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ /* We need to update only differences of filter_hash */
+ in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+ in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+@@ -1878,6 +1882,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+
+ /* Roll back what we did above */
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (rec == end)
+ goto err_out;
+
+@@ -2391,6 +2399,10 @@ void __weak ftrace_replace_code(int enable)
+ return;
+
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ failed = __ftrace_replace_code(rec, enable);
+ if (failed) {
+ ftrace_bug(failed, rec);
+@@ -2757,7 +2769,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ struct dyn_ftrace *rec;
+
+ do_for_each_ftrace_rec(pg, rec) {
+- if (FTRACE_WARN_ON_ONCE(rec->flags))
++ if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
+ pr_warn(" %pS flags:%lx\n",
+ (void *)rec->ip, rec->flags);
+ } while_for_each_ftrace_rec();
+@@ -3592,6 +3604,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
+ goto out_unlock;
+
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
+ ret = enter_record(hash, rec, clear_filter);
+ if (ret < 0) {
+@@ -3787,6 +3803,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+
+ do_for_each_ftrace_rec(pg, rec) {
+
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (!ftrace_match_record(rec, &func_g, NULL, 0))
+ continue;
+
+@@ -4679,6 +4698,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
+
+ do_for_each_ftrace_rec(pg, rec) {
+
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (ftrace_match_record(rec, &func_g, NULL, 0)) {
+ /* if it is in the array */
+ exists = false;
+diff --git a/mm/Makefile b/mm/Makefile
+index 2ca1faf..295bd7a 100644
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -21,9 +21,6 @@ KCOV_INSTRUMENT_memcontrol.o := n
+ KCOV_INSTRUMENT_mmzone.o := n
+ KCOV_INSTRUMENT_vmstat.o := n
+
+-# Since __builtin_frame_address does work as used, disable the warning.
+-CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+-
+ mmu-y := nommu.o
+ mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
+ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 8e999ff..8af9d25 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct sock *sk = sock->sk;
+ struct bcm_sock *bo = bcm_sk(sk);
++ int ret = 0;
+
+ if (len < sizeof(*addr))
+ return -EINVAL;
+
+- if (bo->bound)
+- return -EISCONN;
++ lock_sock(sk);
++
++ if (bo->bound) {
++ ret = -EISCONN;
++ goto fail;
++ }
+
+ /* bind a device to this socket */
+ if (addr->can_ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(&init_net, addr->can_ifindex);
+- if (!dev)
+- return -ENODEV;
+-
++ if (!dev) {
++ ret = -ENODEV;
++ goto fail;
++ }
+ if (dev->type != ARPHRD_CAN) {
+ dev_put(dev);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto fail;
+ }
+
+ bo->ifindex = dev->ifindex;
+@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ bo->ifindex = 0;
+ }
+
+- bo->bound = 1;
+-
+ if (proc_dir) {
+ /* unique socket address as filename */
+ sprintf(bo->procname, "%lu", sock_i_ino(sk));
+ bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
+ proc_dir,
+ &bcm_proc_fops, sk);
++ if (!bo->bcm_proc_read) {
++ ret = -ENOMEM;
++ goto fail;
++ }
+ }
+
+- return 0;
++ bo->bound = 1;
++
++fail:
++ release_sock(sk);
++
++ return ret;
+ }
+
+ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 0af2669..584ac76 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -143,7 +143,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
+ if (!(set->flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
+- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
++ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
++ tb[NFTA_DYNSET_TIMEOUT])));
+ }
+
+ priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
+@@ -230,7 +231,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
+ goto nla_put_failure;
+- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
++ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
++ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
+ NFTA_DYNSET_PAD))
+ goto nla_put_failure;
+ if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index c3f6523..3bc1d61 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
+ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
+ {
+ struct svc_xprt *xprt;
+- struct svc_sock *svsk;
+- struct socket *sock;
+ struct list_head *le, *next;
+ LIST_HEAD(to_be_closed);
+- struct linger no_linger = {
+- .l_onoff = 1,
+- .l_linger = 0,
+- };
+
+ spin_lock_bh(&serv->sv_lock);
+ list_for_each_safe(le, next, &serv->sv_tempsocks) {
+@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
+ list_del_init(le);
+ xprt = list_entry(le, struct svc_xprt, xpt_list);
+ dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
+- svsk = container_of(xprt, struct svc_sock, sk_xprt);
+- sock = svsk->sk_sock;
+- kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
+- (char *)&no_linger, sizeof(no_linger));
++ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
+ svc_close_xprt(xprt);
+ }
+ }
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 57625f6..a4bc982 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
+ return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
+ }
+
++static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
++{
++ struct svc_sock *svsk;
++ struct socket *sock;
++ struct linger no_linger = {
++ .l_onoff = 1,
++ .l_linger = 0,
++ };
++
++ svsk = container_of(xprt, struct svc_sock, sk_xprt);
++ sock = svsk->sk_sock;
++ kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
++ (char *)&no_linger, sizeof(no_linger));
++}
++
+ /*
+ * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
+ */
+@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
+ return NULL;
+ }
+
++static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
++{
++}
++
+ static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
+ struct net *net,
+ struct sockaddr *sa, int salen,
+@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
+ .xpo_has_wspace = svc_udp_has_wspace,
+ .xpo_accept = svc_udp_accept,
+ .xpo_secure_port = svc_sock_secure_port,
++ .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
+ };
+
+ static struct svc_xprt_class svc_udp_class = {
+@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
+ .xpo_has_wspace = svc_tcp_has_wspace,
+ .xpo_accept = svc_tcp_accept,
+ .xpo_secure_port = svc_sock_secure_port,
++ .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
+ };
+
+ static struct svc_xprt_class svc_tcp_class = {
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 924271c..a55b809 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
+ static void svc_rdma_free(struct svc_xprt *xprt);
+ static int svc_rdma_has_wspace(struct svc_xprt *xprt);
+ static int svc_rdma_secure_port(struct svc_rqst *);
++static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
+
+ static struct svc_xprt_ops svc_rdma_ops = {
+ .xpo_create = svc_rdma_create,
+@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
+ .xpo_has_wspace = svc_rdma_has_wspace,
+ .xpo_accept = svc_rdma_accept,
+ .xpo_secure_port = svc_rdma_secure_port,
++ .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
+ };
+
+ struct svc_xprt_class svc_rdma_class = {
+@@ -1285,6 +1287,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
+ return 1;
+ }
+
++static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
++{
++}
++
+ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+ {
+ struct ib_send_wr *bad_wr, *n_wr;
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 973e8c1..17867e7 100755
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 26e866f..1628180 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6905,8 +6905,6 @@ static const struct hda_fixup alc662_fixups[] = {
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x40f000f0 }, /* disabled */
+ { 0x16, 0x40f000f0 }, /* disabled */
+- { 0x18, 0x01014011 }, /* LO */
+- { 0x1a, 0x01014012 }, /* LO */
+ { }
+ }
+ },
+diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
+index 6a23302..4d9d320 100644
+--- a/sound/pci/hda/thinkpad_helper.c
++++ b/sound/pci/hda/thinkpad_helper.c
+@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
+ static bool is_thinkpad(struct hda_codec *codec)
+ {
+ return (codec->core.subsystem_id >> 16 == 0x17aa) &&
+- (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
++ (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
++ acpi_dev_found("IBM0068"));
+ }
+
+ static void update_tpacpi_mute_led(void *private_data, int enabled)
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 9e5276d..2ddc034 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
+ snd_usb_endpoint_free(ep);
+
+ mutex_destroy(&chip->mutex);
+- dev_set_drvdata(&chip->dev->dev, NULL);
++ if (!atomic_read(&chip->shutdown))
++ dev_set_drvdata(&chip->dev->dev, NULL);
+ kfree(chip);
+ return 0;
+ }
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index de15dbc..7214913 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -1596,18 +1596,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
+ if (prog)
+ ui_progress__update(prog, 1);
+
++ hists->nr_entries++;
++ if (!he->filtered) {
++ hists->nr_non_filtered_entries++;
++ hists__calc_col_len(hists, he);
++ }
++
+ if (!he->leaf) {
+ hists__hierarchy_output_resort(hists, prog,
+ &he->hroot_in,
+ &he->hroot_out,
+ min_callchain_hits,
+ use_callchain);
+- hists->nr_entries++;
+- if (!he->filtered) {
+- hists->nr_non_filtered_entries++;
+- hists__calc_col_len(hists, he);
+- }
+-
+ continue;
+ }
+
+diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
+index 6e9c40e..69ccce3 100644
+--- a/virt/kvm/arm/pmu.c
++++ b/virt/kvm/arm/pmu.c
+@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+ continue;
+ type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+ & ARMV8_PMU_EVTYPE_EVENT;
+- if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
++ if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
+ && (enable & BIT(i))) {
+ reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+ reg = lower_32_bits(reg);
+@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
+
+ /* Software increment event does't need to be backed by a perf event */
+- if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
++ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
++ select_idx != ARMV8_PMU_CYCLE_IDX)
+ return;
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+ attr.exclude_hv = 1; /* Don't count EL2 events */
+ attr.exclude_host = 1; /* Don't count host events */
+- attr.config = eventsel;
++ attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
++ ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
+
+ counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+ /* The initial sample period (overflow count) of an event. */
diff --git a/4.8.10/4420_grsecurity-3.1-4.8.10-201611232213.patch b/4.8.11/4420_grsecurity-3.1-4.8.11-201611271225.patch
index 0149d93..ec53570 100644
--- a/4.8.10/4420_grsecurity-3.1-4.8.10-201611232213.patch
+++ b/4.8.11/4420_grsecurity-3.1-4.8.11-201611271225.patch
@@ -407,7 +407,7 @@ index ffab8b5..b8fcd61 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index 7cf2b49..3e3071c 100644
+index 2b1bcba..e6f635b 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -421,16 +421,7 @@ index 7cf2b49..3e3071c 100644
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
-@@ -621,6 +623,8 @@ include arch/$(SRCARCH)/Makefile
-
- KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
- KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
-+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
-+KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
-
- ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
- KBUILD_CFLAGS += -Os
-@@ -715,7 +719,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+@@ -717,7 +719,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
@@ -439,7 +430,7 @@ index 7cf2b49..3e3071c 100644
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -890,7 +894,7 @@ export mod_sign_cmd
+@@ -892,7 +894,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -448,7 +439,7 @@ index 7cf2b49..3e3071c 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -1256,7 +1260,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+@@ -1258,7 +1260,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.pem signing_key.priv signing_key.x509 \
x509.genkey extra_certificates signing_key.x509.keyid \
@@ -460,7 +451,7 @@ index 7cf2b49..3e3071c 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1295,7 +1302,7 @@ distclean: mrproper
+@@ -1297,7 +1302,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -22648,7 +22639,7 @@ index 904f528..b4d0d24 100644
#ifdef CONFIG_FLATMEM
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
-index b3bebf9..cb419e7 100644
+index b3bebf9..2c3570f 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -7,9 +7,9 @@
@@ -22659,7 +22650,7 @@ index b3bebf9..cb419e7 100644
+extern const unsigned long phys_base;
-static inline unsigned long __phys_addr_nodebug(unsigned long x)
-+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
++static __always_inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
{
unsigned long y = x - __START_KERNEL_map;
@@ -26546,10 +26537,10 @@ index 4a8697f..8a13428 100644
obj-y += common.o
obj-y += rdrand.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index b81fe2d..fa46eca 100644
+index 1e81a37..7a498d6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
-@@ -792,7 +792,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+@@ -788,7 +788,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
@@ -33161,10 +33152,10 @@ index 5cede40..f932797 100644
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 46f74d4..eca57f1 100644
+index 2fff657..84ea93a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1948,8 +1948,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1957,8 +1957,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -33175,7 +33166,7 @@ index 46f74d4..eca57f1 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2657,6 +2657,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2666,6 +2666,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -33184,7 +33175,7 @@ index 46f74d4..eca57f1 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -3073,7 +3075,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+@@ -3082,7 +3084,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
@@ -33193,7 +33184,7 @@ index 46f74d4..eca57f1 100644
u64 xstate_bv = xsave->header.xfeatures;
u64 valid;
-@@ -3109,7 +3111,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+@@ -3118,7 +3120,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
@@ -33202,7 +33193,7 @@ index 46f74d4..eca57f1 100644
u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
u64 valid;
-@@ -3153,7 +3155,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+@@ -3162,7 +3164,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
fill_xsave((u8 *) guest_xsave->region, vcpu);
} else {
memcpy(guest_xsave->region,
@@ -33211,7 +33202,7 @@ index 46f74d4..eca57f1 100644
sizeof(struct fxregs_state));
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XFEATURE_MASK_FPSSE;
-@@ -3178,7 +3180,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+@@ -3187,7 +3189,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
} else {
if (xstate_bv & ~XFEATURE_MASK_FPSSE)
return -EINVAL;
@@ -33220,7 +33211,7 @@ index 46f74d4..eca57f1 100644
guest_xsave->region, sizeof(struct fxregs_state));
}
return 0;
-@@ -5739,7 +5741,7 @@ static unsigned long kvm_get_guest_ip(void)
+@@ -5751,7 +5753,7 @@ static unsigned long kvm_get_guest_ip(void)
unsigned long ip = 0;
if (__this_cpu_read(current_vcpu))
@@ -33229,7 +33220,7 @@ index 46f74d4..eca57f1 100644
return ip;
}
-@@ -6462,6 +6464,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+@@ -6474,6 +6476,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -33237,7 +33228,7 @@ index 46f74d4..eca57f1 100644
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
-@@ -6737,6 +6740,7 @@ out:
+@@ -6749,6 +6752,7 @@ out:
return r;
}
@@ -33245,7 +33236,7 @@ index 46f74d4..eca57f1 100644
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu) &&
-@@ -7284,7 +7288,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+@@ -7296,7 +7300,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct fxregs_state *fxsave =
@@ -33254,7 +33245,7 @@ index 46f74d4..eca57f1 100644
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
-@@ -7301,7 +7305,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7313,7 +7317,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct fxregs_state *fxsave =
@@ -33263,7 +33254,7 @@ index 46f74d4..eca57f1 100644
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
-@@ -7317,9 +7321,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7329,9 +7333,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
static void fx_init(struct kvm_vcpu *vcpu)
{
@@ -33275,7 +33266,7 @@ index 46f74d4..eca57f1 100644
host_xcr0 | XSTATE_COMPACTION_ENABLED;
/*
-@@ -7342,7 +7346,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+@@ -7354,7 +7358,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
*/
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
@@ -33284,7 +33275,7 @@ index 46f74d4..eca57f1 100644
trace_kvm_fpu(1);
}
-@@ -7642,6 +7646,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+@@ -7654,6 +7658,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
struct static_key kvm_no_apic_vcpu __read_mostly;
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
@@ -33293,7 +33284,7 @@ index 46f74d4..eca57f1 100644
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
-@@ -7659,11 +7665,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+@@ -7671,11 +7677,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -33312,7 +33303,7 @@ index 46f74d4..eca57f1 100644
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
-@@ -7721,6 +7730,9 @@ fail_mmu_destroy:
+@@ -7733,6 +7742,9 @@ fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
@@ -33322,7 +33313,7 @@ index 46f74d4..eca57f1 100644
fail:
return r;
}
-@@ -7739,6 +7751,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+@@ -7751,6 +7763,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
free_page((unsigned long)vcpu->arch.pio_data);
if (!lapic_in_kernel(vcpu))
static_key_slow_dec(&kvm_no_apic_vcpu);
@@ -40915,6 +40906,18 @@ index 0774799..a0012ea 100644
{
return jiffies_to_clock_t(q->sg_timeout);
}
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index 865f46e..c80765b 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
+ return cert;
+
+ error_decode:
+- kfree(cert->pub->key);
+ kfree(ctx);
+ error_no_ctx:
+ x509_free_certificate(cert);
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 058c8d7..55229dd 100644
--- a/crypto/cast6_generic.c
@@ -43536,7 +43539,7 @@ index ab62b81..8f38450 100644
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index 4cb8f21..fc2c3e2 100644
+index 4cb8f21..d056229 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -383,7 +383,7 @@ struct drbd_epoch {
@@ -43579,6 +43582,15 @@ index 4cb8f21..fc2c3e2 100644
int rs_last_sect_ev; /* counter to compare with */
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
+@@ -1129,7 +1129,7 @@ extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
+ enum drbd_packet cmd);
+ extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
+
+-extern int drbd_send_bitmap(struct drbd_device *device);
++extern int drbd_send_bitmap(struct drbd_device *device) __intentional_overflow(-1);
+ extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
+ extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
+ extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8348272..f2ddf22 100644
--- a/drivers/block/drbd/drbd_main.c
@@ -46604,7 +46616,7 @@ index ac8deb0..f3caa10 100644
return -EINVAL;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index b2dee10..fc44efe 100644
+index 15704aa..623f140 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1631,8 +1631,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
@@ -46634,10 +46646,10 @@ index b2dee10..fc44efe 100644
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-index 700c56b..267fde4 100644
+index e443073..2ce0ad5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-@@ -1796,7 +1796,7 @@ int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+@@ -1797,7 +1797,7 @@ int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
* amdgpu smumgr functions
*/
struct amdgpu_smumgr_funcs {
@@ -51535,10 +51547,10 @@ index d2b8899..5b0e8f5 100644
struct iio_chan_spec const *chan,
ssize_t (*readfunc)(struct device *dev,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index c995255..7de0b49 100644
+index 71c7c4c..f91d896 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
-@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
+@@ -117,7 +117,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
struct cm_counter_group {
struct kobject obj;
@@ -51547,7 +51559,7 @@ index c995255..7de0b49 100644
};
struct cm_counter_attribute {
-@@ -1432,7 +1432,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
+@@ -1495,7 +1495,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
static void cm_format_rej(struct cm_rej_msg *rej_msg,
struct cm_id_private *cm_id_priv,
enum ib_cm_rej_reason reason,
@@ -51556,7 +51568,7 @@ index c995255..7de0b49 100644
u8 ari_length,
const void *private_data,
u8 private_data_len)
-@@ -1476,7 +1476,7 @@ static void cm_dup_req_handler(struct cm_work *work,
+@@ -1539,7 +1539,7 @@ static void cm_dup_req_handler(struct cm_work *work,
struct ib_mad_send_buf *msg = NULL;
int ret;
@@ -51565,7 +51577,7 @@ index c995255..7de0b49 100644
counter[CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
-@@ -1884,7 +1884,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
+@@ -1949,7 +1949,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
if (!cm_id_priv)
return;
@@ -51574,7 +51586,7 @@ index c995255..7de0b49 100644
counter[CM_REP_COUNTER]);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
-@@ -2051,7 +2051,7 @@ static int cm_rtu_handler(struct cm_work *work)
+@@ -2116,7 +2116,7 @@ static int cm_rtu_handler(struct cm_work *work)
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
@@ -51583,7 +51595,7 @@ index c995255..7de0b49 100644
counter[CM_RTU_COUNTER]);
goto out;
}
-@@ -2234,7 +2234,7 @@ static int cm_dreq_handler(struct cm_work *work)
+@@ -2299,7 +2299,7 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
dreq_msg->local_comm_id);
if (!cm_id_priv) {
@@ -51592,7 +51604,7 @@ index c995255..7de0b49 100644
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
return -EINVAL;
-@@ -2259,7 +2259,7 @@ static int cm_dreq_handler(struct cm_work *work)
+@@ -2324,7 +2324,7 @@ static int cm_dreq_handler(struct cm_work *work)
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
@@ -51601,7 +51613,7 @@ index c995255..7de0b49 100644
counter[CM_DREQ_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
-@@ -2273,7 +2273,7 @@ static int cm_dreq_handler(struct cm_work *work)
+@@ -2338,7 +2338,7 @@ static int cm_dreq_handler(struct cm_work *work)
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
@@ -51610,7 +51622,7 @@ index c995255..7de0b49 100644
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
-@@ -2336,12 +2336,13 @@ out:
+@@ -2401,12 +2401,13 @@ out:
}
int ib_send_cm_rej(struct ib_cm_id *cm_id,
@@ -51626,7 +51638,7 @@ index c995255..7de0b49 100644
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
-@@ -2640,7 +2641,7 @@ static int cm_mra_handler(struct cm_work *work)
+@@ -2705,7 +2706,7 @@ static int cm_mra_handler(struct cm_work *work)
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout)) {
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
@@ -51635,7 +51647,7 @@ index c995255..7de0b49 100644
counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
goto out;
-@@ -2649,7 +2650,7 @@ static int cm_mra_handler(struct cm_work *work)
+@@ -2714,7 +2715,7 @@ static int cm_mra_handler(struct cm_work *work)
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_MRA_REP_RCVD:
@@ -51644,7 +51656,7 @@ index c995255..7de0b49 100644
counter[CM_MRA_COUNTER]);
/* fall through */
default:
-@@ -2811,7 +2812,7 @@ static int cm_lap_handler(struct cm_work *work)
+@@ -2877,7 +2878,7 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
@@ -51653,7 +51665,7 @@ index c995255..7de0b49 100644
counter[CM_LAP_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
-@@ -2827,7 +2828,7 @@ static int cm_lap_handler(struct cm_work *work)
+@@ -2893,7 +2894,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
@@ -51662,7 +51674,7 @@ index c995255..7de0b49 100644
counter[CM_LAP_COUNTER]);
goto unlock;
default:
-@@ -2859,7 +2860,7 @@ deref: cm_deref_id(cm_id_priv);
+@@ -2926,7 +2927,7 @@ deref: cm_deref_id(cm_id_priv);
static void cm_format_apr(struct cm_apr_msg *apr_msg,
struct cm_id_private *cm_id_priv,
enum ib_cm_apr_status status,
@@ -51671,7 +51683,7 @@ index c995255..7de0b49 100644
u8 info_length,
const void *private_data,
u8 private_data_len)
-@@ -2879,12 +2880,13 @@ static void cm_format_apr(struct cm_apr_msg *apr_msg,
+@@ -2946,12 +2947,13 @@ static void cm_format_apr(struct cm_apr_msg *apr_msg,
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
@@ -51687,7 +51699,7 @@ index c995255..7de0b49 100644
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
-@@ -3113,7 +3115,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
+@@ -3180,7 +3182,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
@@ -51696,7 +51708,7 @@ index c995255..7de0b49 100644
counter[CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
-@@ -3327,10 +3329,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
+@@ -3394,10 +3396,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
msg->retries = 1;
@@ -51709,7 +51721,7 @@ index c995255..7de0b49 100644
&port->counter_group[CM_XMIT_RETRIES].
counter[attr_index]);
-@@ -3557,7 +3559,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+@@ -3633,7 +3635,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
}
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
@@ -51718,7 +51730,7 @@ index c995255..7de0b49 100644
counter[attr_id - CM_ATTR_ID_OFFSET]);
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
-@@ -3764,7 +3766,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
+@@ -3840,7 +3842,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
cm_attr = container_of(attr, struct cm_counter_attribute, attr);
return sprintf(buf, "%ld\n",
@@ -52805,7 +52817,7 @@ index 6abe1c6..f866a31 100644
struct qib_devdata *dd = pci_get_drvdata(pdev);
pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
-index 22ba24f..194cc2b 100644
+index f724a7e..18ffc45 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -219,7 +219,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
@@ -52817,7 +52829,7 @@ index 22ba24f..194cc2b 100644
atomic_set(&qp->skb_out, 0);
}
-@@ -525,7 +525,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
+@@ -526,7 +526,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
}
/* cleanup attributes */
@@ -66093,7 +66105,7 @@ index a2515887..6d13233 100644
/* we will have to manufacture ethernet headers, prepare template */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index 1b5f531..3c16c42 100644
+index bf3fd34..43d918f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
@@ -68280,7 +68292,7 @@ index 6c2d6da..4660f39 100644
} else
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-index 4fdc3da..4f63dd9 100644
+index ea67ae9..89be094 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -258,7 +258,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@@ -68301,7 +68313,7 @@ index 4fdc3da..4f63dd9 100644
aes_tx_sc->pn = cpu_to_le64(pn64);
} else {
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
-@@ -1610,12 +1610,12 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
+@@ -1619,12 +1619,12 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_CCMP:
iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
sta, key);
@@ -103477,10 +103489,10 @@ index e04ec86..953c3e6 100644
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index ea31931..2e49089 100644
+index 7bd21aa..3288330 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
-@@ -1439,19 +1439,19 @@ struct ext4_sb_info {
+@@ -1440,19 +1440,19 @@ struct ext4_sb_info {
unsigned long s_mb_last_start;
/* stats for buddy allocator */
@@ -103683,7 +103695,7 @@ index cf68100..f96c5c0 100644
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 3ec8708..f39299c 100644
+index ec89f50..01b055f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -989,10 +989,12 @@ static void init_once(void *foo)
@@ -105581,7 +105593,7 @@ index a94d2ed..80c8060 100644
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
-index 3988b43..c02080c 100644
+index a621dd9..1ac40c9 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -838,9 +838,9 @@ struct fuse_fill_data {
@@ -120331,10 +120343,10 @@ index fd6be45..6be6542 100644
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
-index 0000000..821601d
+index 0000000..307ca55
--- /dev/null
+++ b/grsecurity/Kconfig
-@@ -0,0 +1,1205 @@
+@@ -0,0 +1,1206 @@
+#
+# grecurity configuration
+#
@@ -120563,6 +120575,7 @@ index 0000000..821601d
+config GRKERNSEC_RANDSTRUCT
+ bool "Randomize layout of sensitive kernel structures"
+ default y if GRKERNSEC_CONFIG_AUTO
++ depends on GCC_PLUGINS
+ select GRKERNSEC_HIDESYM
+ select MODVERSIONS if MODULES
+ help
@@ -145065,7 +145078,7 @@ index 4ae3232..5adee02 100644
{
compat_uptr_t base = ptr_to_compat(entry);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 9530fcd..7f3a521 100644
+index 9d592c6..9f6bd5e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -972,7 +972,7 @@ static int irq_thread(void *data)
@@ -149664,7 +149677,7 @@ index dbafc5d..819bd5d 100644
ret = -EIO;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 84752c8..64513c9 100644
+index b1d7f1b..22e2e5b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -120,8 +120,9 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -149679,7 +149692,7 @@ index 84752c8..64513c9 100644
#endif
/*
-@@ -2480,13 +2481,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -2492,13 +2493,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
@@ -149700,7 +149713,7 @@ index 84752c8..64513c9 100644
}
/*
-@@ -4850,8 +4856,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4872,8 +4878,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
@@ -149711,7 +149724,7 @@ index 84752c8..64513c9 100644
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
-@@ -5267,7 +5275,8 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+@@ -5289,7 +5297,8 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
@@ -149721,7 +149734,7 @@ index 84752c8..64513c9 100644
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
-@@ -5690,8 +5699,12 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+@@ -5712,8 +5721,12 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
}
/* The callbacks that hook a function */
@@ -149736,7 +149749,7 @@ index 84752c8..64513c9 100644
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
-@@ -5724,7 +5737,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+@@ -5746,7 +5759,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
if (t->ret_stack == NULL) {
atomic_set(&t->tracing_graph_pause, 0);
@@ -149745,7 +149758,7 @@ index 84752c8..64513c9 100644
t->curr_ret_stack = -1;
/* Make sure the tasks see the -1 first: */
smp_wmb();
-@@ -5919,7 +5932,7 @@ void unregister_ftrace_graph(void)
+@@ -5941,7 +5954,7 @@ void unregister_ftrace_graph(void)
goto out;
ftrace_graph_active--;
@@ -149754,7 +149767,7 @@ index 84752c8..64513c9 100644
ftrace_graph_entry = ftrace_graph_entry_stub;
__ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
-@@ -5947,7 +5960,7 @@ static void
+@@ -5969,7 +5982,7 @@ static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
atomic_set(&t->tracing_graph_pause, 0);
@@ -150267,10 +150280,10 @@ index 0bb9cf2..f319026 100644
key = event->type & (EVENT_HASHSIZE - 1);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
-index ad1d616..139606f 100644
+index ad1d616..da547c1 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
-@@ -167,6 +167,10 @@ static void format_mod_stop(void)
+@@ -167,13 +167,11 @@ static void format_mod_stop(void)
mutex_unlock(&btrace_mutex);
}
@@ -150279,9 +150292,16 @@ index ad1d616..139606f 100644
+};
+
#else /* !CONFIG_MODULES */
- __init static int
- module_trace_bprintk_format_notify(struct notifier_block *self,
-@@ -190,11 +194,6 @@ void trace_printk_control(bool enabled)
+-__init static int
+-module_trace_bprintk_format_notify(struct notifier_block *self,
+- unsigned long val, void *data)
+-{
+- return 0;
+-}
+ static inline const char **
+ find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
+ {
+@@ -190,11 +188,6 @@ void trace_printk_control(bool enabled)
trace_printk_enabled = enabled;
}
@@ -150293,7 +150313,7 @@ index ad1d616..139606f 100644
int __trace_bprintk(unsigned long ip, const char *fmt, ...)
{
int ret;
-@@ -373,9 +372,11 @@ static __init int init_trace_printk_function_export(void)
+@@ -373,9 +366,11 @@ static __init int init_trace_printk_function_export(void)
fs_initcall(init_trace_printk_function_export);
@@ -151524,6 +151544,25 @@ index 5a92189..d77978d 100644
retval = 1;
}
spin_unlock(&lockref->lock);
+diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
+index 5464c87..e24388a 100644
+--- a/lib/mpi/mpi-pow.c
++++ b/lib/mpi/mpi-pow.c
+@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+ if (!esize) {
+ /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+ * depending on if MOD equals 1. */
+- rp[0] = 1;
+ res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
++ if (res->nlimbs) {
++ if (mpi_resize(res, 1) < 0)
++ goto enomem;
++ rp = res->d;
++ rp[0] = 1;
++ }
+ res->sign = 0;
+ goto leave;
+ }
diff --git a/lib/nlattr.c b/lib/nlattr.c
index fce1e9a..d44559b 100644
--- a/lib/nlattr.c
@@ -157167,7 +157206,7 @@ index bf262e4..c5bc390 100644
if (S_ISREG(inode->i_mode))
diff --git a/mm/usercopy.c b/mm/usercopy.c
-index 3c8da0a..5c81035 100644
+index 3c8da0a..62823b9 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -16,15 +16,9 @@
@@ -157250,11 +157289,11 @@ index 3c8da0a..5c81035 100644
unsigned long n)
{
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
++ unsigned long textlow = ktla_ktva((unsigned long)_stext);
+#ifdef CONFIG_MODULES
-+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
++ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
+#else
-+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
++ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
+#endif
+#else
unsigned long textlow = (unsigned long)_stext;
@@ -158857,10 +158896,10 @@ index 1108079..1871d16 100644
};
diff --git a/net/can/bcm.c b/net/can/bcm.c
-index 8e999ff..684a43e 100644
+index 8af9d25..44e9458 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
-@@ -1674,7 +1674,7 @@ static int __init bcm_module_init(void)
+@@ -1688,7 +1688,7 @@ static int __init bcm_module_init(void)
}
/* create /proc/net/can-bcm directory */
@@ -163504,6 +163543,292 @@ index 29c509c..c19322a 100644
if (!llc_proc_dir)
goto out;
+diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
+index 7663c28..a4e0d59 100644
+--- a/net/mac80211/aes_ccm.c
++++ b/net/mac80211/aes_ccm.c
+@@ -18,21 +18,24 @@
+ #include "key.h"
+ #include "aes_ccm.h"
+
+-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+- u8 *data, size_t data_len, u8 *mic,
+- size_t mic_len)
++int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
++ u8 *data, size_t data_len, u8 *mic,
++ size_t mic_len)
+ {
+ struct scatterlist sg[3];
++ struct aead_request *aead_req;
++ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
++ u8 *__aad;
+
+- char aead_req_data[sizeof(struct aead_request) +
+- crypto_aead_reqsize(tfm)]
+- __aligned(__alignof__(struct aead_request));
+- struct aead_request *aead_req = (void *) aead_req_data;
++ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
++ if (!aead_req)
++ return -ENOMEM;
+
+- memset(aead_req, 0, sizeof(aead_req_data));
++ __aad = (u8 *)aead_req + reqsize;
++ memcpy(__aad, aad, CCM_AAD_LEN);
+
+ sg_init_table(sg, 3);
+- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
++ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
+ sg_set_buf(&sg[1], data, data_len);
+ sg_set_buf(&sg[2], mic, mic_len);
+
+@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ aead_request_set_ad(aead_req, sg[0].length);
+
+ crypto_aead_encrypt(aead_req);
++ kzfree(aead_req);
++
++ return 0;
+ }
+
+ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ size_t mic_len)
+ {
+ struct scatterlist sg[3];
+- char aead_req_data[sizeof(struct aead_request) +
+- crypto_aead_reqsize(tfm)]
+- __aligned(__alignof__(struct aead_request));
+- struct aead_request *aead_req = (void *) aead_req_data;
++ struct aead_request *aead_req;
++ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
++ u8 *__aad;
++ int err;
+
+ if (data_len == 0)
+ return -EINVAL;
+
+- memset(aead_req, 0, sizeof(aead_req_data));
++ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
++ if (!aead_req)
++ return -ENOMEM;
++
++ __aad = (u8 *)aead_req + reqsize;
++ memcpy(__aad, aad, CCM_AAD_LEN);
+
+ sg_init_table(sg, 3);
+- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
++ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
+ sg_set_buf(&sg[1], data, data_len);
+ sg_set_buf(&sg[2], mic, mic_len);
+
+@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
+ aead_request_set_ad(aead_req, sg[0].length);
+
+- return crypto_aead_decrypt(aead_req);
++ err = crypto_aead_decrypt(aead_req);
++ kzfree(aead_req);
++
++ return err;
+ }
+
+ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
+index 6a73d1e..fcd3254 100644
+--- a/net/mac80211/aes_ccm.h
++++ b/net/mac80211/aes_ccm.h
+@@ -12,12 +12,14 @@
+
+ #include <linux/crypto.h>
+
++#define CCM_AAD_LEN 32
++
+ struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
+ size_t key_len,
+ size_t mic_len);
+-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+- u8 *data, size_t data_len, u8 *mic,
+- size_t mic_len);
++int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
++ u8 *data, size_t data_len, u8 *mic,
++ size_t mic_len);
+ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len);
+diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
+index 3afe361f..8a4397c 100644
+--- a/net/mac80211/aes_gcm.c
++++ b/net/mac80211/aes_gcm.c
+@@ -15,20 +15,23 @@
+ #include "key.h"
+ #include "aes_gcm.h"
+
+-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+- u8 *data, size_t data_len, u8 *mic)
++int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
++ u8 *data, size_t data_len, u8 *mic)
+ {
+ struct scatterlist sg[3];
++ struct aead_request *aead_req;
++ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
++ u8 *__aad;
+
+- char aead_req_data[sizeof(struct aead_request) +
+- crypto_aead_reqsize(tfm)]
+- __aligned(__alignof__(struct aead_request));
+- struct aead_request *aead_req = (void *)aead_req_data;
++ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
++ if (!aead_req)
++ return -ENOMEM;
+
+- memset(aead_req, 0, sizeof(aead_req_data));
++ __aad = (u8 *)aead_req + reqsize;
++ memcpy(__aad, aad, GCM_AAD_LEN);
+
+ sg_init_table(sg, 3);
+- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
++ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
+ sg_set_buf(&sg[1], data, data_len);
+ sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
+
+@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ aead_request_set_ad(aead_req, sg[0].length);
+
+ crypto_aead_encrypt(aead_req);
++ kzfree(aead_req);
++ return 0;
+ }
+
+ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic)
+ {
+ struct scatterlist sg[3];
+- char aead_req_data[sizeof(struct aead_request) +
+- crypto_aead_reqsize(tfm)]
+- __aligned(__alignof__(struct aead_request));
+- struct aead_request *aead_req = (void *)aead_req_data;
++ struct aead_request *aead_req;
++ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
++ u8 *__aad;
++ int err;
+
+ if (data_len == 0)
+ return -EINVAL;
+
+- memset(aead_req, 0, sizeof(aead_req_data));
++ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
++ if (!aead_req)
++ return -ENOMEM;
++
++ __aad = (u8 *)aead_req + reqsize;
++ memcpy(__aad, aad, GCM_AAD_LEN);
+
+ sg_init_table(sg, 3);
+- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
++ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
+ sg_set_buf(&sg[1], data, data_len);
+ sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
+
+@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ data_len + IEEE80211_GCMP_MIC_LEN, j_0);
+ aead_request_set_ad(aead_req, sg[0].length);
+
+- return crypto_aead_decrypt(aead_req);
++ err = crypto_aead_decrypt(aead_req);
++ kzfree(aead_req);
++
++ return err;
+ }
+
+ struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
+index 1347fda..55aed53 100644
+--- a/net/mac80211/aes_gcm.h
++++ b/net/mac80211/aes_gcm.h
+@@ -11,8 +11,10 @@
+
+ #include <linux/crypto.h>
+
+-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+- u8 *data, size_t data_len, u8 *mic);
++#define GCM_AAD_LEN 32
++
++int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
++ u8 *data, size_t data_len, u8 *mic);
+ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic);
+ struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
+diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
+index 3ddd927..bd72a86 100644
+--- a/net/mac80211/aes_gmac.c
++++ b/net/mac80211/aes_gmac.c
+@@ -17,28 +17,27 @@
+ #include "key.h"
+ #include "aes_gmac.h"
+
+-#define GMAC_MIC_LEN 16
+-#define GMAC_NONCE_LEN 12
+-#define AAD_LEN 20
+-
+ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+ const u8 *data, size_t data_len, u8 *mic)
+ {
+ struct scatterlist sg[4];
+- char aead_req_data[sizeof(struct aead_request) +
+- crypto_aead_reqsize(tfm)]
+- __aligned(__alignof__(struct aead_request));
+- struct aead_request *aead_req = (void *)aead_req_data;
+- u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
++ u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
++ struct aead_request *aead_req;
++ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+
+ if (data_len < GMAC_MIC_LEN)
+ return -EINVAL;
+
+- memset(aead_req, 0, sizeof(aead_req_data));
++ aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
++ if (!aead_req)
++ return -ENOMEM;
++
++ zero = (u8 *)aead_req + reqsize;
++ __aad = zero + GMAC_MIC_LEN;
++ memcpy(__aad, aad, GMAC_AAD_LEN);
+
+- memset(zero, 0, GMAC_MIC_LEN);
+ sg_init_table(sg, 4);
+- sg_set_buf(&sg[0], aad, AAD_LEN);
++ sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
+ sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
+ sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
+ sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
+@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
+
+ aead_request_set_tfm(aead_req, tfm);
+ aead_request_set_crypt(aead_req, sg, sg, 0, iv);
+- aead_request_set_ad(aead_req, AAD_LEN + data_len);
++ aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
+
+ crypto_aead_encrypt(aead_req);
++ kzfree(aead_req);
+
+ return 0;
+ }
+diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
+index d328204..32e6442 100644
+--- a/net/mac80211/aes_gmac.h
++++ b/net/mac80211/aes_gmac.h
+@@ -11,6 +11,10 @@
+
+ #include <linux/crypto.h>
+
++#define GMAC_AAD_LEN 20
++#define GMAC_MIC_LEN 16
++#define GMAC_NONCE_LEN 12
++
+ struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+ size_t key_len);
+ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 543b1d4..bead45d 100644
--- a/net/mac80211/cfg.c
@@ -163822,7 +164147,7 @@ index 42bf0b6..8dcf0b2 100644
list_for_each_entry(sdata, &local->interfaces, list) {
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
-index b48c1e1..4c02b5b 100644
+index b48c1e1..36ee4c2 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -223,7 +223,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
@@ -163834,6 +164159,15 @@ index b48c1e1..4c02b5b 100644
pos = ieee80211_tkip_add_iv(pos, &key->conf, pn);
/* hwaccel - with software IV */
+@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
+ u8 *pos;
+ u8 pn[6];
+ u64 pn64;
+- u8 aad[2 * AES_BLOCK_SIZE];
++ u8 aad[CCM_AAD_LEN];
+ u8 b_0[AES_BLOCK_SIZE];
+
+ if (info->control.hw_key &&
@@ -444,7 +444,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
hdr = (struct ieee80211_hdr *) pos;
pos += hdrlen;
@@ -163843,7 +164177,29 @@ index b48c1e1..4c02b5b 100644
pn[5] = pn64;
pn[4] = pn64 >> 8;
-@@ -679,7 +679,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
+
+ pos += IEEE80211_CCMP_HDR_LEN;
+ ccmp_special_blocks(skb, pn, b_0, aad);
+- ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
+- skb_put(skb, mic_len), mic_len);
+-
+- return 0;
++ return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
++ skb_put(skb, mic_len), mic_len);
+ }
+
+
+@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+ u8 *pos;
+ u8 pn[6];
+ u64 pn64;
+- u8 aad[2 * AES_BLOCK_SIZE];
++ u8 aad[GCM_AAD_LEN];
+ u8 j_0[AES_BLOCK_SIZE];
+
+ if (info->control.hw_key &&
+@@ -679,7 +677,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)pos;
pos += hdrlen;
@@ -163852,7 +164208,20 @@ index b48c1e1..4c02b5b 100644
pn[5] = pn64;
pn[4] = pn64 >> 8;
-@@ -957,7 +957,7 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
+@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
+
+ pos += IEEE80211_GCMP_HDR_LEN;
+ gcmp_special_blocks(skb, pn, j_0, aad);
+- ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+- skb_put(skb, IEEE80211_GCMP_MIC_LEN));
+-
+- return 0;
++ return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
++ skb_put(skb, IEEE80211_GCMP_MIC_LEN));
+ }
+
+ ieee80211_tx_result
+@@ -957,7 +953,7 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
@@ -163861,7 +164230,7 @@ index b48c1e1..4c02b5b 100644
bip_ipn_set64(mmie->sequence_number, pn64);
-@@ -1001,7 +1001,7 @@ ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
+@@ -1001,7 +997,7 @@ ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
@@ -163870,7 +164239,19 @@ index b48c1e1..4c02b5b 100644
bip_ipn_set64(mmie->sequence_number, pn64);
-@@ -1146,7 +1146,7 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
+@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
+ struct ieee80211_key *key = tx->key;
+ struct ieee80211_mmie_16 *mmie;
+ struct ieee80211_hdr *hdr;
+- u8 aad[20];
++ u8 aad[GMAC_AAD_LEN];
+ u64 pn64;
+- u8 nonce[12];
++ u8 nonce[GMAC_NONCE_LEN];
+
+ if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+ return TX_DROP;
+@@ -1146,7 +1142,7 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
@@ -163879,6 +164260,15 @@ index b48c1e1..4c02b5b 100644
bip_ipn_set64(mmie->sequence_number, pn64);
+@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_key *key = rx->key;
+ struct ieee80211_mmie_16 *mmie;
+- u8 aad[20], mic[16], ipn[6], nonce[12];
++ u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 7079cd3..c299f08 100644
--- a/net/mac802154/iface.c
@@ -166863,10 +167253,10 @@ index 3b95b19..914e482 100644
goto err;
return write_len - bc;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-index 924271c..e7a0ab3 100644
+index a55b8093a..c32fb3b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-@@ -1304,7 +1304,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+@@ -1310,7 +1310,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
spin_lock_bh(&xprt->sc_lock);
if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
spin_unlock_bh(&xprt->sc_lock);
@@ -174998,7 +175388,7 @@ index 0000000..36211fb
+e_*.h
diff --git a/scripts/gcc-plugins/size_overflow_plugin/Makefile b/scripts/gcc-plugins/size_overflow_plugin/Makefile
new file mode 100644
-index 0000000..62c26c9
+index 0000000..a8039b2
--- /dev/null
+++ b/scripts/gcc-plugins/size_overflow_plugin/Makefile
@@ -0,0 +1,22 @@
@@ -175015,8 +175405,8 @@ index 0000000..62c26c9
+
+define build_size_overflow_hash
+targets += $(addsuffix .h,$(1))
-+$(srctree)/$(src)/size_overflow_plugin_hash.c: $(addprefix $(objtree)/$(obj)/,$(addsuffix .h,$(1)))
-+$(addprefix $(objtree)/$(obj)/,$(addsuffix .h,$(1))): $(addprefix $(src)/,$(addsuffix .data,$(1)))
++$(srctree)/$(src)/size_overflow_plugin_hash.c: $(addprefix $(obj)/,$(addsuffix .h,$(1)))
++$(addprefix $(obj)/,$(addsuffix .h,$(1))): $(addprefix $(src)/,$(addsuffix .data,$(1)))
+ $$(call if_changed,build_size_overflow_hash)
+endef
+
diff --git a/4.8.10/4425_grsec_remove_EI_PAX.patch b/4.8.11/4425_grsec_remove_EI_PAX.patch
index 594598a..594598a 100644
--- a/4.8.10/4425_grsec_remove_EI_PAX.patch
+++ b/4.8.11/4425_grsec_remove_EI_PAX.patch
diff --git a/4.8.10/4427_force_XATTR_PAX_tmpfs.patch b/4.8.11/4427_force_XATTR_PAX_tmpfs.patch
index 2562d2f..2562d2f 100644
--- a/4.8.10/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.8.11/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.8.10/4430_grsec-remove-localversion-grsec.patch b/4.8.11/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.8.10/4430_grsec-remove-localversion-grsec.patch
+++ b/4.8.11/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.8.10/4435_grsec-mute-warnings.patch b/4.8.11/4435_grsec-mute-warnings.patch
index 8929222..8929222 100644
--- a/4.8.10/4435_grsec-mute-warnings.patch
+++ b/4.8.11/4435_grsec-mute-warnings.patch
diff --git a/4.8.10/4440_grsec-remove-protected-paths.patch b/4.8.11/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.8.10/4440_grsec-remove-protected-paths.patch
+++ b/4.8.11/4440_grsec-remove-protected-paths.patch
diff --git a/4.8.10/4450_grsec-kconfig-default-gids.patch b/4.8.11/4450_grsec-kconfig-default-gids.patch
index 6fd0511..6fd0511 100644
--- a/4.8.10/4450_grsec-kconfig-default-gids.patch
+++ b/4.8.11/4450_grsec-kconfig-default-gids.patch
diff --git a/4.8.10/4465_selinux-avc_audit-log-curr_ip.patch b/4.8.11/4465_selinux-avc_audit-log-curr_ip.patch
index 7248385..7248385 100644
--- a/4.8.10/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.8.11/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.8.10/4470_disable-compat_vdso.patch b/4.8.11/4470_disable-compat_vdso.patch
index 1e4b84a..1e4b84a 100644
--- a/4.8.10/4470_disable-compat_vdso.patch
+++ b/4.8.11/4470_disable-compat_vdso.patch
diff --git a/4.8.10/4475_emutramp_default_on.patch b/4.8.11/4475_emutramp_default_on.patch
index 7b468ee..7b468ee 100644
--- a/4.8.10/4475_emutramp_default_on.patch
+++ b/4.8.11/4475_emutramp_default_on.patch