-rw-r--r--  4.8.12/1010_linux-4.8.11.patch  2351
-rw-r--r--  4.8.12/1011_linux-4.8.12.patch  1563
-rw-r--r--  4.8.13/0000_README (renamed from 4.8.12/0000_README)  14
-rw-r--r--  4.8.13/1012_linux-4.8.13.patch  1063
-rw-r--r--  4.8.13/4420_grsecurity-3.1-4.8.13-201612082118.patch (renamed from 4.8.12/4420_grsecurity-3.1-4.8.12-201612062306.patch)  143
-rw-r--r--  4.8.13/4425_grsec_remove_EI_PAX.patch (renamed from 4.8.12/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  4.8.13/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.8.12/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  4.8.13/4430_grsec-remove-localversion-grsec.patch (renamed from 4.8.12/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  4.8.13/4435_grsec-mute-warnings.patch (renamed from 4.8.12/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  4.8.13/4440_grsec-remove-protected-paths.patch (renamed from 4.8.12/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  4.8.13/4450_grsec-kconfig-default-gids.patch (renamed from 4.8.12/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  4.8.13/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.8.12/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  4.8.13/4470_disable-compat_vdso.patch (renamed from 4.8.12/4470_disable-compat_vdso.patch)  0
-rw-r--r--  4.8.13/4475_emutramp_default_on.patch (renamed from 4.8.12/4475_emutramp_default_on.patch)  0
14 files changed, 1147 insertions, 3987 deletions
diff --git a/4.8.12/1010_linux-4.8.11.patch b/4.8.12/1010_linux-4.8.11.patch
deleted file mode 100644
index 5c67d71..0000000
--- a/4.8.12/1010_linux-4.8.11.patch
+++ /dev/null
@@ -1,2351 +0,0 @@
-diff --git a/Makefile b/Makefile
-index 7cf2b49..2b1bcba 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 8
--SUBLEVEL = 10
-+SUBLEVEL = 11
- EXTRAVERSION =
- NAME = Psychotic Stoned Sheep
-
-@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
- -fno-strict-aliasing -fno-common \
- -Werror-implicit-function-declaration \
- -Wno-format-security \
-- -std=gnu89
-+ -std=gnu89 $(call cc-option,-fno-PIE)
-+
-
- KBUILD_AFLAGS_KERNEL :=
- KBUILD_CFLAGS_KERNEL :=
--KBUILD_AFLAGS := -D__ASSEMBLY__
-+KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
- KBUILD_AFLAGS_MODULE := -DMODULE
- KBUILD_CFLAGS_MODULE := -DMODULE
- KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
-@@ -621,6 +622,7 @@ include arch/$(SRCARCH)/Makefile
-
- KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
- KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
-+KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
-
- ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
- KBUILD_CFLAGS += -Os
-diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
-index dec4b07..3799396 100644
---- a/arch/arm/boot/dts/imx53-qsb.dts
-+++ b/arch/arm/boot/dts/imx53-qsb.dts
-@@ -64,8 +64,8 @@
- };
-
- ldo3_reg: ldo3 {
-- regulator-min-microvolt = <600000>;
-- regulator-max-microvolt = <1800000>;
-+ regulator-min-microvolt = <1725000>;
-+ regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
-@@ -76,8 +76,8 @@
- };
-
- ldo5_reg: ldo5 {
-- regulator-min-microvolt = <1725000>;
-- regulator-max-microvolt = <3300000>;
-+ regulator-min-microvolt = <1200000>;
-+ regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
-
-@@ -100,14 +100,14 @@
- };
-
- ldo9_reg: ldo9 {
-- regulator-min-microvolt = <1200000>;
-+ regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
-
- ldo10_reg: ldo10 {
-- regulator-min-microvolt = <1250000>;
-- regulator-max-microvolt = <3650000>;
-+ regulator-min-microvolt = <1200000>;
-+ regulator-max-microvolt = <3600000>;
- regulator-always-on;
- };
- };
-diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
-index 2065f46..38b6a2b 100644
---- a/arch/arm64/include/asm/perf_event.h
-+++ b/arch/arm64/include/asm/perf_event.h
-@@ -46,7 +46,15 @@
- #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
- #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
-
--#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
-+/*
-+ * PMUv3 event types: required events
-+ */
-+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
-+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
-+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
-+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
-+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
-+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
-
- /*
- * Event filters for PMUv3
-diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
-index 838ccf1..2c4df15 100644
---- a/arch/arm64/kernel/perf_event.c
-+++ b/arch/arm64/kernel/perf_event.c
-@@ -30,17 +30,9 @@
-
- /*
- * ARMv8 PMUv3 Performance Events handling code.
-- * Common event types.
-+ * Common event types (some are defined in asm/perf_event.h).
- */
-
--/* Required events. */
--#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
--#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
--#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
--#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
--#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
--#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
--
- /* At least one of the following is required. */
- #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
- #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
-diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
-index e51367d..31c144f 100644
---- a/arch/arm64/kvm/sys_regs.c
-+++ b/arch/arm64/kvm/sys_regs.c
-@@ -602,8 +602,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
-
- idx = ARMV8_PMU_CYCLE_IDX;
- } else {
-- BUG();
-+ return false;
- }
-+ } else if (r->CRn == 0 && r->CRm == 9) {
-+ /* PMCCNTR */
-+ if (pmu_access_event_counter_el0_disabled(vcpu))
-+ return false;
-+
-+ idx = ARMV8_PMU_CYCLE_IDX;
- } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
- /* PMEVCNTRn_EL0 */
- if (pmu_access_event_counter_el0_disabled(vcpu))
-@@ -611,7 +617,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
-
- idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
- } else {
-- BUG();
-+ return false;
- }
-
- if (!pmu_counter_idx_valid(vcpu, idx))
-diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
-index 7ac8e6e..8d586cf 100644
---- a/arch/powerpc/kernel/setup_64.c
-+++ b/arch/powerpc/kernel/setup_64.c
-@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
- if (firmware_has_feature(FW_FEATURE_OPAL))
- opal_configure_cores();
-
-- /* Enable AIL if supported, and we are in hypervisor mode */
-- if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
-- early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
-- unsigned long lpcr = mfspr(SPRN_LPCR);
-- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
-- }
-+ /* AIL on native is done in cpu_ready_for_interrupts() */
- }
- }
-
- static void cpu_ready_for_interrupts(void)
- {
-+ /*
-+ * Enable AIL if supported, and we are in hypervisor mode. This
-+ * is called once for every processor.
-+ *
-+ * If we are not in hypervisor mode the job is done once for
-+ * the whole partition in configure_exceptions().
-+ */
-+ if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
-+ early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
-+ unsigned long lpcr = mfspr(SPRN_LPCR);
-+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
-+ }
-+
- /* Set IR and DR in PACA MSR */
- get_paca()->kernel_msr = MSR_KERNEL;
- }
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index b81fe2d..1e81a37 100644
---- a/arch/x86/kernel/cpu/amd.c
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
- #ifdef CONFIG_SMP
- unsigned bits;
- int cpu = smp_processor_id();
-- unsigned int socket_id, core_complex_id;
-
- bits = c->x86_coreid_bits;
- /* Low order bits define the core id (index of core in socket) */
-@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
- if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
- return;
-
-- socket_id = (c->apicid >> bits) - 1;
-- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
--
-- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
-+ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
- #endif
- }
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 46f74d4..2fff657 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
- struct kvm_shared_msrs *locals
- = container_of(urn, struct kvm_shared_msrs, urn);
- struct kvm_shared_msr_values *values;
-+ unsigned long flags;
-
-+ /*
-+ * Disabling irqs at this point since the following code could be
-+ * interrupted and executed through kvm_arch_hardware_disable()
-+ */
-+ local_irq_save(flags);
-+ if (locals->registered) {
-+ locals->registered = false;
-+ user_return_notifier_unregister(urn);
-+ }
-+ local_irq_restore(flags);
- for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
- values = &locals->values[slot];
- if (values->host != values->curr) {
-@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
- values->curr = values->host;
- }
- }
-- locals->registered = false;
-- user_return_notifier_unregister(urn);
- }
-
- static void shared_msr_update(unsigned slot, u32 msr)
-@@ -3372,6 +3381,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
- };
- case KVM_SET_VAPIC_ADDR: {
- struct kvm_vapic_addr va;
-+ int idx;
-
- r = -EINVAL;
- if (!lapic_in_kernel(vcpu))
-@@ -3379,7 +3389,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
- r = -EFAULT;
- if (copy_from_user(&va, argp, sizeof va))
- goto out;
-+ idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
-+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
- break;
- }
- case KVM_X86_SETUP_MCE: {
-diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
-index ac58c16..555b9fa 100644
---- a/arch/x86/purgatory/Makefile
-+++ b/arch/x86/purgatory/Makefile
-@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
-
- KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
- KBUILD_CFLAGS += -m$(BITS)
-+KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
-
- $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
- $(call if_changed,ld)
-diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
-index e44944f..2932a5b 100644
---- a/drivers/base/power/main.c
-+++ b/drivers/base/power/main.c
-@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
- TRACE_DEVICE(dev);
- TRACE_SUSPEND(0);
-
-+ dpm_wait_for_children(dev, async);
-+
- if (async_error)
- goto Complete;
-
-@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
- if (dev->power.syscore || dev->power.direct_complete)
- goto Complete;
-
-- dpm_wait_for_children(dev, async);
--
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
-@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
-
- __pm_runtime_disable(dev, false);
-
-+ dpm_wait_for_children(dev, async);
-+
- if (async_error)
- goto Complete;
-
-@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
- if (dev->power.syscore || dev->power.direct_complete)
- goto Complete;
-
-- dpm_wait_for_children(dev, async);
--
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
-diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
-index 19f9b62..7a6acc3 100644
---- a/drivers/clk/imx/clk-pllv3.c
-+++ b/drivers/clk/imx/clk-pllv3.c
-@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
- temp64 *= mfn;
- do_div(temp64, mfd);
-
-- return (parent_rate * div) + (u32)temp64;
-+ return parent_rate * div + (unsigned long)temp64;
- }
-
- static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
-@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
- do_div(temp64, parent_rate);
- mfn = temp64;
-
-- return parent_rate * div + parent_rate * mfn / mfd;
-+ temp64 = (u64)parent_rate;
-+ temp64 *= mfn;
-+ do_div(temp64, mfd);
-+
-+ return parent_rate * div + (unsigned long)temp64;
- }
-
- static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
-diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
-index 3a51fff..9adaf48 100644
---- a/drivers/clk/mmp/clk-of-mmp2.c
-+++ b/drivers/clk/mmp/clk-of-mmp2.c
-@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
- }
-
- pxa_unit->apmu_base = of_iomap(np, 1);
-- if (!pxa_unit->mpmu_base) {
-+ if (!pxa_unit->apmu_base) {
- pr_err("failed to map apmu registers\n");
- return;
- }
-diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
-index 87f2317..f110c02 100644
---- a/drivers/clk/mmp/clk-of-pxa168.c
-+++ b/drivers/clk/mmp/clk-of-pxa168.c
-@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
- }
-
- pxa_unit->apmu_base = of_iomap(np, 1);
-- if (!pxa_unit->mpmu_base) {
-+ if (!pxa_unit->apmu_base) {
- pr_err("failed to map apmu registers\n");
- return;
- }
-diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
-index e22a67f..64d1ef4 100644
---- a/drivers/clk/mmp/clk-of-pxa910.c
-+++ b/drivers/clk/mmp/clk-of-pxa910.c
-@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
- }
-
- pxa_unit->apmu_base = of_iomap(np, 1);
-- if (!pxa_unit->mpmu_base) {
-+ if (!pxa_unit->apmu_base) {
- pr_err("failed to map apmu registers\n");
- return;
- }
-@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
- }
-
- pxa_unit->apbcp_base = of_iomap(np, 3);
-- if (!pxa_unit->mpmu_base) {
-+ if (!pxa_unit->apbcp_base) {
- pr_err("failed to map apbcp registers\n");
- return;
- }
-diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
-index b304421..2cde379 100644
---- a/drivers/crypto/caam/caamalg.c
-+++ b/drivers/crypto/caam/caamalg.c
-@@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void)
- if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
- continue;
-
-+ /*
-+ * Check support for AES modes not available
-+ * on LP devices.
-+ */
-+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-+ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_XTS)
-+ continue;
-+
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
-diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
-index 02f2a56..47d08b9 100644
---- a/drivers/gpio/gpio-pca953x.c
-+++ b/drivers/gpio/gpio-pca953x.c
-@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
- break;
- }
-
-- memcpy(reg_val, chip->reg_output, NBANK(chip));
- mutex_lock(&chip->i2c_lock);
-+ memcpy(reg_val, chip->reg_output, NBANK(chip));
- for(bank=0; bank<NBANK(chip); bank++) {
- unsigned bankmask = mask[bank / sizeof(*mask)] >>
- ((bank % sizeof(*mask)) * 8);
- if(bankmask) {
- unsigned bankval = bits[bank / sizeof(*bits)] >>
- ((bank % sizeof(*bits)) * 8);
-+ bankval &= bankmask;
- reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
- }
- }
-diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index b2dee10..15704aa 100644
---- a/drivers/gpio/gpiolib.c
-+++ b/drivers/gpio/gpiolib.c
-@@ -2667,8 +2667,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
-- /* Flush direction if something changed behind our back */
-- if (chip->get_direction) {
-+ /*
-+ * If it's fast: flush the direction setting if something changed
-+ * behind our back
-+ */
-+ if (!chip->can_sleep && chip->get_direction) {
- int dir = chip->get_direction(chip, offset);
-
- if (dir)
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-index 700c56b..e443073 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-@@ -492,6 +492,7 @@ struct amdgpu_bo {
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
-+ unsigned prime_shared_count;
- /* list of all virtual address to which this bo
- * is associated to
- */
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
-index 651115d..c02db01f6 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
-@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- entry->priority = min(info[i].bo_priority,
- AMDGPU_BO_LIST_MAX_PRIORITY);
- entry->tv.bo = &entry->robj->tbo;
-- entry->tv.shared = true;
-+ entry->tv.shared = !entry->robj->prime_shared_count;
-
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
- gds_obj = entry->robj;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
-index 7700dc2..3826d5a 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
-@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- if (ret)
- return ERR_PTR(ret);
-
-+ bo->prime_shared_count = 1;
- return &bo->gem_base;
- }
-
- int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
- {
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-- int ret = 0;
-+ long ret = 0;
-
- ret = amdgpu_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
-+ /*
-+ * Wait for all shared fences to complete before we switch to future
-+ * use of exclusive fence on this prime shared bo.
-+ */
-+ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
-+ MAX_SCHEDULE_TIMEOUT);
-+ if (unlikely(ret < 0)) {
-+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
-+ amdgpu_bo_unreserve(bo);
-+ return ret;
-+ }
-+
- /* pin buffer into GTT */
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
-+ if (likely(ret == 0))
-+ bo->prime_shared_count++;
-+
- amdgpu_bo_unreserve(bo);
- return ret;
- }
-@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
- return;
-
- amdgpu_bo_unpin(bo);
-+ if (bo->prime_shared_count)
-+ bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
- }
-
-diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
-index 1f8af87..cf25607 100644
---- a/drivers/gpu/drm/i915/intel_bios.c
-+++ b/drivers/gpu/drm/i915/intel_bios.c
-@@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- if (!child)
- return;
-
-- aux_channel = child->raw[25];
-+ aux_channel = child->common.aux_channel;
- ddc_pin = child->common.ddc_pin;
-
- is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
-@@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
- return false;
- }
-
--bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
-+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
-+ enum port port)
- {
- static const struct {
- u16 dp, hdmi;
-@@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
- [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
- [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
- };
-- int i;
-
- if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
- return false;
-
-- if (!dev_priv->vbt.child_dev_num)
-+ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
-+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
- return false;
-
-+ if (p_child->common.dvo_port == port_mapping[port].dp)
-+ return true;
-+
-+ /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
-+ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
-+ p_child->common.aux_channel != 0)
-+ return true;
-+
-+ return false;
-+}
-+
-+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
-+ enum port port)
-+{
-+ int i;
-+
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
-
-- if ((p_child->common.dvo_port == port_mapping[port].dp ||
-- p_child->common.dvo_port == port_mapping[port].hdmi) &&
-- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
-- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
-+ if (child_dev_is_dp_dual_mode(p_child, port))
- return true;
- }
-
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index 3051182..b8aeb28 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -4323,21 +4323,11 @@ static enum drm_connector_status
- intel_dp_detect(struct drm_connector *connector, bool force)
- {
- struct intel_dp *intel_dp = intel_attached_dp(connector);
-- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- enum drm_connector_status status = connector->status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
-- if (intel_dp->is_mst) {
-- /* MST devices are disconnected from a monitor POV */
-- intel_dp_unset_edid(intel_dp);
-- if (intel_encoder->type != INTEL_OUTPUT_EDP)
-- intel_encoder->type = INTEL_OUTPUT_DP;
-- return connector_status_disconnected;
-- }
--
- /* If full detect is not performed yet, do a full detect */
- if (!intel_dp->detect_done)
- status = intel_dp_long_pulse(intel_dp->attached_connector);
-diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
-index 68db962..8886cab1 100644
---- a/drivers/gpu/drm/i915/intel_vbt_defs.h
-+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
-@@ -280,7 +280,8 @@ struct common_child_dev_config {
- u8 dp_support:1;
- u8 tmds_support:1;
- u8 support_reserved:5;
-- u8 not_common3[12];
-+ u8 aux_channel;
-+ u8 not_common3[11];
- u8 iboost_level;
- } __packed;
-
-diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
-index d223650..11edabf 100644
---- a/drivers/i2c/Kconfig
-+++ b/drivers/i2c/Kconfig
-@@ -59,7 +59,6 @@ config I2C_CHARDEV
-
- config I2C_MUX
- tristate "I2C bus multiplexing support"
-- depends on HAS_IOMEM
- help
- Say Y here if you want the I2C core to support the ability to
- handle multiplexed I2C bus topologies, by presenting each
-diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
-index e280c8e..96de9ce 100644
---- a/drivers/i2c/muxes/Kconfig
-+++ b/drivers/i2c/muxes/Kconfig
-@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
-
- config I2C_MUX_REG
- tristate "Register-based I2C multiplexer"
-+ depends on HAS_IOMEM
- help
- If you say yes to this option, support will be included for a
- register based I2C multiplexer. This driver provides access to
-diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
-index 3278ebf..7e6f300 100644
---- a/drivers/i2c/muxes/i2c-mux-pca954x.c
-+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
-@@ -247,9 +247,9 @@ static int pca954x_probe(struct i2c_client *client,
- /* discard unconfigured channels */
- break;
- idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
-- data->deselect |= (idle_disconnect_pd
-- || idle_disconnect_dt) << num;
- }
-+ data->deselect |= (idle_disconnect_pd ||
-+ idle_disconnect_dt) << num;
-
- ret = i2c_mux_add_adapter(muxc, force, num, class);
-
-diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index c995255..71c7c4c 100644
---- a/drivers/infiniband/core/cm.c
-+++ b/drivers/infiniband/core/cm.c
-@@ -80,6 +80,8 @@ static struct ib_cm {
- __be32 random_id_operand;
- struct list_head timewait_list;
- struct workqueue_struct *wq;
-+ /* Sync on cm change port state */
-+ spinlock_t state_lock;
- } cm;
-
- /* Counter indexes ordered by attribute ID */
-@@ -161,6 +163,8 @@ struct cm_port {
- struct ib_mad_agent *mad_agent;
- struct kobject port_obj;
- u8 port_num;
-+ struct list_head cm_priv_prim_list;
-+ struct list_head cm_priv_altr_list;
- struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
- };
-
-@@ -241,6 +245,12 @@ struct cm_id_private {
- u8 service_timeout;
- u8 target_ack_delay;
-
-+ struct list_head prim_list;
-+ struct list_head altr_list;
-+ /* Indicates that the send port mad is registered and av is set */
-+ int prim_send_port_not_ready;
-+ int altr_send_port_not_ready;
-+
- struct list_head work_list;
- atomic_t work_count;
- };
-@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
- struct ib_mad_agent *mad_agent;
- struct ib_mad_send_buf *m;
- struct ib_ah *ah;
-+ struct cm_av *av;
-+ unsigned long flags, flags2;
-+ int ret = 0;
-
-+ /* don't let the port to be released till the agent is down */
-+ spin_lock_irqsave(&cm.state_lock, flags2);
-+ spin_lock_irqsave(&cm.lock, flags);
-+ if (!cm_id_priv->prim_send_port_not_ready)
-+ av = &cm_id_priv->av;
-+ else if (!cm_id_priv->altr_send_port_not_ready &&
-+ (cm_id_priv->alt_av.port))
-+ av = &cm_id_priv->alt_av;
-+ else {
-+ pr_info("%s: not valid CM id\n", __func__);
-+ ret = -ENODEV;
-+ spin_unlock_irqrestore(&cm.lock, flags);
-+ goto out;
-+ }
-+ spin_unlock_irqrestore(&cm.lock, flags);
-+ /* Make sure the port haven't released the mad yet */
- mad_agent = cm_id_priv->av.port->mad_agent;
-- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
-- if (IS_ERR(ah))
-- return PTR_ERR(ah);
-+ if (!mad_agent) {
-+ pr_info("%s: not a valid MAD agent\n", __func__);
-+ ret = -ENODEV;
-+ goto out;
-+ }
-+ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
-+ if (IS_ERR(ah)) {
-+ ret = PTR_ERR(ah);
-+ goto out;
-+ }
-
- m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
-- cm_id_priv->av.pkey_index,
-+ av->pkey_index,
- 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(m)) {
- ib_destroy_ah(ah);
-- return PTR_ERR(m);
-+ ret = PTR_ERR(m);
-+ goto out;
- }
-
- /* Timeout set by caller if response is expected. */
-@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
- atomic_inc(&cm_id_priv->refcount);
- m->context[0] = cm_id_priv;
- *msg = m;
-- return 0;
-+
-+out:
-+ spin_unlock_irqrestore(&cm.state_lock, flags2);
-+ return ret;
- }
-
- static int cm_alloc_response_msg(struct cm_port *port,
-@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
- grh, &av->ah_attr);
- }
-
--static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
-+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
-+ struct cm_id_private *cm_id_priv)
- {
- struct cm_device *cm_dev;
- struct cm_port *port = NULL;
-@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
- &av->ah_attr);
- av->timeout = path->packet_life_time + 1;
-
-- return 0;
-+ spin_lock_irqsave(&cm.lock, flags);
-+ if (&cm_id_priv->av == av)
-+ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
-+ else if (&cm_id_priv->alt_av == av)
-+ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
-+ else
-+ ret = -EINVAL;
-+
-+ spin_unlock_irqrestore(&cm.lock, flags);
-+
-+ return ret;
- }
-
- static int cm_alloc_id(struct cm_id_private *cm_id_priv)
-@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
- spin_lock_init(&cm_id_priv->lock);
- init_completion(&cm_id_priv->comp);
- INIT_LIST_HEAD(&cm_id_priv->work_list);
-+ INIT_LIST_HEAD(&cm_id_priv->prim_list);
-+ INIT_LIST_HEAD(&cm_id_priv->altr_list);
- atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
- return &cm_id_priv->id;
-@@ -892,6 +945,15 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
- break;
- }
-
-+ spin_lock_irq(&cm.lock);
-+ if (!list_empty(&cm_id_priv->altr_list) &&
-+ (!cm_id_priv->altr_send_port_not_ready))
-+ list_del(&cm_id_priv->altr_list);
-+ if (!list_empty(&cm_id_priv->prim_list) &&
-+ (!cm_id_priv->prim_send_port_not_ready))
-+ list_del(&cm_id_priv->prim_list);
-+ spin_unlock_irq(&cm.lock);
-+
- cm_free_id(cm_id->local_id);
- cm_deref_id(cm_id_priv);
- wait_for_completion(&cm_id_priv->comp);
-@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
- goto out;
- }
-
-- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
-+ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
-+ cm_id_priv);
- if (ret)
- goto error1;
- if (param->alternate_path) {
- ret = cm_init_av_by_path(param->alternate_path,
-- &cm_id_priv->alt_av);
-+ &cm_id_priv->alt_av, cm_id_priv);
- if (ret)
- goto error1;
- }
-@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
- dev_put(gid_attr.ndev);
- }
- work->path[0].gid_type = gid_attr.gid_type;
-- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
-+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
-+ cm_id_priv);
- }
- if (ret) {
- int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
-@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
- goto rejected;
- }
- if (req_msg->alt_local_lid) {
-- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
-+ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
-+ cm_id_priv);
- if (ret) {
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
- &work->path[0].sgid,
-@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
- goto out;
- }
-
-- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
-+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
-+ cm_id_priv);
- if (ret)
- goto out;
- cm_id_priv->alt_av.timeout =
-@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
-- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
-+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
-+ cm_id_priv);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
-@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
- return -EINVAL;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
-+ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
- if (ret)
- goto out;
-
-@@ -3468,7 +3535,9 @@ static int cm_establish(struct ib_cm_id *cm_id)
- static int cm_migrate(struct ib_cm_id *cm_id)
- {
- struct cm_id_private *cm_id_priv;
-+ struct cm_av tmp_av;
- unsigned long flags;
-+ int tmp_send_port_not_ready;
- int ret = 0;
-
- cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
- (cm_id->lap_state == IB_CM_LAP_UNINIT ||
- cm_id->lap_state == IB_CM_LAP_IDLE)) {
- cm_id->lap_state = IB_CM_LAP_IDLE;
-+ /* Swap address vector */
-+ tmp_av = cm_id_priv->av;
- cm_id_priv->av = cm_id_priv->alt_av;
-+ cm_id_priv->alt_av = tmp_av;
-+ /* Swap port send ready state */
-+ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
-+ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
-+ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
- } else
- ret = -EINVAL;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
- port->cm_dev = cm_dev;
- port->port_num = i;
-
-+ INIT_LIST_HEAD(&port->cm_priv_prim_list);
-+ INIT_LIST_HEAD(&port->cm_priv_altr_list);
-+
- ret = cm_create_port_fs(port);
- if (ret)
- goto error1;
-@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
- {
- struct cm_device *cm_dev = client_data;
- struct cm_port *port;
-+ struct cm_id_private *cm_id_priv;
-+ struct ib_mad_agent *cur_mad_agent;
- struct ib_port_modify port_modify = {
- .clr_port_cap_mask = IB_PORT_CM_SUP
- };
-@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
-
- port = cm_dev->port[i-1];
- ib_modify_port(ib_device, port->port_num, 0, &port_modify);
-+ /* Mark all the cm_id's as not valid */
-+ spin_lock_irq(&cm.lock);
-+ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
-+ cm_id_priv->altr_send_port_not_ready = 1;
-+ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
-+ cm_id_priv->prim_send_port_not_ready = 1;
-+ spin_unlock_irq(&cm.lock);
- /*
- * We flush the queue here after the going_down set, this
- * verify that no new works will be queued in the recv handler,
- * after that we can call the unregister_mad_agent
- */
- flush_workqueue(cm.wq);
-- ib_unregister_mad_agent(port->mad_agent);
-+ spin_lock_irq(&cm.state_lock);
-+ cur_mad_agent = port->mad_agent;
-+ port->mad_agent = NULL;
-+ spin_unlock_irq(&cm.state_lock);
-+ ib_unregister_mad_agent(cur_mad_agent);
- cm_remove_port_fs(port);
- }
-+
- device_unregister(cm_dev->device);
- kfree(cm_dev);
- }
-@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
- INIT_LIST_HEAD(&cm.device_list);
- rwlock_init(&cm.device_lock);
- spin_lock_init(&cm.lock);
-+ spin_lock_init(&cm.state_lock);
- cm.listen_service_table = RB_ROOT;
- cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
- cm.remote_id_table = RB_ROOT;
-diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
-index c68746c..bdab61d 100644
---- a/drivers/infiniband/core/umem.c
-+++ b/drivers/infiniband/core/umem.c
-@@ -174,7 +174,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-
- cur_base = addr & PAGE_MASK;
-
-- if (npages == 0) {
-+ if (npages == 0 || npages > UINT_MAX) {
- ret = -EINVAL;
- goto out;
- }
-diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
-index 0012fa5..44b1104 100644
---- a/drivers/infiniband/core/uverbs_main.c
-+++ b/drivers/infiniband/core/uverbs_main.c
-@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
- container_of(uobj, struct ib_uqp_object, uevent.uobject);
-
- idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-- if (qp != qp->real_qp) {
-- ib_close_qp(qp);
-- } else {
-+ if (qp == qp->real_qp)
- ib_uverbs_detach_umcast(qp, uqp);
-- ib_destroy_qp(qp);
-- }
-+ ib_destroy_qp(qp);
- ib_uverbs_release_uevent(file, &uqp->uevent);
- kfree(uqp);
- }
-diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
-index bcf76c3..e362998 100644
---- a/drivers/infiniband/hw/hfi1/rc.c
-+++ b/drivers/infiniband/hw/hfi1/rc.c
-@@ -87,7 +87,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
- struct hfi1_qp_priv *priv = qp->priv;
-
- qp->s_flags |= RVT_S_WAIT_RNR;
-- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
-+ priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
- add_timer(&priv->s_rnr_timer);
- }
-
-diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
-index 1694037..8f59a4f 100644
---- a/drivers/infiniband/hw/hfi1/user_sdma.c
-+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
-@@ -1152,7 +1152,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
- rb_node = hfi1_mmu_rb_extract(pq->handler,
- (unsigned long)iovec->iov.iov_base,
- iovec->iov.iov_len);
-- if (rb_node && !IS_ERR(rb_node))
-+ if (rb_node)
- node = container_of(rb_node, struct sdma_mmu_node, rb);
- else
- rb_node = NULL;
-diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
-index 5fc6233..b9bf075 100644
---- a/drivers/infiniband/hw/mlx4/ah.c
-+++ b/drivers/infiniband/hw/mlx4/ah.c
-@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
- if (vlan_tag < 0x1000)
- vlan_tag |= (ah_attr->sl & 7) << 13;
- ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
-+ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
-+ if (ret < 0)
-+ return ERR_PTR(ret);
-+ ah->av.eth.gid_index = ret;
- ah->av.eth.vlan = cpu_to_be16(vlan_tag);
- ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
- if (ah_attr->static_rate) {
-diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
-index 5df63da..efb6414 100644
---- a/drivers/infiniband/hw/mlx4/cq.c
-+++ b/drivers/infiniband/hw/mlx4/cq.c
-@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
- if (context)
- if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
- err = -EFAULT;
-- goto err_dbmap;
-+ goto err_cq_free;
- }
-
- return &cq->ibcq;
-
-+err_cq_free:
-+ mlx4_cq_free(dev->dev, &cq->mcq);
-+
- err_dbmap:
- if (context)
- mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
-diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
-index e4fac929..ebe43cb 100644
---- a/drivers/infiniband/hw/mlx5/cq.c
-+++ b/drivers/infiniband/hw/mlx5/cq.c
-@@ -917,8 +917,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
- if (err)
- goto err_create;
- } else {
-- /* for now choose 64 bytes till we have a proper interface */
-- cqe_size = 64;
-+ cqe_size = cache_line_size() == 128 ? 128 : 64;
- err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
- &index, &inlen);
- if (err)
-diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
-index bff8707a..19f8820 100644
---- a/drivers/infiniband/hw/mlx5/main.c
-+++ b/drivers/infiniband/hw/mlx5/main.c
-@@ -2100,14 +2100,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- {
- struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
- struct ib_event ibev;
--
-+ bool fatal = false;
- u8 port = 0;
-
- switch (event) {
- case MLX5_DEV_EVENT_SYS_ERROR:
-- ibdev->ib_active = false;
- ibev.event = IB_EVENT_DEVICE_FATAL;
- mlx5_ib_handle_internal_error(ibdev);
-+ fatal = true;
- break;
-
- case MLX5_DEV_EVENT_PORT_UP:
-@@ -2154,6 +2154,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
-
- if (ibdev->ib_active)
- ib_dispatch_event(&ibev);
-+
-+ if (fatal)
-+ ibdev->ib_active = false;
- }
-
- static void get_ext_port_caps(struct mlx5_ib_dev *dev)
-@@ -2835,7 +2838,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
- }
- err = init_node_data(dev);
- if (err)
-- goto err_dealloc;
-+ goto err_free_port;
-
- mutex_init(&dev->flow_db.lock);
- mutex_init(&dev->cap_mask_mutex);
-@@ -2845,7 +2848,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
- if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_roce(dev);
- if (err)
-- goto err_dealloc;
-+ goto err_free_port;
- }
-
- err = create_dev_resources(&dev->devr);
-diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
-index affc3f6..19d590d 100644
---- a/drivers/infiniband/hw/mlx5/qp.c
-+++ b/drivers/infiniband/hw/mlx5/qp.c
-@@ -2037,8 +2037,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
-
- mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
- qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
-- to_mcq(init_attr->recv_cq)->mcq.cqn,
-- to_mcq(init_attr->send_cq)->mcq.cqn);
-+ init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
-+ init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
-
- qp->trans_qp.xrcdn = xrcdn;
-
-@@ -4702,6 +4702,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
- udata->inlen))
- return ERR_PTR(-EOPNOTSUPP);
-
-+ if (init_attr->log_ind_tbl_size >
-+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
-+ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
-+ init_attr->log_ind_tbl_size,
-+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
-+ return ERR_PTR(-EINVAL);
-+ }
-+
- min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
- if (udata->outlen && udata->outlen < min_resp_len)
- return ERR_PTR(-EINVAL);
-diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
-index 33076a5..04ebbb5 100644
---- a/drivers/infiniband/sw/rdmavt/dma.c
-+++ b/drivers/infiniband/sw/rdmavt/dma.c
-@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
- if (WARN_ON(!valid_dma_direction(direction)))
- return BAD_DMA_ADDRESS;
-
-- if (offset + size > PAGE_SIZE)
-- return BAD_DMA_ADDRESS;
--
- addr = (u64)page_address(page);
- if (addr)
- addr += offset;
-diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
-index eedf2f1..7f5d735 100644
---- a/drivers/infiniband/sw/rxe/rxe_net.c
-+++ b/drivers/infiniband/sw/rxe/rxe_net.c
-@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
- {
- int err;
- struct socket *sock;
-- struct udp_port_cfg udp_cfg;
-- struct udp_tunnel_sock_cfg tnl_cfg;
--
-- memset(&udp_cfg, 0, sizeof(udp_cfg));
-+ struct udp_port_cfg udp_cfg = {0};
-+ struct udp_tunnel_sock_cfg tnl_cfg = {0};
-
- if (ipv6) {
- udp_cfg.family = AF_INET6;
-@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
- return ERR_PTR(err);
- }
-
-- tnl_cfg.sk_user_data = NULL;
- tnl_cfg.encap_type = 1;
- tnl_cfg.encap_rcv = rxe_udp_encap_recv;
-- tnl_cfg.encap_destroy = NULL;
-
- /* Setup UDP tunnel */
- setup_udp_tunnel_sock(net, sock, &tnl_cfg);
-diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
-index 22ba24f..f724a7e 100644
---- a/drivers/infiniband/sw/rxe/rxe_qp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
-@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
- if (qp->sq.queue) {
- __rxe_do_task(&qp->comp.task);
- __rxe_do_task(&qp->req.task);
-+ rxe_queue_reset(qp->sq.queue);
- }
-
- /* cleanup attributes */
-@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
- {
- qp->req.state = QP_STATE_ERROR;
- qp->resp.state = QP_STATE_ERROR;
-+ qp->attr.qp_state = IB_QPS_ERR;
-
- /* drain work and packet queues */
- rxe_run_task(&qp->resp.task, 1);
-diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
-index 0827425..d14bf49 100644
---- a/drivers/infiniband/sw/rxe/rxe_queue.c
-+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
-@@ -84,6 +84,15 @@ int do_mmap_info(struct rxe_dev *rxe,
- return -EINVAL;
- }
-
-+inline void rxe_queue_reset(struct rxe_queue *q)
-+{
-+ /* queue is comprised from header and the memory
-+ * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h
-+ * reset only the queue itself and not the management header
-+ */
-+ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
-+}
-+
- struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
- int *num_elem,
- unsigned int elem_size)
-diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
-index 239fd60..8c8641c 100644
---- a/drivers/infiniband/sw/rxe/rxe_queue.h
-+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
-@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
- size_t buf_size,
- struct rxe_mmap_info **ip_p);
-
-+void rxe_queue_reset(struct rxe_queue *q);
-+
- struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
- int *num_elem,
- unsigned int elem_size);
-diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
-index 13a848a..43bb166 100644
---- a/drivers/infiniband/sw/rxe/rxe_req.c
-+++ b/drivers/infiniband/sw/rxe/rxe_req.c
-@@ -695,7 +695,8 @@ int rxe_requester(void *arg)
- qp->req.wqe_index);
- wqe->state = wqe_state_done;
- wqe->status = IB_WC_SUCCESS;
-- goto complete;
-+ __rxe_do_task(&qp->comp.task);
-+ return 0;
- }
- payload = mtu;
- }
-@@ -744,13 +745,17 @@ int rxe_requester(void *arg)
- wqe->status = IB_WC_LOC_PROT_ERR;
- wqe->state = wqe_state_error;
-
--complete:
-- if (qp_type(qp) != IB_QPT_RC) {
-- while (rxe_completer(qp) == 0)
-- ;
-- }
--
-- return 0;
-+ /*
-+ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
-+ * ---------8<---------8<-------------
-+ * ...Note that if a completion error occurs, a Work Completion
-+ * will always be generated, even if the signaling
-+ * indicator requests an Unsignaled Completion.
-+ * ---------8<---------8<-------------
-+ */
-+ wqe->wr.send_flags |= IB_SEND_SIGNALED;
-+ __rxe_do_task(&qp->comp.task);
-+ return -EAGAIN;
-
- exit:
- return -EAGAIN;
-diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
-index 41b1138..70c646b 100644
---- a/drivers/mfd/intel-lpss.c
-+++ b/drivers/mfd/intel-lpss.c
-@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
- for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
- lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
-
-- /* Put the device into reset state */
-- writel(0, lpss->priv + LPSS_PRIV_RESETS);
--
- return 0;
- }
- EXPORT_SYMBOL_GPL(intel_lpss_suspend);
-diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
-index 3ac486a..c57e407 100644
---- a/drivers/mfd/mfd-core.c
-+++ b/drivers/mfd/mfd-core.c
-@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
- clones[i]);
- }
-
-+ put_device(dev);
-+
- return 0;
- }
- EXPORT_SYMBOL(mfd_clone_cell);
-diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
-index 94c7cc0..00dd7ff 100644
---- a/drivers/mfd/stmpe.c
-+++ b/drivers/mfd/stmpe.c
-@@ -761,6 +761,8 @@ static int stmpe1801_reset(struct stmpe *stmpe)
- if (ret < 0)
- return ret;
-
-+ msleep(10);
-+
- timeout = jiffies + msecs_to_jiffies(100);
- while (time_before(jiffies, timeout)) {
- ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
-diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index 1b5f531..bf3fd34 100644
---- a/drivers/net/virtio_net.c
-+++ b/drivers/net/virtio_net.c
-@@ -2010,23 +2010,33 @@ static struct virtio_device_id id_table[] = {
- { 0 },
- };
-
-+#define VIRTNET_FEATURES \
-+ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
-+ VIRTIO_NET_F_MAC, \
-+ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
-+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
-+ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
-+ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
-+ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
-+ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
-+ VIRTIO_NET_F_CTRL_MAC_ADDR, \
-+ VIRTIO_NET_F_MTU
-+
- static unsigned int features[] = {
-- VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
-- VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
-- VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
-- VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
-- VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-- VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
-- VIRTIO_NET_F_CTRL_MAC_ADDR,
-+ VIRTNET_FEATURES,
-+};
-+
-+static unsigned int features_legacy[] = {
-+ VIRTNET_FEATURES,
-+ VIRTIO_NET_F_GSO,
- VIRTIO_F_ANY_LAYOUT,
-- VIRTIO_NET_F_MTU,
- };
-
- static struct virtio_driver virtio_net_driver = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
-+ .feature_table_legacy = features_legacy,
-+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-index 4fdc3da..ea67ae9 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
-@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
- ret = iwl_mvm_switch_to_d3(mvm);
- if (ret)
- return ret;
-+ } else {
-+ /* In theory, we wouldn't have to stop a running sched
-+ * scan in order to start another one (for
-+ * net-detect). But in practice this doesn't seem to
-+ * work properly, so stop any running sched_scan now.
-+ */
-+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
-+ if (ret)
-+ return ret;
- }
-
- /* rfkill release can be either for wowlan or netdetect */
-@@ -2088,6 +2097,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
- iwl_mvm_update_changed_regdom(mvm);
-
- if (mvm->net_detect) {
-+ /* If this is a non-unified image, we restart the FW,
-+ * so no need to stop the netdetect scan. If that
-+ * fails, continue and try to get the wake-up reasons,
-+ * but trigger a HW restart by keeping a failure code
-+ * in ret.
-+ */
-+ if (unified_image)
-+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
-+ false);
-+
- iwl_mvm_query_netdetect_reasons(mvm, vif);
- /* has unlocked the mutex, so skip that */
- goto out;
-@@ -2271,7 +2290,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
- static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
- {
- struct iwl_mvm *mvm = inode->i_private;
-- int remaining_time = 10;
-+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
-+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-
- mvm->d3_test_active = false;
-
-@@ -2282,17 +2302,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
- iwl_abort_notification_waits(&mvm->notif_wait);
-- ieee80211_restart_hw(mvm->hw);
-+ if (!unified_image) {
-+ int remaining_time = 10;
-
-- /* wait for restart and disconnect all interfaces */
-- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
-- remaining_time > 0) {
-- remaining_time--;
-- msleep(1000);
-- }
-+ ieee80211_restart_hw(mvm->hw);
-
-- if (remaining_time == 0)
-- IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
-+ /* wait for restart and disconnect all interfaces */
-+ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
-+ remaining_time > 0) {
-+ remaining_time--;
-+ msleep(1000);
-+ }
-+
-+ if (remaining_time == 0)
-+ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
-+ }
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-index 5dd77e3..90a1f4a 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-@@ -4097,7 +4097,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
- struct iwl_mvm_internal_rxq_notif *notif,
- u32 size)
- {
-- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
- u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
- int ret;
-
-@@ -4119,7 +4118,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
- }
-
- if (notif->sync)
-- ret = wait_event_timeout(notif_waitq,
-+ ret = wait_event_timeout(mvm->rx_sync_waitq,
- atomic_read(&mvm->queue_sync_counter) == 0,
- HZ);
- WARN_ON_ONCE(!ret);
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
-index 6a615bb..e9cb970 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
-@@ -932,6 +932,7 @@ struct iwl_mvm {
- /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
- spinlock_t d0i3_tx_lock;
- wait_queue_head_t d0i3_exit_waitq;
-+ wait_queue_head_t rx_sync_waitq;
-
- /* BT-Coex */
- struct iwl_bt_coex_profile_notif last_bt_notif;
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
-index 55d9096..30bbdec 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
-@@ -618,6 +618,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
- spin_lock_init(&mvm->refs_lock);
- skb_queue_head_init(&mvm->d0i3_tx);
- init_waitqueue_head(&mvm->d0i3_exit_waitq);
-+ init_waitqueue_head(&mvm->rx_sync_waitq);
-
- atomic_set(&mvm->queue_sync_counter, 0);
-
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
-index afb7eb6..2b994be 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
-@@ -545,7 +545,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- "Received expired RX queue sync message\n");
- return;
- }
-- atomic_dec(&mvm->queue_sync_counter);
-+ if (!atomic_dec_return(&mvm->queue_sync_counter))
-+ wake_up(&mvm->rx_sync_waitq);
- }
-
- switch (internal_notif->type) {
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
-index dac120f..3707ec6 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
-@@ -1185,6 +1185,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
-
- static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
- {
-+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
-+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-+
- /* This looks a bit arbitrary, but the idea is that if we run
- * out of possible simultaneous scans and the userspace is
- * trying to run a scan type that is already running, we
-@@ -1211,12 +1214,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
- return -EBUSY;
- return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
- case IWL_MVM_SCAN_NETDETECT:
-- /* No need to stop anything for net-detect since the
-- * firmware is restarted anyway. This way, any sched
-- * scans that were running will be restarted when we
-- * resume.
-- */
-- return 0;
-+ /* For non-unified images, there's no need to stop
-+ * anything for net-detect since the firmware is
-+ * restarted anyway. This way, any sched scans that
-+ * were running will be restarted when we resume.
-+ */
-+ if (!unified_image)
-+ return 0;
-+
-+ /* If this is a unified image and we ran out of scans,
-+ * we need to stop something. Prefer stopping regular
-+ * scans, because the results are useless at this
-+ * point, and we should be able to keep running
-+ * another scheduled scan while suspended.
-+ */
-+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
-+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
-+ true);
-+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
-+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
-+ true);
-+
-+ /* fall through, something is wrong if no scan was
-+ * running but we ran out of scans.
-+ */
- default:
- WARN_ON(1);
- break;
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
-index 78cf9a7..13842ca 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
-@@ -526,48 +526,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
- MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
-
- #ifdef CONFIG_ACPI
--#define SPL_METHOD "SPLC"
--#define SPL_DOMAINTYPE_MODULE BIT(0)
--#define SPL_DOMAINTYPE_WIFI BIT(1)
--#define SPL_DOMAINTYPE_WIGIG BIT(2)
--#define SPL_DOMAINTYPE_RFEM BIT(3)
-+#define ACPI_SPLC_METHOD "SPLC"
-+#define ACPI_SPLC_DOMAIN_WIFI (0x07)
-
--static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
-+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
- {
-- union acpi_object *limits, *domain_type, *power_limit;
--
-- if (splx->type != ACPI_TYPE_PACKAGE ||
-- splx->package.count != 2 ||
-- splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
-- splx->package.elements[0].integer.value != 0) {
-- IWL_ERR(trans, "Unsupported splx structure\n");
-+ union acpi_object *data_pkg, *dflt_pwr_limit;
-+ int i;
-+
-+ /* We need at least two elements, one for the revision and one
-+ * for the data itself. Also check that the revision is
-+ * supported (currently only revision 0).
-+ */
-+ if (splc->type != ACPI_TYPE_PACKAGE ||
-+ splc->package.count < 2 ||
-+ splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
-+ splc->package.elements[0].integer.value != 0) {
-+ IWL_DEBUG_INFO(trans,
-+ "Unsupported structure returned by the SPLC method. Ignoring.\n");
- return 0;
- }
-
-- limits = &splx->package.elements[1];
-- if (limits->type != ACPI_TYPE_PACKAGE ||
-- limits->package.count < 2 ||
-- limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
-- limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
-- IWL_ERR(trans, "Invalid limits element\n");
-- return 0;
-+ /* loop through all the packages to find the one for WiFi */
-+ for (i = 1; i < splc->package.count; i++) {
-+ union acpi_object *domain;
-+
-+ data_pkg = &splc->package.elements[i];
-+
-+ /* Skip anything that is not a package with the right
-+ * amount of elements (i.e. at least 2 integers).
-+ */
-+ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
-+ data_pkg->package.count < 2 ||
-+ data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
-+ data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
-+ continue;
-+
-+ domain = &data_pkg->package.elements[0];
-+ if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
-+ break;
-+
-+ data_pkg = NULL;
- }
-
-- domain_type = &limits->package.elements[0];
-- power_limit = &limits->package.elements[1];
-- if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
-- IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
-+ if (!data_pkg) {
-+ IWL_DEBUG_INFO(trans,
-+ "No element for the WiFi domain returned by the SPLC method.\n");
- return 0;
- }
-
-- return power_limit->integer.value;
-+ dflt_pwr_limit = &data_pkg->package.elements[1];
-+ return dflt_pwr_limit->integer.value;
- }
-
- static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
- {
- acpi_handle pxsx_handle;
- acpi_handle handle;
-- struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
-+ struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
-
- pxsx_handle = ACPI_HANDLE(&pdev->dev);
-@@ -578,23 +594,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
- }
-
- /* Get the method's handle */
-- status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
-+ status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
-+ &handle);
- if (ACPI_FAILURE(status)) {
-- IWL_DEBUG_INFO(trans, "SPL method not found\n");
-+ IWL_DEBUG_INFO(trans, "SPLC method not found\n");
- return;
- }
-
- /* Call SPLC with no arguments */
-- status = acpi_evaluate_object(handle, NULL, NULL, &splx);
-+ status = acpi_evaluate_object(handle, NULL, NULL, &splc);
- if (ACPI_FAILURE(status)) {
- IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
- return;
- }
-
-- trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
-+ trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
- IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
- trans->dflt_pwr_limit);
-- kfree(splx.pointer);
-+ kfree(splc.pointer);
- }
-
- #else /* CONFIG_ACPI */
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
-index 18650dc..478bba5 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
-@@ -522,6 +522,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
- static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, u32 txq_id)
- {
-+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- txq->need_update = false;
-@@ -536,6 +537,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- return ret;
-
- spin_lock_init(&txq->lock);
-+
-+ if (txq_id == trans_pcie->cmd_queue) {
-+ static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
-+
-+ lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
-+ }
-+
- __skb_queue_head_init(&txq->overflow_q);
-
- /*
-diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
-index ec2e9c5..22394fe 100644
---- a/drivers/rtc/rtc-omap.c
-+++ b/drivers/rtc/rtc-omap.c
-@@ -109,6 +109,7 @@
- /* OMAP_RTC_OSC_REG bit fields: */
- #define OMAP_RTC_OSC_32KCLK_EN BIT(6)
- #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
-+#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
-
- /* OMAP_RTC_IRQWAKEEN bit fields: */
- #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
-@@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
- */
- if (rtc->has_ext_clk) {
- reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
-- rtc_write(rtc, OMAP_RTC_OSC_REG,
-- reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
-+ reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
-+ reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
-+ rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
- }
-
- rtc->type->lock(rtc);
-diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
-index d059ad4..97ee1b4 100644
---- a/drivers/uwb/lc-rc.c
-+++ b/drivers/uwb/lc-rc.c
-@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
- struct uwb_rc *rc = NULL;
-
- dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
-- if (dev)
-+ if (dev) {
- rc = dev_get_drvdata(dev);
-+ put_device(dev);
-+ }
-+
- return rc;
- }
-
-@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
- if (dev) {
- rc = dev_get_drvdata(dev);
- __uwb_rc_get(rc);
-+ put_device(dev);
- }
-+
- return rc;
- }
- EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
-@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
-
- dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
- find_rc_grandpa);
-- if (dev)
-+ if (dev) {
- rc = dev_get_drvdata(dev);
-+ put_device(dev);
-+ }
-+
- return rc;
- }
- EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
-@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
- struct uwb_rc *rc = NULL;
-
- dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
-- if (dev)
-+ if (dev) {
- rc = dev_get_drvdata(dev);
-+ put_device(dev);
-+ }
-
- return rc;
- }
-diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
-index c1304b8..678e937 100644
---- a/drivers/uwb/pal.c
-+++ b/drivers/uwb/pal.c
-@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
-
- dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
-
-+ put_device(dev);
-+
- return (dev != NULL);
- }
-
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index ea31931..7bd21aa 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -235,6 +235,7 @@ struct ext4_io_submit {
- #define EXT4_MAX_BLOCK_SIZE 65536
- #define EXT4_MIN_BLOCK_LOG_SIZE 10
- #define EXT4_MAX_BLOCK_LOG_SIZE 16
-+#define EXT4_MAX_CLUSTER_LOG_SIZE 30
- #ifdef __KERNEL__
- # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
- #else
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 3ec8708..ec89f50 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -3518,7 +3518,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
- blocksize > EXT4_MAX_BLOCK_SIZE) {
- ext4_msg(sb, KERN_ERR,
-- "Unsupported filesystem blocksize %d", blocksize);
-+ "Unsupported filesystem blocksize %d (%d log_block_size)",
-+ blocksize, le32_to_cpu(es->s_log_block_size));
-+ goto failed_mount;
-+ }
-+ if (le32_to_cpu(es->s_log_block_size) >
-+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-+ ext4_msg(sb, KERN_ERR,
-+ "Invalid log block size: %u",
-+ le32_to_cpu(es->s_log_block_size));
- goto failed_mount;
- }
-
-@@ -3650,6 +3658,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
- "block size (%d)", clustersize, blocksize);
- goto failed_mount;
- }
-+ if (le32_to_cpu(es->s_log_cluster_size) >
-+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-+ ext4_msg(sb, KERN_ERR,
-+ "Invalid log cluster size: %u",
-+ le32_to_cpu(es->s_log_cluster_size));
-+ goto failed_mount;
-+ }
- sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
- le32_to_cpu(es->s_log_block_size);
- sbi->s_clusters_per_group =
-diff --git a/fs/fuse/file.c b/fs/fuse/file.c
-index 3988b43..a621dd9 100644
---- a/fs/fuse/file.c
-+++ b/fs/fuse/file.c
-@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
- {
- struct inode *inode = page->mapping->host;
-
-+ /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
-+ if (!copied)
-+ goto unlock;
-+
- if (!PageUptodate(page)) {
- /* Zero any unwritten bytes at the end of the page */
- size_t endoff = (pos + copied) & ~PAGE_MASK;
-@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
-
- fuse_write_update_size(inode, pos + copied);
- set_page_dirty(page);
-+
-+unlock:
- unlock_page(page);
- put_page(page);
-
-diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
-index ab02a45..e5d1934 100644
---- a/include/linux/sunrpc/svc_xprt.h
-+++ b/include/linux/sunrpc/svc_xprt.h
-@@ -25,6 +25,7 @@ struct svc_xprt_ops {
- void (*xpo_detach)(struct svc_xprt *);
- void (*xpo_free)(struct svc_xprt *);
- int (*xpo_secure_port)(struct svc_rqst *);
-+ void (*xpo_kill_temp_xprt)(struct svc_xprt *);
- };
-
- struct svc_xprt_class {
-diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 9530fcd..9d592c6 100644
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
-
- } else if (new->flags & IRQF_TRIGGER_MASK) {
- unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
-- unsigned int omsk = irq_settings_get_trigger_mask(desc);
-+ unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
-
- if (nmsk != omsk)
- /* hope the handler works with current trigger mode */
- pr_warn("irq %d uses trigger mode %u; requested %u\n",
-- irq, nmsk, omsk);
-+ irq, omsk, nmsk);
- }
-
- *old_ptr = new;
-diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
-index 084452e..bdff5ed 100644
---- a/kernel/power/suspend_test.c
-+++ b/kernel/power/suspend_test.c
-@@ -203,8 +203,10 @@ static int __init test_suspend(void)
-
- /* RTCs have initialized by now too ... can we use one? */
- dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
-- if (dev)
-+ if (dev) {
- rtc = rtc_class_open(dev_name(dev));
-+ put_device(dev);
-+ }
- if (!rtc) {
- printk(warn_no_rtc);
- return 0;
-diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
-index d0a1617..979e7bf 100644
---- a/kernel/trace/Makefile
-+++ b/kernel/trace/Makefile
-@@ -1,8 +1,4 @@
-
--# We are fully aware of the dangers of __builtin_return_address()
--FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
--KBUILD_CFLAGS += $(FRAME_CFLAGS)
--
- # Do not instrument the tracer itself:
-
- ifdef CONFIG_FUNCTION_TRACER
-diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 84752c8..b1d7f1b 100644
---- a/kernel/trace/ftrace.c
-+++ b/kernel/trace/ftrace.c
-@@ -1856,6 +1856,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
-
- /* Update rec->flags */
- do_for_each_ftrace_rec(pg, rec) {
-+
-+ if (rec->flags & FTRACE_FL_DISABLED)
-+ continue;
-+
- /* We need to update only differences of filter_hash */
- in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
- in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
-@@ -1878,6 +1882,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
-
- /* Roll back what we did above */
- do_for_each_ftrace_rec(pg, rec) {
-+
-+ if (rec->flags & FTRACE_FL_DISABLED)
-+ continue;
-+
- if (rec == end)
- goto err_out;
-
-@@ -2391,6 +2399,10 @@ void __weak ftrace_replace_code(int enable)
- return;
-
- do_for_each_ftrace_rec(pg, rec) {
-+
-+ if (rec->flags & FTRACE_FL_DISABLED)
-+ continue;
-+
- failed = __ftrace_replace_code(rec, enable);
- if (failed) {
- ftrace_bug(failed, rec);
-@@ -2757,7 +2769,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
- struct dyn_ftrace *rec;
-
- do_for_each_ftrace_rec(pg, rec) {
-- if (FTRACE_WARN_ON_ONCE(rec->flags))
-+ if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
- pr_warn(" %pS flags:%lx\n",
- (void *)rec->ip, rec->flags);
- } while_for_each_ftrace_rec();
-@@ -3592,6 +3604,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
- goto out_unlock;
-
- do_for_each_ftrace_rec(pg, rec) {
-+
-+ if (rec->flags & FTRACE_FL_DISABLED)
-+ continue;
-+
- if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
- ret = enter_record(hash, rec, clear_filter);
- if (ret < 0) {
-@@ -3787,6 +3803,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-
- do_for_each_ftrace_rec(pg, rec) {
-
-+ if (rec->flags & FTRACE_FL_DISABLED)
-+ continue;
-+
- if (!ftrace_match_record(rec, &func_g, NULL, 0))
- continue;
-
-@@ -4679,6 +4698,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
-
- do_for_each_ftrace_rec(pg, rec) {
-
-+ if (rec->flags & FTRACE_FL_DISABLED)
-+ continue;
-+
- if (ftrace_match_record(rec, &func_g, NULL, 0)) {
- /* if it is in the array */
- exists = false;
-diff --git a/mm/Makefile b/mm/Makefile
-index 2ca1faf..295bd7a 100644
---- a/mm/Makefile
-+++ b/mm/Makefile
-@@ -21,9 +21,6 @@ KCOV_INSTRUMENT_memcontrol.o := n
- KCOV_INSTRUMENT_mmzone.o := n
- KCOV_INSTRUMENT_vmstat.o := n
-
--# Since __builtin_frame_address does work as used, disable the warning.
--CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
--
- mmu-y := nommu.o
- mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
- mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-diff --git a/net/can/bcm.c b/net/can/bcm.c
-index 8e999ff..8af9d25 100644
---- a/net/can/bcm.c
-+++ b/net/can/bcm.c
-@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
- struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
- struct sock *sk = sock->sk;
- struct bcm_sock *bo = bcm_sk(sk);
-+ int ret = 0;
-
- if (len < sizeof(*addr))
- return -EINVAL;
-
-- if (bo->bound)
-- return -EISCONN;
-+ lock_sock(sk);
-+
-+ if (bo->bound) {
-+ ret = -EISCONN;
-+ goto fail;
-+ }
-
- /* bind a device to this socket */
- if (addr->can_ifindex) {
- struct net_device *dev;
-
- dev = dev_get_by_index(&init_net, addr->can_ifindex);
-- if (!dev)
-- return -ENODEV;
--
-+ if (!dev) {
-+ ret = -ENODEV;
-+ goto fail;
-+ }
- if (dev->type != ARPHRD_CAN) {
- dev_put(dev);
-- return -ENODEV;
-+ ret = -ENODEV;
-+ goto fail;
- }
-
- bo->ifindex = dev->ifindex;
-@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
- bo->ifindex = 0;
- }
-
-- bo->bound = 1;
--
- if (proc_dir) {
- /* unique socket address as filename */
- sprintf(bo->procname, "%lu", sock_i_ino(sk));
- bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
- proc_dir,
- &bcm_proc_fops, sk);
-+ if (!bo->bcm_proc_read) {
-+ ret = -ENOMEM;
-+ goto fail;
-+ }
- }
-
-- return 0;
-+ bo->bound = 1;
-+
-+fail:
-+ release_sock(sk);
-+
-+ return ret;
- }
-
- static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
-diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
-index 0af2669..584ac76 100644
---- a/net/netfilter/nft_dynset.c
-+++ b/net/netfilter/nft_dynset.c
-@@ -143,7 +143,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
- if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
- if (!(set->flags & NFT_SET_TIMEOUT))
- return -EINVAL;
-- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
-+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
-+ tb[NFTA_DYNSET_TIMEOUT])));
- }
-
- priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
-@@ -230,7 +231,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
- goto nla_put_failure;
- if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
- goto nla_put_failure;
-- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
-+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
-+ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
- NFTA_DYNSET_PAD))
- goto nla_put_failure;
- if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
-diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
-index c3f6523..3bc1d61 100644
---- a/net/sunrpc/svc_xprt.c
-+++ b/net/sunrpc/svc_xprt.c
-@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
- void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
- {
- struct svc_xprt *xprt;
-- struct svc_sock *svsk;
-- struct socket *sock;
- struct list_head *le, *next;
- LIST_HEAD(to_be_closed);
-- struct linger no_linger = {
-- .l_onoff = 1,
-- .l_linger = 0,
-- };
-
- spin_lock_bh(&serv->sv_lock);
- list_for_each_safe(le, next, &serv->sv_tempsocks) {
-@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
- list_del_init(le);
- xprt = list_entry(le, struct svc_xprt, xpt_list);
- dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
-- svsk = container_of(xprt, struct svc_sock, sk_xprt);
-- sock = svsk->sk_sock;
-- kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
-- (char *)&no_linger, sizeof(no_linger));
-+ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
- svc_close_xprt(xprt);
- }
- }
-diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
-index 57625f6..a4bc982 100644
---- a/net/sunrpc/svcsock.c
-+++ b/net/sunrpc/svcsock.c
-@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
- return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- }
-
-+static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
-+{
-+ struct svc_sock *svsk;
-+ struct socket *sock;
-+ struct linger no_linger = {
-+ .l_onoff = 1,
-+ .l_linger = 0,
-+ };
-+
-+ svsk = container_of(xprt, struct svc_sock, sk_xprt);
-+ sock = svsk->sk_sock;
-+ kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
-+ (char *)&no_linger, sizeof(no_linger));
-+}
-+
- /*
- * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
- */
-@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
- return NULL;
- }
-
-+static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
-+{
-+}
-+
- static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
- struct net *net,
- struct sockaddr *sa, int salen,
-@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
- .xpo_has_wspace = svc_udp_has_wspace,
- .xpo_accept = svc_udp_accept,
- .xpo_secure_port = svc_sock_secure_port,
-+ .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
- };
-
- static struct svc_xprt_class svc_udp_class = {
-@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
- .xpo_has_wspace = svc_tcp_has_wspace,
- .xpo_accept = svc_tcp_accept,
- .xpo_secure_port = svc_sock_secure_port,
-+ .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
- };
-
- static struct svc_xprt_class svc_tcp_class = {
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-index 924271c..a55b809 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
-@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
- static void svc_rdma_free(struct svc_xprt *xprt);
- static int svc_rdma_has_wspace(struct svc_xprt *xprt);
- static int svc_rdma_secure_port(struct svc_rqst *);
-+static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
-
- static struct svc_xprt_ops svc_rdma_ops = {
- .xpo_create = svc_rdma_create,
-@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
- .xpo_has_wspace = svc_rdma_has_wspace,
- .xpo_accept = svc_rdma_accept,
- .xpo_secure_port = svc_rdma_secure_port,
-+ .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
- };
-
- struct svc_xprt_class svc_rdma_class = {
-@@ -1285,6 +1287,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
- return 1;
- }
-
-+static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
-+{
-+}
-+
- int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
- {
- struct ib_send_wr *bad_wr, *n_wr;
-diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
-index 973e8c1..17867e7 100755
---- a/scripts/gcc-x86_64-has-stack-protector.sh
-+++ b/scripts/gcc-x86_64-has-stack-protector.sh
-@@ -1,6 +1,6 @@
- #!/bin/sh
-
--echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
-+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
- if [ "$?" -eq "0" ] ; then
- echo y
- else
-diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 26e866f..1628180 100644
---- a/sound/pci/hda/patch_realtek.c
-+++ b/sound/pci/hda/patch_realtek.c
-@@ -6905,8 +6905,6 @@ static const struct hda_fixup alc662_fixups[] = {
- .v.pins = (const struct hda_pintbl[]) {
- { 0x15, 0x40f000f0 }, /* disabled */
- { 0x16, 0x40f000f0 }, /* disabled */
-- { 0x18, 0x01014011 }, /* LO */
-- { 0x1a, 0x01014012 }, /* LO */
- { }
- }
- },
-diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
-index 6a23302..4d9d320 100644
---- a/sound/pci/hda/thinkpad_helper.c
-+++ b/sound/pci/hda/thinkpad_helper.c
-@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
- static bool is_thinkpad(struct hda_codec *codec)
- {
- return (codec->core.subsystem_id >> 16 == 0x17aa) &&
-- (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
-+ (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
-+ acpi_dev_found("IBM0068"));
- }
-
- static void update_tpacpi_mute_led(void *private_data, int enabled)
-diff --git a/sound/usb/card.c b/sound/usb/card.c
-index 9e5276d..2ddc034 100644
---- a/sound/usb/card.c
-+++ b/sound/usb/card.c
-@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
- snd_usb_endpoint_free(ep);
-
- mutex_destroy(&chip->mutex);
-- dev_set_drvdata(&chip->dev->dev, NULL);
-+ if (!atomic_read(&chip->shutdown))
-+ dev_set_drvdata(&chip->dev->dev, NULL);
- kfree(chip);
- return 0;
- }
-diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
-index de15dbc..7214913 100644
---- a/tools/perf/util/hist.c
-+++ b/tools/perf/util/hist.c
-@@ -1596,18 +1596,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
- if (prog)
- ui_progress__update(prog, 1);
-
-+ hists->nr_entries++;
-+ if (!he->filtered) {
-+ hists->nr_non_filtered_entries++;
-+ hists__calc_col_len(hists, he);
-+ }
-+
- if (!he->leaf) {
- hists__hierarchy_output_resort(hists, prog,
- &he->hroot_in,
- &he->hroot_out,
- min_callchain_hits,
- use_callchain);
-- hists->nr_entries++;
-- if (!he->filtered) {
-- hists->nr_non_filtered_entries++;
-- hists__calc_col_len(hists, he);
-- }
--
- continue;
- }
-
-diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
-index 6e9c40e..69ccce3 100644
---- a/virt/kvm/arm/pmu.c
-+++ b/virt/kvm/arm/pmu.c
-@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
- continue;
- type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
- & ARMV8_PMU_EVTYPE_EVENT;
-- if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
-+ if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
- && (enable & BIT(i))) {
- reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
- reg = lower_32_bits(reg);
-@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
- eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
-
- 	/* Software increment event doesn't need to be backed by a perf event */
-- if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
-+ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
-+ select_idx != ARMV8_PMU_CYCLE_IDX)
- return;
-
- memset(&attr, 0, sizeof(struct perf_event_attr));
-@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
- attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
- attr.exclude_hv = 1; /* Don't count EL2 events */
- attr.exclude_host = 1; /* Don't count host events */
-- attr.config = eventsel;
-+ attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
-+ ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
-
- counter = kvm_pmu_get_counter_value(vcpu, select_idx);
- /* The initial sample period (overflow count) of an event. */
diff --git a/4.8.12/1011_linux-4.8.12.patch b/4.8.12/1011_linux-4.8.12.patch
deleted file mode 100644
index 6e460f1..0000000
--- a/4.8.12/1011_linux-4.8.12.patch
+++ /dev/null
@@ -1,1563 +0,0 @@
-diff --git a/Makefile b/Makefile
-index 2b1bcba..7b0c92f 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 8
--SUBLEVEL = 11
-+SUBLEVEL = 12
- EXTRAVERSION =
- NAME = Psychotic Stoned Sheep
-
-diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
-index af12c2d..81c11a6 100644
---- a/arch/parisc/Kconfig
-+++ b/arch/parisc/Kconfig
-@@ -33,7 +33,9 @@ config PARISC
- select HAVE_ARCH_HASH
- select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_TRACEHOOK
-- select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
-+ select GENERIC_SCHED_CLOCK
-+ select HAVE_UNSTABLE_SCHED_CLOCK if SMP
-+ select GENERIC_CLOCKEVENTS
- select ARCH_NO_COHERENT_DMA_MMAP
- select CPU_NO_EFFICIENT_FFS
-
-diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
-index 6700127..c2259d4 100644
---- a/arch/parisc/kernel/cache.c
-+++ b/arch/parisc/kernel/cache.c
-@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
- {
- unsigned long rangetime, alltime;
- unsigned long size, start;
-+ unsigned long threshold;
-
- alltime = mfctl(16);
- flush_data_cache();
-@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void)
- printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
- alltime, size, rangetime);
-
-- /* Racy, but if we see an intermediate value, it's ok too... */
-- parisc_cache_flush_threshold = size * alltime / rangetime;
--
-- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
-- if (!parisc_cache_flush_threshold)
-- parisc_cache_flush_threshold = FLUSH_THRESHOLD;
--
-- if (parisc_cache_flush_threshold > cache_info.dc_size)
-- parisc_cache_flush_threshold = cache_info.dc_size;
--
-- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
-+ threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
-+ if (threshold > cache_info.dc_size)
-+ threshold = cache_info.dc_size;
-+ if (threshold)
-+ parisc_cache_flush_threshold = threshold;
-+ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
- parisc_cache_flush_threshold/1024);
-
- /* calculate TLB flush threshold */
-@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void)
- flush_tlb_all();
- alltime = mfctl(16) - alltime;
-
-- size = PAGE_SIZE;
-+ size = 0;
- start = (unsigned long) _text;
- rangetime = mfctl(16);
- while (start < (unsigned long) _end) {
-@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void)
- printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
- alltime, size, rangetime);
-
-- parisc_tlb_flush_threshold = size * alltime / rangetime;
-- parisc_tlb_flush_threshold *= num_online_cpus();
-- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
-- if (!parisc_tlb_flush_threshold)
-- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
--
-- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
-+ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
-+ if (threshold)
-+ parisc_tlb_flush_threshold = threshold;
-+ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
- parisc_tlb_flush_threshold/1024);
- }
-
-diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
-index b743a80..6755219 100644
---- a/arch/parisc/kernel/pacache.S
-+++ b/arch/parisc/kernel/pacache.S
-@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
-
- fitmanymiddle: /* Loop if LOOP >= 2 */
- addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
-- pitlbe 0(%sr1, %r28)
-+ pitlbe %r0(%sr1, %r28)
- pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
- addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
- copy %arg3, %r31 /* Re-init inner loop count */
-@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
-
- fdtmanymiddle: /* Loop if LOOP >= 2 */
- addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
-- pdtlbe 0(%sr1, %r28)
-+ pdtlbe %r0(%sr1, %r28)
- pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
- addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
- copy %arg3, %r31 /* Re-init inner loop count */
-@@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm)
- /* Purge any old translations */
-
- #ifdef CONFIG_PA20
-- pdtlb,l 0(%r28)
-- pdtlb,l 0(%r29)
-+ pdtlb,l %r0(%r28)
-+ pdtlb,l %r0(%r29)
- #else
- tlb_lock %r20,%r21,%r22
-- pdtlb 0(%r28)
-- pdtlb 0(%r29)
-+ pdtlb %r0(%r28)
-+ pdtlb %r0(%r29)
- tlb_unlock %r20,%r21,%r22
- #endif
-
-@@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm)
- /* Purge any old translation */
-
- #ifdef CONFIG_PA20
-- pdtlb,l 0(%r28)
-+ pdtlb,l %r0(%r28)
- #else
- tlb_lock %r20,%r21,%r22
-- pdtlb 0(%r28)
-+ pdtlb %r0(%r28)
- tlb_unlock %r20,%r21,%r22
- #endif
-
-@@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm)
- /* Purge any old translation */
-
- #ifdef CONFIG_PA20
-- pdtlb,l 0(%r28)
-+ pdtlb,l %r0(%r28)
- #else
- tlb_lock %r20,%r21,%r22
-- pdtlb 0(%r28)
-+ pdtlb %r0(%r28)
- tlb_unlock %r20,%r21,%r22
- #endif
-
-@@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm)
- sync
-
- #ifdef CONFIG_PA20
-- pdtlb,l 0(%r25)
-+ pdtlb,l %r0(%r25)
- #else
- tlb_lock %r20,%r21,%r22
-- pdtlb 0(%r25)
-+ pdtlb %r0(%r25)
- tlb_unlock %r20,%r21,%r22
- #endif
-
-@@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm)
- depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
- #endif
-
-- /* Purge any old translation */
-+ /* Purge any old translation. Note that the FIC instruction
-+ * may use either the instruction or data TLB. Given that we
-+ * have a flat address space, it's not clear which TLB will be
-+ * used. So, we purge both entries. */
-
- #ifdef CONFIG_PA20
-+ pdtlb,l %r0(%r28)
- pitlb,l %r0(%sr4,%r28)
- #else
- tlb_lock %r20,%r21,%r22
-- pitlb (%sr4,%r28)
-+ pdtlb %r0(%r28)
-+ pitlb %r0(%sr4,%r28)
- tlb_unlock %r20,%r21,%r22
- #endif
-
-@@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm)
- sync
-
- #ifdef CONFIG_PA20
-+ pdtlb,l %r0(%r28)
- pitlb,l %r0(%sr4,%r25)
- #else
- tlb_lock %r20,%r21,%r22
-- pitlb (%sr4,%r25)
-+ pdtlb %r0(%r28)
-+ pitlb %r0(%sr4,%r25)
- tlb_unlock %r20,%r21,%r22
- #endif
-
-diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
-index 02d9ed0..494ff6e 100644
---- a/arch/parisc/kernel/pci-dma.c
-+++ b/arch/parisc/kernel/pci-dma.c
-@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
-
- if (!pte_none(*pte))
- printk(KERN_ERR "map_pte_uncached: page already exists\n");
-- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
- purge_tlb_start(flags);
-+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
- pdtlb_kernel(orig_vaddr);
- purge_tlb_end(flags);
- vaddr += PAGE_SIZE;
-diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
-index 81d6f63..2e66a88 100644
---- a/arch/parisc/kernel/setup.c
-+++ b/arch/parisc/kernel/setup.c
-@@ -334,6 +334,10 @@ static int __init parisc_init(void)
- /* tell PDC we're Linux. Nevermind failure. */
- pdc_stable_write(0x40, &osid, sizeof(osid));
-
-+ /* start with known state */
-+ flush_cache_all_local();
-+ flush_tlb_all_local(NULL);
-+
- processor_init();
- #ifdef CONFIG_SMP
- pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
-diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
-index 9b63b87..325f30d 100644
---- a/arch/parisc/kernel/time.c
-+++ b/arch/parisc/kernel/time.c
-@@ -14,6 +14,7 @@
- #include <linux/module.h>
- #include <linux/rtc.h>
- #include <linux/sched.h>
-+#include <linux/sched_clock.h>
- #include <linux/kernel.h>
- #include <linux/param.h>
- #include <linux/string.h>
-@@ -39,18 +40,6 @@
-
- static unsigned long clocktick __read_mostly; /* timer cycles per tick */
-
--#ifndef CONFIG_64BIT
--/*
-- * The processor-internal cycle counter (Control Register 16) is used as time
-- * source for the sched_clock() function. This register is 64bit wide on a
-- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
-- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
-- * with a per-cpu variable which we increase every time the counter
-- * wraps-around (which happens every ~4 seconds).
-- */
--static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
--#endif
--
- /*
- * We keep time on PA-RISC Linux by using the Interval Timer which is
- * a pair of registers; one is read-only and one is write-only; both
-@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
- */
- mtctl(next_tick, 16);
-
--#if !defined(CONFIG_64BIT)
-- /* check for overflow on a 32bit kernel (every ~4 seconds). */
-- if (unlikely(next_tick < now))
-- this_cpu_inc(cr16_high_32_bits);
--#endif
--
- /* Skip one clocktick on purpose if we missed next_tick.
- * The new CR16 must be "later" than current CR16 otherwise
- * itimer would not fire until CR16 wrapped - e.g 4 seconds
-@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
-
- /* clock source code */
-
--static cycle_t read_cr16(struct clocksource *cs)
-+static cycle_t notrace read_cr16(struct clocksource *cs)
- {
- return get_cycles();
- }
-@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
- }
-
-
--/*
-- * sched_clock() framework
-- */
--
--static u32 cyc2ns_mul __read_mostly;
--static u32 cyc2ns_shift __read_mostly;
--
--u64 sched_clock(void)
-+static u64 notrace read_cr16_sched_clock(void)
- {
-- u64 now;
--
-- /* Get current cycle counter (Control Register 16). */
--#ifdef CONFIG_64BIT
-- now = mfctl(16);
--#else
-- now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
--#endif
--
-- /* return the value in ns (cycles_2_ns) */
-- return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
-+ return get_cycles();
- }
-
-
-@@ -316,17 +282,16 @@ u64 sched_clock(void)
-
- void __init time_init(void)
- {
-- unsigned long current_cr16_khz;
-+ unsigned long cr16_hz;
-
-- current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
- clocktick = (100 * PAGE0->mem_10msec) / HZ;
--
-- /* calculate mult/shift values for cr16 */
-- clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
-- NSEC_PER_MSEC, 0);
--
- start_cpu_itimer(); /* get CPU 0 started */
-
-+ cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
-+
- /* register at clocksource framework */
-- clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
-+ clocksource_register_hz(&clocksource_cr16, cr16_hz);
-+
-+ /* register as sched_clock source */
-+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
- }
-diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
-index d80161b..60522d2 100644
---- a/arch/powerpc/boot/main.c
-+++ b/arch/powerpc/boot/main.c
-@@ -217,8 +217,12 @@ void start(void)
- console_ops.close();
-
- kentry = (kernel_entry_t) vmlinux.addr;
-- if (ft_addr)
-- kentry(ft_addr, 0, NULL);
-+ if (ft_addr) {
-+ if(platform_ops.kentry)
-+		if (platform_ops.kentry)
-+ else
-+ kentry(ft_addr, 0, NULL);
-+ }
- else
- kentry((unsigned long)initrd.addr, initrd.size,
- loader_info.promptr);
-diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
-index ff2f1b9..2a99fc9 100644
---- a/arch/powerpc/boot/opal-calls.S
-+++ b/arch/powerpc/boot/opal-calls.S
-@@ -12,6 +12,19 @@
-
- .text
-
-+ .globl opal_kentry
-+opal_kentry:
-+ /* r3 is the fdt ptr */
-+ mtctr r4
-+ li r4, 0
-+ li r5, 0
-+ li r6, 0
-+ li r7, 0
-+ ld r11,opal@got(r2)
-+ ld r8,0(r11)
-+ ld r9,8(r11)
-+ bctr
-+
- #define OPAL_CALL(name, token) \
- .globl name; \
- name: \
-diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
-index 1f37e1c..d7b4fd4 100644
---- a/arch/powerpc/boot/opal.c
-+++ b/arch/powerpc/boot/opal.c
-@@ -23,14 +23,25 @@ struct opal {
-
- static u32 opal_con_id;
-
-+/* see opal-wrappers.S */
- int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
- int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
- int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
- int64_t opal_console_flush(uint64_t term_number);
- int64_t opal_poll_events(uint64_t *outstanding_event_mask);
-
-+void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);
-+
- static int opal_con_open(void)
- {
-+ /*
-+ * When OPAL loads the boot kernel it stashes the OPAL base and entry
-+ * address in r8 and r9 so the kernel can use the OPAL console
-+ * before unflattening the devicetree. While executing the wrapper will
-+ * probably trash r8 and r9 so this kentry hook restores them before
-+ * entering the decompressed kernel.
-+ */
-+ platform_ops.kentry = opal_kentry;
- return 0;
- }
-
-diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
-index e19b64e..deeae6f 100644
---- a/arch/powerpc/boot/ops.h
-+++ b/arch/powerpc/boot/ops.h
-@@ -30,6 +30,7 @@ struct platform_ops {
- void * (*realloc)(void *ptr, unsigned long size);
- void (*exit)(void);
- void * (*vmlinux_alloc)(unsigned long size);
-+ void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
- };
- extern struct platform_ops platform_ops;
-
-diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
-index e2fb408..fd10b58 100644
---- a/arch/powerpc/include/asm/mmu.h
-+++ b/arch/powerpc/include/asm/mmu.h
-@@ -29,6 +29,12 @@
- */
-
- /*
-+ * Kernel read only support.
-+ * We added the ppp value 0b110 in ISA 2.04.
-+ */
-+#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
-+
-+/*
- * We need to clear top 16bits of va (from the remaining 64 bits )in
- * tlbie* instructions
- */
-@@ -103,10 +109,10 @@
- #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
- #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
- #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
--#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
--#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
--#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
--#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-+#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
-+#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
-+#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
-+#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
- #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
- MMU_FTR_CI_LARGE_PAGE
- #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
-diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index 978dada..52cbf04 100644
---- a/arch/powerpc/include/asm/reg.h
-+++ b/arch/powerpc/include/asm/reg.h
-@@ -355,6 +355,7 @@
- #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
- #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
- #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
-+#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
- #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
- #define LPCR_MER_SH 11
- #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
-diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
-index 52ff3f0..37c027c 100644
---- a/arch/powerpc/kernel/cpu_setup_power.S
-+++ b/arch/powerpc/kernel/cpu_setup_power.S
-@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9)
- li r0,0
- mtspr SPRN_LPID,r0
- mfspr r3,SPRN_LPCR
-- ori r3, r3, LPCR_PECEDH
-- ori r3, r3, LPCR_HVICE
-+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
-+ or r3, r3, r4
- bl __init_LPCR
- bl __init_HFSCR
- bl __init_tlb_power9
-@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9)
- li r0,0
- mtspr SPRN_LPID,r0
- mfspr r3,SPRN_LPCR
-- ori r3, r3, LPCR_PECEDH
-- ori r3, r3, LPCR_HVICE
-+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
-+ or r3, r3, r4
- bl __init_LPCR
- bl __init_HFSCR
- bl __init_tlb_power9
-diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
-index 28923b2..8dff9ce 100644
---- a/arch/powerpc/mm/hash_utils_64.c
-+++ b/arch/powerpc/mm/hash_utils_64.c
-@@ -190,8 +190,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
- /*
- * Kernel read only mapped with ppp bits 0b110
- */
-- if (!(pteflags & _PAGE_WRITE))
-- rflags |= (HPTE_R_PP0 | 0x2);
-+ if (!(pteflags & _PAGE_WRITE)) {
-+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
-+ rflags |= (HPTE_R_PP0 | 0x2);
-+ else
-+ rflags |= 0x3;
-+ }
- } else {
- if (pteflags & _PAGE_RWX)
- rflags |= 0x2;
-diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
-index 178989e..ea960d6 100644
---- a/arch/tile/kernel/time.c
-+++ b/arch/tile/kernel/time.c
-@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
- */
- unsigned long long sched_clock(void)
- {
-- return clocksource_cyc2ns(get_cycles(),
-- sched_clock_mult, SCHED_CLOCK_SHIFT);
-+ return mult_frac(get_cycles(),
-+ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
- }
-
- int setup_profiling_timer(unsigned int multiplier)
-diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
-index 9b983a4..8fc714b 100644
---- a/arch/x86/events/intel/ds.c
-+++ b/arch/x86/events/intel/ds.c
-@@ -1070,20 +1070,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
- }
-
- /*
-- * We use the interrupt regs as a base because the PEBS record
-- * does not contain a full regs set, specifically it seems to
-- * lack segment descriptors, which get used by things like
-- * user_mode().
-+ * We use the interrupt regs as a base because the PEBS record does not
-+ * contain a full regs set, specifically it seems to lack segment
-+ * descriptors, which get used by things like user_mode().
- *
-- * In the simple case fix up only the IP and BP,SP regs, for
-- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
-+ * In the simple case fix up only the IP for PERF_SAMPLE_IP.
-+ *
-+ * We must however always use BP,SP from iregs for the unwinder to stay
-+ * sane; the record BP,SP can point into thin air when the record is
-+ * from a previous PMI context or an (I)RET happened between the record
-+ * and PMI.
- */
- *regs = *iregs;
- regs->flags = pebs->flags;
- set_linear_ip(regs, pebs->ip);
-- regs->bp = pebs->bp;
-- regs->sp = pebs->sp;
-
- if (sample_type & PERF_SAMPLE_REGS_INTR) {
- regs->ax = pebs->ax;
-@@ -1092,10 +1092,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
- regs->dx = pebs->dx;
- regs->si = pebs->si;
- regs->di = pebs->di;
-- regs->bp = pebs->bp;
-- regs->sp = pebs->sp;
-
-- regs->flags = pebs->flags;
-+ /*
-+ * Per the above; only set BP,SP if we don't need callchains.
-+ *
-+ * XXX: does this make sense?
-+ */
-+ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
-+ regs->bp = pebs->bp;
-+ regs->sp = pebs->sp;
-+ }
-+
-+ /*
-+ * Preserve PERF_EFLAGS_VM from set_linear_ip().
-+ */
-+ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
- #ifndef CONFIG_X86_32
- regs->r8 = pebs->r8;
- regs->r9 = pebs->r9;
-diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
-index 8c4a477..181c238 100644
---- a/arch/x86/events/perf_event.h
-+++ b/arch/x86/events/perf_event.h
-@@ -113,7 +113,7 @@ struct debug_store {
- * Per register state.
- */
- struct er_account {
-- raw_spinlock_t lock; /* per-core: protect structure */
-+ raw_spinlock_t lock; /* per-core: protect structure */
- u64 config; /* extra MSR config */
- u64 reg; /* extra MSR number */
- atomic_t ref; /* reference count */
-diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
-index 3fc03a0..c289e2f 100644
---- a/arch/x86/kernel/fpu/core.c
-+++ b/arch/x86/kernel/fpu/core.c
-@@ -517,14 +517,14 @@ void fpu__clear(struct fpu *fpu)
- {
- WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
-
-- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
-- /* FPU state will be reallocated lazily at the first use. */
-- fpu__drop(fpu);
-- } else {
-- if (!fpu->fpstate_active) {
-- fpu__activate_curr(fpu);
-- user_fpu_begin();
-- }
-+ fpu__drop(fpu);
-+
-+ /*
-+ * Make sure fpstate is cleared and initialized.
-+ */
-+ if (static_cpu_has(X86_FEATURE_FPU)) {
-+ fpu__activate_curr(fpu);
-+ user_fpu_begin();
- copy_init_fpstate_to_fpregs();
- }
- }
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index cbd7b92..a3ce9d2 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
- static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
- {
- int rc;
-- unsigned short sel, old_sel;
-- struct desc_struct old_desc, new_desc;
-- const struct x86_emulate_ops *ops = ctxt->ops;
-+ unsigned short sel;
-+ struct desc_struct new_desc;
- u8 cpl = ctxt->ops->cpl(ctxt);
-
-- /* Assignment of RIP may only fail in 64-bit mode */
-- if (ctxt->mode == X86EMUL_MODE_PROT64)
-- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
-- VCPU_SREG_CS);
--
- memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
-
- rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
-@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
- return rc;
-
- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
-- if (rc != X86EMUL_CONTINUE) {
-- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
-- /* assigning eip failed; restore the old cs */
-- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
-- return rc;
-- }
-+ /* Error handling is not implemented. */
-+ if (rc != X86EMUL_CONTINUE)
-+ return X86EMUL_UNHANDLEABLE;
-+
- return rc;
- }
-
-@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
- {
- int rc;
- unsigned long eip, cs;
-- u16 old_cs;
- int cpl = ctxt->ops->cpl(ctxt);
-- struct desc_struct old_desc, new_desc;
-- const struct x86_emulate_ops *ops = ctxt->ops;
--
-- if (ctxt->mode == X86EMUL_MODE_PROT64)
-- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
-- VCPU_SREG_CS);
-+ struct desc_struct new_desc;
-
- rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
- if (rc != X86EMUL_CONTINUE)
-@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
- if (rc != X86EMUL_CONTINUE)
- return rc;
- rc = assign_eip_far(ctxt, eip, &new_desc);
-- if (rc != X86EMUL_CONTINUE) {
-- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
-- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
-- }
-+ /* Error handling is not implemented. */
-+ if (rc != X86EMUL_CONTINUE)
-+ return X86EMUL_UNHANDLEABLE;
-+
- return rc;
- }
-
-diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
-index 1a22de7..6e219e5 100644
---- a/arch/x86/kvm/ioapic.c
-+++ b/arch/x86/kvm/ioapic.c
-@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
- static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
- {
- ioapic->rtc_status.pending_eoi = 0;
-- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
-+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
- }
-
- static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
-diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
-index 7d2692a..1cc6e54 100644
---- a/arch/x86/kvm/ioapic.h
-+++ b/arch/x86/kvm/ioapic.h
-@@ -42,13 +42,13 @@ struct kvm_vcpu;
-
- struct dest_map {
- /* vcpu bitmap where IRQ has been sent */
-- DECLARE_BITMAP(map, KVM_MAX_VCPUS);
-+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
-
- /*
- * Vector sent to a given vcpu, only valid when
- * the vcpu's bit in map is set
- */
-- u8 vectors[KVM_MAX_VCPUS];
-+ u8 vectors[KVM_MAX_VCPU_ID];
- };
-
-
-diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
-index 25810b1..e7a112a 100644
---- a/arch/x86/kvm/irq_comm.c
-+++ b/arch/x86/kvm/irq_comm.c
-@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
- bool line_status)
- {
- struct kvm_pic *pic = pic_irqchip(kvm);
-+
-+ /*
-+ * XXX: rejecting pic routes when pic isn't in use would be better,
-+ * but the default routing table is installed while kvm->arch.vpic is
-+ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
-+ */
-+ if (!pic)
-+ return -1;
-+
- return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
- }
-
-@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
- bool line_status)
- {
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-+
-+ if (!ioapic)
-+ return -1;
-+
- return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
- line_status);
- }
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index b62c852..d2255e4 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
- *mask = dest_id & 0xff;
- return true;
- case KVM_APIC_MODE_XAPIC_CLUSTER:
-- *cluster = map->xapic_cluster_map[dest_id >> 4];
-+ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
- *mask = dest_id & 0xf;
- return true;
- default:
-diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
-index 832b98f..a3a983f 100644
---- a/arch/x86/mm/extable.c
-+++ b/arch/x86/mm/extable.c
-@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
- if (early_recursion_flag > 2)
- goto halt_loop;
-
-- if (regs->cs != __KERNEL_CS)
-+ /*
-+ * Old CPUs leave the high bits of CS on the stack
-+ * undefined. I'm not sure which CPUs do this, but at least
-+ * the 486 DX works this way.
-+ */
-+ if ((regs->cs & 0xFFFF) != __KERNEL_CS)
- goto fail;
-
- /*
-diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
-index 865f46e..c80765b 100644
---- a/crypto/asymmetric_keys/x509_cert_parser.c
-+++ b/crypto/asymmetric_keys/x509_cert_parser.c
-@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
- return cert;
-
- error_decode:
-- kfree(cert->pub->key);
- kfree(ctx);
- error_no_ctx:
- x509_free_certificate(cert);
-diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
-index 29f600f..ff64313 100644
---- a/drivers/dax/dax.c
-+++ b/drivers/dax/dax.c
-@@ -323,8 +323,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
- if (!dax_dev->alive)
- return -ENXIO;
-
-- /* prevent private / writable mappings from being established */
-- if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
-+ /* prevent private mappings from being established */
-+ if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
- dev_info(dev, "%s: %s: fail, attempted private mapping\n",
- current->comm, func);
- return -EINVAL;
-diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
-index 73ae849..76dd42d 100644
---- a/drivers/dax/pmem.c
-+++ b/drivers/dax/pmem.c
-@@ -77,7 +77,9 @@ static int dax_pmem_probe(struct device *dev)
- nsio = to_nd_namespace_io(&ndns->dev);
-
- /* parse the 'pfn' info block via ->rw_bytes */
-- devm_nsio_enable(dev, nsio);
-+ rc = devm_nsio_enable(dev, nsio);
-+ if (rc)
-+ return rc;
- altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
- if (IS_ERR(altmap))
- return PTR_ERR(altmap);
-diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
-index 58470f5..8c53748 100644
---- a/drivers/iommu/dmar.c
-+++ b/drivers/iommu/dmar.c
-@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
- struct pci_dev *pdev = to_pci_dev(data);
- struct dmar_pci_notify_info *info;
-
-- /* Only care about add/remove events for physical functions */
-+ /* Only care about add/remove events for physical functions.
-+ * For VFs we actually do the lookup based on the corresponding
-+ * PF in device_to_iommu() anyway. */
- if (pdev->is_virtfn)
- return NOTIFY_DONE;
- if (action != BUS_NOTIFY_ADD_DEVICE &&
-diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
-index 1257b0b..7fb5387 100644
---- a/drivers/iommu/intel-iommu.c
-+++ b/drivers/iommu/intel-iommu.c
-@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
- return NULL;
-
- if (dev_is_pci(dev)) {
-+ struct pci_dev *pf_pdev;
-+
- pdev = to_pci_dev(dev);
-+ /* VFs aren't listed in scope tables; we need to look up
-+ * the PF instead to find the IOMMU. */
-+ pf_pdev = pci_physfn(pdev);
-+ dev = &pf_pdev->dev;
- segment = pci_domain_nr(pdev->bus);
- } else if (has_acpi_companion(dev))
- dev = &ACPI_COMPANION(dev)->dev;
-@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, tmp) {
- if (tmp == dev) {
-+ /* For a VF use its original BDF# not that of the PF
-+ * which we used for the IOMMU lookup. Strictly speaking
-+ * we could do this for all PCI devices; we only need to
-+ * get the BDF# from the scope table for ACPI matches. */
-+ if (pdev->is_virtfn)
-+ goto got_pdev;
-+
- *bus = drhd->devices[i].bus;
- *devfn = drhd->devices[i].devfn;
- goto out;
-diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
-index 8ebb353..cb72e00 100644
---- a/drivers/iommu/intel-svm.c
-+++ b/drivers/iommu/intel-svm.c
-@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
- struct page *pages;
- int order;
-
-- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
-- if (order < 0)
-- order = 0;
--
-+ /* Start at 2 because it's defined as 2^(1+PSS) */
-+ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
-+
-+ /* Eventually I'm promised we will get a multi-level PASID table
-+ * and it won't have to be physically contiguous. Until then,
-+ * limit the size because 8MiB contiguous allocations can be hard
-+ * to come by. The limit of 0x20000, which is 1MiB for each of
-+ * the PASID and PASID-state tables, is somewhat arbitrary. */
-+ if (iommu->pasid_max > 0x20000)
-+ iommu->pasid_max = 0x20000;
-+
-+ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!pages) {
- pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
-@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
- pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
-
- if (ecap_dis(iommu->ecap)) {
-+ /* Just making it explicit... */
-+ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (pages)
- iommu->pasid_state_table = page_address(pages);
-@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
-
- int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
- {
-- int order;
--
-- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
-- if (order < 0)
-- order = 0;
-+ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
-
- if (iommu->pasid_table) {
- free_pages((unsigned long)iommu->pasid_table, order);
-@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
- }
- svm->iommu = iommu;
-
-- if (pasid_max > 2 << ecap_pss(iommu->ecap))
-- pasid_max = 2 << ecap_pss(iommu->ecap);
-+ if (pasid_max > iommu->pasid_max)
-+ pasid_max = iommu->pasid_max;
-
- /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
- ret = idr_alloc(&iommu->pasid_idr, svm,
-diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
-index 317ef63..8d96a22 100644
---- a/drivers/media/tuners/tuner-xc2028.c
-+++ b/drivers/media/tuners/tuner-xc2028.c
-@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
- int i;
- tuner_dbg("%s called\n", __func__);
-
-+ /* free allocated f/w string */
-+ if (priv->fname != firmware_name)
-+ kfree(priv->fname);
-+ priv->fname = NULL;
-+
-+ priv->state = XC2028_NO_FIRMWARE;
-+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
-+
- if (!priv->firm)
- return;
-
-@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
-
- priv->firm = NULL;
- priv->firm_size = 0;
-- priv->state = XC2028_NO_FIRMWARE;
--
-- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
- }
-
- static int load_all_firmwares(struct dvb_frontend *fe,
-@@ -884,9 +889,8 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
- return 0;
-
- fail:
-- priv->state = XC2028_NO_FIRMWARE;
-+ free_firmware(priv);
-
-- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
- if (retry_count < 8) {
- msleep(50);
- retry_count++;
-@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
- mutex_lock(&xc2028_list_mutex);
-
- /* only perform final cleanup if this is the last instance */
-- if (hybrid_tuner_report_instance_count(priv) == 1) {
-+ if (hybrid_tuner_report_instance_count(priv) == 1)
- free_firmware(priv);
-- kfree(priv->ctrl.fname);
-- priv->ctrl.fname = NULL;
-- }
-
- if (priv)
- hybrid_tuner_release_state(priv);
-@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
-
- /*
- * Copy the config data.
-- * For the firmware name, keep a local copy of the string,
-- * in order to avoid troubles during device release.
- */
-- kfree(priv->ctrl.fname);
-- priv->ctrl.fname = NULL;
- memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
-- if (p->fname) {
-- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
-- if (priv->ctrl.fname == NULL) {
-- rc = -ENOMEM;
-- goto unlock;
-- }
-- }
-
- /*
- * If firmware name changed, frees firmware. As free_firmware will
-@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
-
- if (priv->state == XC2028_NO_FIRMWARE) {
- if (!firmware_name[0])
-- priv->fname = priv->ctrl.fname;
-+ priv->fname = kstrdup(p->fname, GFP_KERNEL);
- else
- priv->fname = firmware_name;
-
-+ if (!priv->fname) {
-+ rc = -ENOMEM;
-+ goto unlock;
-+ }
-+
- rc = request_firmware_nowait(THIS_MODULE, 1,
- priv->fname,
- priv->i2c_props.adap->dev.parent,
-diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
-index 239be2f..2267601 100644
---- a/drivers/mmc/host/sdhci-of-esdhc.c
-+++ b/drivers/mmc/host/sdhci-of-esdhc.c
-@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
- return ret;
- }
- }
-+ /*
-+ * The DAT[3:0] line signal levels and the CMD line signal level are
-+ * not compatible with standard SDHC register. The line signal levels
-+ * DAT[7:0] are at bits 31:24 and the command line signal level is at
-+ * bit 23. All other bits are the same as in the standard SDHC
-+ * register.
-+ */
-+ if (spec_reg == SDHCI_PRESENT_STATE) {
-+ ret = value & 0x000fffff;
-+ ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
-+ ret |= (value << 1) & SDHCI_CMD_LVL;
-+ return ret;
-+ }
-+
- ret = value;
- return ret;
- }
-diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
-index 0411c9f..1b3bd1c 100644
---- a/drivers/mmc/host/sdhci.h
-+++ b/drivers/mmc/host/sdhci.h
-@@ -73,6 +73,7 @@
- #define SDHCI_DATA_LVL_MASK 0x00F00000
- #define SDHCI_DATA_LVL_SHIFT 20
- #define SDHCI_DATA_0_LVL_MASK 0x00100000
-+#define SDHCI_CMD_LVL 0x01000000
-
- #define SDHCI_HOST_CONTROL 0x28
- #define SDHCI_CTRL_LED 0x01
-diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
-index 46c0f5e..58e6029 100644
---- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
-+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
-@@ -3894,6 +3894,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
- }
- }
-
-+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
-+{
-+ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
-+}
-+
- /**
- * _scsih_flush_running_cmds - completing outstanding commands.
- * @ioc: per adapter object
-@@ -3915,6 +3920,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
- if (!scmd)
- continue;
- count++;
-+ if (ata_12_16_cmd(scmd))
-+ scsi_internal_device_unblock(scmd->device,
-+ SDEV_RUNNING);
- mpt3sas_base_free_smid(ioc, smid);
- scsi_dma_unmap(scmd);
- if (ioc->pci_error_recovery)
-@@ -4019,8 +4027,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
- SAM_STAT_CHECK_CONDITION;
- }
-
--
--
- /**
- * scsih_qcmd - main scsi request entry point
- * @scmd: pointer to scsi command object
-@@ -4047,6 +4053,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
- if (ioc->logging_level & MPT_DEBUG_SCSI)
- scsi_print_command(scmd);
-
-+ /*
-+ * Lock the device for any subsequent command until command is
-+ * done.
-+ */
-+ if (ata_12_16_cmd(scmd))
-+ scsi_internal_device_block(scmd->device);
-+
- sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- scmd->result = DID_NO_CONNECT << 16;
-@@ -4622,6 +4635,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
- if (scmd == NULL)
- return 1;
-
-+ if (ata_12_16_cmd(scmd))
-+ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
-+
- mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
-
- if (mpi_reply == NULL) {
-diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
-index 7a22307..afada65 100644
---- a/drivers/thermal/intel_powerclamp.c
-+++ b/drivers/thermal/intel_powerclamp.c
-@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
- .set_cur_state = powerclamp_set_cur_state,
- };
-
-+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
-+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-+
- static int __init powerclamp_probe(void)
- {
-- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
-+
-+ if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("CPU does not support MWAIT");
- return -ENODEV;
- }
-diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
-index 69426e6..3dbb4a2 100644
---- a/drivers/usb/chipidea/core.c
-+++ b/drivers/usb/chipidea/core.c
-@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
- if (!ci)
- return -ENOMEM;
-
-+ spin_lock_init(&ci->lock);
- ci->dev = dev;
- ci->platdata = dev_get_platdata(dev);
- ci->imx28_write_fix = !!(ci->platdata->flags &
-diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
-index b933568..bced28f 100644
---- a/drivers/usb/chipidea/udc.c
-+++ b/drivers/usb/chipidea/udc.c
-@@ -1895,8 +1895,6 @@ static int udc_start(struct ci_hdrc *ci)
- struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
- int retval = 0;
-
-- spin_lock_init(&ci->lock);
--
- ci->gadget.ops = &usb_gadget_ops;
- ci->gadget.speed = USB_SPEED_UNKNOWN;
- ci->gadget.max_speed = USB_SPEED_HIGH;
-diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
-index f61477b..243ac5e 100644
---- a/drivers/usb/serial/cp210x.c
-+++ b/drivers/usb/serial/cp210x.c
-@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
- { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
- { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
-+ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
- { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
- { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
- { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
-diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
-index 0ff7f38..6e9fc8b 100644
---- a/drivers/usb/serial/ftdi_sio.c
-+++ b/drivers/usb/serial/ftdi_sio.c
-@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
- { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
- { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
- { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
-+ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
-+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
- { } /* Terminating entry */
- };
-
-diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
-index 21011c0..48ee04c 100644
---- a/drivers/usb/serial/ftdi_sio_ids.h
-+++ b/drivers/usb/serial/ftdi_sio_ids.h
-@@ -596,6 +596,12 @@
- #define STK541_PID 0x2109 /* Zigbee Controller */
-
- /*
-+ * Texas Instruments
-+ */
-+#define TI_VID 0x0451
-+#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
-+
-+/*
- * Blackfin gnICE JTAG
- * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
- */
-diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
-index ffd0867..1a59f33 100644
---- a/drivers/usb/storage/transport.c
-+++ b/drivers/usb/storage/transport.c
-@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
-
- /* COMMAND STAGE */
- /* let's send the command via the control pipe */
-+ /*
-+ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
-+ * Stack may be vmallocated. So no DMA for us. Make a copy.
-+ */
-+ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
- result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
- US_CBI_ADSC,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
-- us->ifnum, srb->cmnd, srb->cmd_len);
-+ us->ifnum, us->iobuf, srb->cmd_len);
-
- /* check the return code for the command */
- usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
-diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
-index 52a2831..48efe62 100644
---- a/fs/nfs/callback.c
-+++ b/fs/nfs/callback.c
-@@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
- }
-
- ret = -EPROTONOSUPPORT;
-- if (minorversion == 0)
-+ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
- ret = nfs4_callback_up_net(serv, net);
- else if (xprt->ops->bc_up)
- ret = xprt->ops->bc_up(serv, net);
-diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
-index 2d9b6500..d49e26c 100644
---- a/include/linux/intel-iommu.h
-+++ b/include/linux/intel-iommu.h
-@@ -429,6 +429,7 @@ struct intel_iommu {
- struct page_req_dsc *prq;
- unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct idr pasid_idr;
-+ u32 pasid_max;
- #endif
- struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index fc9bb22..f8c5f5e 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -7908,6 +7908,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
- * if <size> is not specified, the range is treated as a single address.
- */
- enum {
-+ IF_ACT_NONE = -1,
- IF_ACT_FILTER,
- IF_ACT_START,
- IF_ACT_STOP,
-@@ -7931,6 +7932,7 @@ static const match_table_t if_tokens = {
- { IF_SRC_KERNEL, "%u/%u" },
- { IF_SRC_FILEADDR, "%u@%s" },
- { IF_SRC_KERNELADDR, "%u" },
-+ { IF_ACT_NONE, NULL },
- };
-
- /*
-diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
-index 5464c87..e24388a 100644
---- a/lib/mpi/mpi-pow.c
-+++ b/lib/mpi/mpi-pow.c
-@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
- if (!esize) {
- /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
- * depending on if MOD equals 1. */
-- rp[0] = 1;
- res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
-+ if (res->nlimbs) {
-+ if (mpi_resize(res, 1) < 0)
-+ goto enomem;
-+ rp = res->d;
-+ rp[0] = 1;
-+ }
- res->sign = 0;
- goto leave;
- }
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index a2214c6..7401e99 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -3161,6 +3161,16 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
- if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
- return false;
-
-+#ifdef CONFIG_COMPACTION
-+ /*
-+ * This is a gross workaround to compensate a lack of reliable compaction
-+ * operation. We cannot simply go OOM with the current state of the compaction
-+ * code because this can lead to pre mature OOM declaration.
-+ */
-+ if (order <= PAGE_ALLOC_COSTLY_ORDER)
-+ return true;
-+#endif
-+
- /*
- * There are setups with compaction disabled which would prefer to loop
- * inside the allocator rather than hit the oom killer prematurely.
-diff --git a/net/can/bcm.c b/net/can/bcm.c
-index 8af9d25..436a753 100644
---- a/net/can/bcm.c
-+++ b/net/can/bcm.c
-@@ -77,7 +77,7 @@
- (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
- (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-
--#define CAN_BCM_VERSION "20160617"
-+#define CAN_BCM_VERSION "20161123"
-
- MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
- MODULE_LICENSE("Dual BSD/GPL");
-@@ -109,8 +109,9 @@ struct bcm_op {
- u32 count;
- u32 nframes;
- u32 currframe;
-- struct canfd_frame *frames;
-- struct canfd_frame *last_frames;
-+ /* void pointers to arrays of struct can[fd]_frame */
-+ void *frames;
-+ void *last_frames;
- struct canfd_frame sframe;
- struct canfd_frame last_sframe;
- struct sock *sk;
-@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
-
- if (op->flags & RX_FILTER_ID) {
- /* the easiest case */
-- bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
-+ bcm_rx_update_and_send(op, op->last_frames, rxframe);
- goto rx_starttimer;
- }
-
-@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
-
- if (msg_head->nframes) {
- /* update CAN frames content */
-- err = memcpy_from_msg((u8 *)op->frames, msg,
-+ err = memcpy_from_msg(op->frames, msg,
- msg_head->nframes * op->cfsiz);
- if (err < 0)
- return err;
-@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
- }
-
- if (msg_head->nframes) {
-- err = memcpy_from_msg((u8 *)op->frames, msg,
-+ err = memcpy_from_msg(op->frames, msg,
- msg_head->nframes * op->cfsiz);
- if (err < 0) {
- if (op->frames != &op->sframe)
-@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
- /* check flags */
-
- if (op->flags & RX_RTR_FRAME) {
-+ struct canfd_frame *frame0 = op->frames;
-
- /* no timers in RTR-mode */
- hrtimer_cancel(&op->thrtimer);
-@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
- * prevent a full-load-loopback-test ... ;-]
- */
- if ((op->flags & TX_CP_CAN_ID) ||
-- (op->frames[0].can_id == op->can_id))
-- op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
-+ (frame0->can_id == op->can_id))
-+ frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
-
- } else {
- if (op->flags & SETTIMER) {
-diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
-index 5550a86..396aac7 100644
---- a/net/core/flow_dissector.c
-+++ b/net/core/flow_dissector.c
-@@ -945,4 +945,4 @@ static int __init init_default_flow_dissectors(void)
- return 0;
- }
-
--late_initcall_sync(init_default_flow_dissectors);
-+core_initcall(init_default_flow_dissectors);
-diff --git a/net/wireless/core.h b/net/wireless/core.h
-index eee9144..66f2a11 100644
---- a/net/wireless/core.h
-+++ b/net/wireless/core.h
-@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
- struct list_head bss_list;
- struct rb_root bss_tree;
- u32 bss_generation;
-+ u32 bss_entries;
- struct cfg80211_scan_request *scan_req; /* protected by RTNL */
- struct sk_buff *scan_msg;
- struct cfg80211_sched_scan_request __rcu *sched_scan_req;
-diff --git a/net/wireless/scan.c b/net/wireless/scan.c
-index 0358e12..438143a 100644
---- a/net/wireless/scan.c
-+++ b/net/wireless/scan.c
-@@ -57,6 +57,19 @@
- * also linked into the probe response struct.
- */
-
-+/*
-+ * Limit the number of BSS entries stored in mac80211. Each one is
-+ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
-+ * If somebody wants to really attack this though, they'd likely
-+ * use small beacons, and only one type of frame, limiting each of
-+ * the entries to a much smaller size (in order to generate more
-+ * entries in total, so overhead is bigger.)
-+ */
-+static int bss_entries_limit = 1000;
-+module_param(bss_entries_limit, int, 0644);
-+MODULE_PARM_DESC(bss_entries_limit,
-+ "limit to number of scan BSS entries (per wiphy, default 1000)");
-+
- #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
-
- static void bss_free(struct cfg80211_internal_bss *bss)
-@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
-
- list_del_init(&bss->list);
- rb_erase(&bss->rbn, &rdev->bss_tree);
-+ rdev->bss_entries--;
-+ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
-+ "rdev bss entries[%d]/list[empty:%d] corruption\n",
-+ rdev->bss_entries, list_empty(&rdev->bss_list));
- bss_ref_put(rdev, bss);
- return true;
- }
-@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
- rdev->bss_generation++;
- }
-
-+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
-+{
-+ struct cfg80211_internal_bss *bss, *oldest = NULL;
-+ bool ret;
-+
-+ lockdep_assert_held(&rdev->bss_lock);
-+
-+ list_for_each_entry(bss, &rdev->bss_list, list) {
-+ if (atomic_read(&bss->hold))
-+ continue;
-+
-+ if (!list_empty(&bss->hidden_list) &&
-+ !bss->pub.hidden_beacon_bss)
-+ continue;
-+
-+ if (oldest && time_before(oldest->ts, bss->ts))
-+ continue;
-+ oldest = bss;
-+ }
-+
-+ if (WARN_ON(!oldest))
-+ return false;
-+
-+ /*
-+ * The callers make sure to increase rdev->bss_generation if anything
-+ * gets removed (and a new entry added), so there's no need to also do
-+ * it here.
-+ */
-+
-+ ret = __cfg80211_unlink_bss(rdev, oldest);
-+ WARN_ON(!ret);
-+ return ret;
-+}
-+
- void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
- bool send_message)
- {
-@@ -693,6 +744,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
- const u8 *ie;
- int i, ssidlen;
- u8 fold = 0;
-+ u32 n_entries = 0;
-
- ies = rcu_access_pointer(new->pub.beacon_ies);
- if (WARN_ON(!ies))
-@@ -716,6 +768,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
- /* This is the bad part ... */
-
- list_for_each_entry(bss, &rdev->bss_list, list) {
-+ /*
-+ * we're iterating all the entries anyway, so take the
-+ * opportunity to validate the list length accounting
-+ */
-+ n_entries++;
-+
- if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
- continue;
- if (bss->pub.channel != new->pub.channel)
-@@ -744,6 +802,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
- new->pub.beacon_ies);
- }
-
-+ WARN_ONCE(n_entries != rdev->bss_entries,
-+ "rdev bss entries[%d]/list[len:%d] corruption\n",
-+ rdev->bss_entries, n_entries);
-+
- return true;
- }
-
-@@ -898,7 +960,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
- }
- }
-
-+ if (rdev->bss_entries >= bss_entries_limit &&
-+ !cfg80211_bss_expire_oldest(rdev)) {
-+ kfree(new);
-+ goto drop;
-+ }
-+
- list_add_tail(&new->list, &rdev->bss_list);
-+ rdev->bss_entries++;
- rb_insert_bss(rdev, new);
- found = new;
- }
-diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
-index fc3036b..a4d90aa 100644
---- a/security/apparmor/domain.c
-+++ b/security/apparmor/domain.c
-@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
- /* released below */
- cred = get_current_cred();
- cxt = cred_cxt(cred);
-- profile = aa_cred_profile(cred);
-- previous_profile = cxt->previous;
-+ profile = aa_get_newest_profile(aa_cred_profile(cred));
-+ previous_profile = aa_get_newest_profile(cxt->previous);
-
- if (unconfined(profile)) {
- info = "unconfined";
-@@ -718,6 +718,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
- out:
- aa_put_profile(hat);
- kfree(name);
-+ aa_put_profile(profile);
-+ aa_put_profile(previous_profile);
- put_cred(cred);
-
- return error;
diff --git a/4.8.12/0000_README b/4.8.13/0000_README
index 99a02b8..4bb277a 100644
--- a/4.8.12/0000_README
+++ b/4.8.13/0000_README
@@ -2,15 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1010_linux-4.8.11.patch
+Patch: 1012_linux-4.8.13.patch
From: http://www.kernel.org
-Desc: Linux 4.8.11
+Desc: Linux 4.8.13
-Patch: 1011_linux-4.8.12.patch
-From: http://www.kernel.org
-Desc: Linux 4.8.12
-
-Patch: 4420_grsecurity-3.1-4.8.12-201612062306.patch
+Patch: 4420_grsecurity-3.1-4.8.13-201612082118.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
@@ -18,6 +14,10 @@ Patch: 4425_grsec_remove_EI_PAX.patch
From: Anthony G. Basile <blueness@gentoo.org>
Desc: Remove EI_PAX option and force off
+Patch: 4427_force_XATTR_PAX_tmpfs.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Force XATTR_PAX on tmpfs
+
Patch: 4430_grsec-remove-localversion-grsec.patch
From: Kerin Millar <kerframil@gmail.com>
Desc: Removes grsecurity's localversion-grsec file
diff --git a/4.8.13/1012_linux-4.8.13.patch b/4.8.13/1012_linux-4.8.13.patch
new file mode 100644
index 0000000..c742393
--- /dev/null
+++ b/4.8.13/1012_linux-4.8.13.patch
@@ -0,0 +1,1063 @@
+diff --git a/Makefile b/Makefile
+index 7b0c92f..b38abe9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
+index 08e7e2a..a36e860 100644
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -22,10 +22,11 @@
+ static inline void __delay(unsigned long loops)
+ {
+ __asm__ __volatile__(
+- " lp 1f \n"
+- " nop \n"
+- "1: \n"
+- : "+l"(loops));
++ " mov lp_count, %0 \n"
++ " lp 1f \n"
++ " nop \n"
++ "1: \n"
++ : : "r"(loops));
+ }
+
+ extern void __bad_udelay(void);
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 89eeb37..e94ca72 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+ #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+
+ /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
+ #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
+index 123a58b..f0b857d 100644
+--- a/arch/arm64/boot/dts/arm/juno-r1.dts
++++ b/arch/arm64/boot/dts/arm/juno-r1.dts
+@@ -76,7 +76,7 @@
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+- entry-latency-us = <300>;
++ entry-latency-us = <400>;
+ exit-latency-us = <1200>;
+ min-residency-us = <2500>;
+ };
+diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
+index 007be82..26aaa6a 100644
+--- a/arch/arm64/boot/dts/arm/juno-r2.dts
++++ b/arch/arm64/boot/dts/arm/juno-r2.dts
+@@ -76,7 +76,7 @@
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+- entry-latency-us = <300>;
++ entry-latency-us = <400>;
+ exit-latency-us = <1200>;
+ min-residency-us = <2500>;
+ };
+diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
+index a7270ef..6e154d9 100644
+--- a/arch/arm64/boot/dts/arm/juno.dts
++++ b/arch/arm64/boot/dts/arm/juno.dts
+@@ -76,7 +76,7 @@
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+- entry-latency-us = <300>;
++ entry-latency-us = <400>;
+ exit-latency-us = <1200>;
+ min-residency-us = <2500>;
+ };
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 7099f26..b96346b 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -90,7 +90,7 @@ struct arm64_cpu_capabilities {
+ u16 capability;
+ int def_scope; /* default scope */
+ bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
+- void (*enable)(void *); /* Called on all active CPUs */
++ int (*enable)(void *); /* Called on all active CPUs */
+ union {
+ struct { /* To be used for erratum handling only */
+ u32 midr_model;
+diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
+index db0563c..f7865dd 100644
+--- a/arch/arm64/include/asm/exec.h
++++ b/arch/arm64/include/asm/exec.h
+@@ -18,6 +18,9 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+
++#include <linux/sched.h>
++
+ extern unsigned long arch_align_stack(unsigned long sp);
++void uao_thread_switch(struct task_struct *next);
+
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index ace0a96..3be0ab0 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -190,8 +190,8 @@ static inline void spin_lock_prefetch(const void *ptr)
+
+ #endif
+
+-void cpu_enable_pan(void *__unused);
+-void cpu_enable_uao(void *__unused);
+-void cpu_enable_cache_maint_trap(void *__unused);
++int cpu_enable_pan(void *__unused);
++int cpu_enable_uao(void *__unused);
++int cpu_enable_cache_maint_trap(void *__unused);
+
+ #endif /* __ASM_PROCESSOR_H */
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 62272ea..94a0330 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -19,7 +19,9 @@
+ #define pr_fmt(fmt) "CPU features: " fmt
+
+ #include <linux/bsearch.h>
++#include <linux/cpumask.h>
+ #include <linux/sort.h>
++#include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+@@ -936,7 +938,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+ for (; caps->matches; caps++)
+ if (caps->enable && cpus_have_cap(caps->capability))
+- on_each_cpu(caps->enable, NULL, true);
++ /*
++ * Use stop_machine() as it schedules the work allowing
++ * us to modify PSTATE, instead of on_each_cpu() which
++ * uses an IPI, giving us a PSTATE that disappears when
++ * we return.
++ */
++ stop_machine(caps->enable, NULL, cpu_online_mask);
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 6cd2612..9cc8667 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -49,6 +49,7 @@
+ #include <asm/alternative.h>
+ #include <asm/compat.h>
+ #include <asm/cacheflush.h>
++#include <asm/exec.h>
+ #include <asm/fpsimd.h>
+ #include <asm/mmu_context.h>
+ #include <asm/processor.h>
+@@ -303,7 +304,7 @@ static void tls_thread_switch(struct task_struct *next)
+ }
+
+ /* Restore the UAO state depending on next's addr_limit */
+-static void uao_thread_switch(struct task_struct *next)
++void uao_thread_switch(struct task_struct *next)
+ {
+ if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+ if (task_thread_info(next)->addr_limit == KERNEL_DS)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index b616e365..23ddf55 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,8 +1,11 @@
+ #include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
++#include <asm/alternative.h>
+ #include <asm/cacheflush.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
++#include <asm/exec.h>
+ #include <asm/pgtable.h>
+ #include <asm/memory.h>
+ #include <asm/mmu_context.h>
+@@ -48,6 +51,14 @@ void notrace __cpu_suspend_exit(void)
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
+ /*
++ * PSTATE was not saved over suspend/resume, re-enable any detected
++ * features that might not have been set correctly.
++ */
++ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
++ CONFIG_ARM64_PAN));
++ uao_thread_switch(current);
++
++ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 771a01a7f..9595d3d 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -428,9 +428,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ }
+
+-void cpu_enable_cache_maint_trap(void *__unused)
++int cpu_enable_cache_maint_trap(void *__unused)
+ {
+ config_sctlr_el1(SCTLR_EL1_UCI, 0);
++ return 0;
+ }
+
+ #define __user_cache_maint(insn, address, res) \
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 05d2bd7..67506c3 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -29,7 +29,9 @@
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
+ #include <linux/perf_event.h>
++#include <linux/preempt.h>
+
++#include <asm/bug.h>
+ #include <asm/cpufeature.h>
+ #include <asm/exception.h>
+ #include <asm/debug-monitors.h>
+@@ -671,9 +673,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
+ NOKPROBE_SYMBOL(do_debug_exception);
+
+ #ifdef CONFIG_ARM64_PAN
+-void cpu_enable_pan(void *__unused)
++int cpu_enable_pan(void *__unused)
+ {
++ /*
++ * We modify PSTATE. This won't work from irq context as the PSTATE
++ * is discarded once we return from the exception.
++ */
++ WARN_ON_ONCE(in_interrupt());
++
+ config_sctlr_el1(SCTLR_EL1_SPAN, 0);
++ asm(SET_PSTATE_PAN(1));
++ return 0;
+ }
+ #endif /* CONFIG_ARM64_PAN */
+
+@@ -684,8 +694,9 @@ void cpu_enable_pan(void *__unused)
+ * We need to enable the feature at runtime (instead of adding it to
+ * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
+ */
+-void cpu_enable_uao(void *__unused)
++int cpu_enable_uao(void *__unused)
+ {
+ asm(SET_PSTATE_UAO(1));
++ return 0;
+ }
+ #endif /* CONFIG_ARM64_UAO */
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index d0efb5c..a4e070a 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2344,7 +2344,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ frame.next_frame = 0;
+ frame.return_address = 0;
+
+- if (!access_ok(VERIFY_READ, fp, 8))
++ if (!valid_user_frame(fp, sizeof(frame)))
+ break;
+
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
+@@ -2354,9 +2354,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ if (bytes != 0)
+ break;
+
+- if (!valid_user_frame(fp, sizeof(frame)))
+- break;
+-
+ perf_callchain_store(entry, cs_base + frame.return_address);
+ fp = compat_ptr(ss_base + frame.next_frame);
+ }
+@@ -2405,7 +2402,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+ frame.next_frame = NULL;
+ frame.return_address = 0;
+
+- if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
++ if (!valid_user_frame(fp, sizeof(frame)))
+ break;
+
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
+@@ -2415,9 +2412,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+ if (bytes != 0)
+ break;
+
+- if (!valid_user_frame(fp, sizeof(frame)))
+- break;
+-
+ perf_callchain_store(entry, frame.return_address);
+ fp = (void __user *)frame.next_frame;
+ }
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e207b33..1e007a9 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1088,7 +1088,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ desc[1] = tf->command; /* status */
+ desc[2] = tf->device;
+ desc[3] = tf->nsect;
+- desc[0] = 0;
++ desc[7] = 0;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[8] |= 0x80;
+ if (tf->hob_nsect)
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 04365b1..5163c8f 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class,
+ zram = idr_find(&zram_index_idr, dev_id);
+ if (zram) {
+ ret = zram_remove(zram);
+- idr_remove(&zram_index_idr, dev_id);
++ if (!ret)
++ idr_remove(&zram_index_idr, dev_id);
+ } else {
+ ret = -ENODEV;
+ }
+diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
+index 838b22a..f2c9274 100644
+--- a/drivers/clk/sunxi/clk-sunxi.c
++++ b/drivers/clk/sunxi/clk-sunxi.c
+@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
+ else
+ calcp = 3;
+
+- calcm = (req->parent_rate >> calcp) - 1;
++ calcm = (div >> calcp) - 1;
+
+ req->rate = (req->parent_rate >> calcp) / (calcm + 1);
+ req->m = calcm;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 10b5ddf..1ed085f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -33,6 +33,7 @@ struct amdgpu_atpx {
+
+ static struct amdgpu_atpx_priv {
+ bool atpx_detected;
++ bool bridge_pm_usable;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle other_handle;
+@@ -200,7 +201,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+- atpx->functions.power_cntl = false;
++ /*
++ * Disable legacy PM methods only when pcie port PM is usable,
++ * otherwise the device might fail to power off or power on.
++ */
++ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
+
+@@ -546,17 +551,25 @@ static bool amdgpu_atpx_detect(void)
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
++ bool d3_supported = false;
++ struct pci_dev *parent_pdev;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ if (has_atpx && vga_count == 2) {
+@@ -564,6 +577,7 @@ static bool amdgpu_atpx_detect(void)
+ printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ amdgpu_atpx_priv.atpx_detected = true;
++ amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
+ amdgpu_atpx_init();
+ return true;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index a77ce99..b8e3854 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2540,7 +2540,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+- goto err_pages;
++ goto err_sg;
+ }
+ }
+ #ifdef CONFIG_SWIOTLB
+@@ -2583,8 +2583,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+
+ return 0;
+
+-err_pages:
++err_sg:
+ sg_mark_end(sg);
++err_pages:
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e26f889..35d385d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -11791,7 +11791,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+ ret = -EIO;
+- goto cleanup;
++ goto unlock;
+ }
+
+ atomic_inc(&intel_crtc->unpin_work_count);
+@@ -11877,6 +11877,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ if (!IS_ERR_OR_NULL(request))
+ i915_add_request_no_flush(request);
+ atomic_dec(&intel_crtc->unpin_work_count);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+ cleanup:
+ crtc->primary->fb = old_fb;
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index 8f62671f..54acfcc 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -249,13 +249,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
+ if (irq < 0)
+ return irq;
+
+- ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+- IRQF_TRIGGER_NONE, dev_name(dev), priv);
+- if (ret < 0) {
+- dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+- return ret;
+- }
+-
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+@@ -271,6 +264,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, priv);
+
++ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
++ IRQF_TRIGGER_NONE, dev_name(dev), priv);
++ if (ret < 0) {
++ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
++ return ret;
++ }
++
+ ret = component_add(dev, &mtk_disp_ovl_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index ddef0d4..34b4ace 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -33,6 +33,7 @@ struct radeon_atpx {
+
+ static struct radeon_atpx_priv {
+ bool atpx_detected;
++ bool bridge_pm_usable;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ struct radeon_atpx atpx;
+@@ -198,7 +199,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+- atpx->functions.power_cntl = false;
++ /*
++ * Disable legacy PM methods only when pcie port PM is usable,
++ * otherwise the device might fail to power off or power on.
++ */
++ atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
+
+@@ -543,11 +548,16 @@ static bool radeon_atpx_detect(void)
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
++ bool d3_supported = false;
++ struct pci_dev *parent_pdev;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ /* some newer PX laptops mark the dGPU as a non-VGA display device */
+@@ -555,6 +565,9 @@ static bool radeon_atpx_detect(void)
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ if (has_atpx && vga_count == 2) {
+@@ -562,6 +575,7 @@ static bool radeon_atpx_detect(void)
+ printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
++ radeon_atpx_priv.bridge_pm_usable = d3_supported;
+ radeon_atpx_init();
+ return true;
+ }
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 5784e20..9f6203c 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
+ if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
+ &max_proto, set_properties, true))
+ return PSMOUSE_TOUCHKIT_PS2;
+-
+- if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
+- &max_proto, set_properties, true))
+- return PSMOUSE_BYD;
+ }
+
+ /*
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index a8ff969..cbc7dfa 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -2203,8 +2203,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+ is_scanning_required = 1;
+ } else {
+ mwifiex_dbg(priv->adapter, MSG,
+- "info: trying to associate to '%s' bssid %pM\n",
+- (char *)req_ssid.ssid, bss->bssid);
++ "info: trying to associate to '%.*s' bssid %pM\n",
++ req_ssid.ssid_len, (char *)req_ssid.ssid,
++ bss->bssid);
+ memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
+ break;
+ }
+@@ -2264,8 +2265,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ }
+
+ mwifiex_dbg(adapter, INFO,
+- "info: Trying to associate to %s and bssid %pM\n",
+- (char *)sme->ssid, sme->bssid);
++ "info: Trying to associate to %.*s and bssid %pM\n",
++ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
+
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+@@ -2398,8 +2399,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ }
+
+ mwifiex_dbg(priv->adapter, MSG,
+- "info: trying to join to %s and bssid %pM\n",
+- (char *)params->ssid, params->bssid);
++ "info: trying to join to %.*s and bssid %pM\n",
++ params->ssid_len, (char *)params->ssid, params->bssid);
+
+ mwifiex_set_ibss_params(priv, params);
+
+diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
+index db553dc..2b6a592 100644
+--- a/drivers/pci/pcie/aer/aer_inject.c
++++ b/drivers/pci/pcie/aer/aer_inject.c
+@@ -307,20 +307,6 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
+ return 0;
+ }
+
+-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+-{
+- while (1) {
+- if (!pci_is_pcie(dev))
+- break;
+- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+- return dev;
+- if (!dev->bus->self)
+- break;
+- dev = dev->bus->self;
+- }
+- return NULL;
+-}
+-
+ static int find_aer_device_iter(struct device *device, void *data)
+ {
+ struct pcie_device **result = data;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 93f280d..f6eff4a 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
+ }
+
++static bool pcie_root_rcb_set(struct pci_dev *dev)
++{
++ struct pci_dev *rp = pcie_find_root_port(dev);
++ u16 lnkctl;
++
++ if (!rp)
++ return false;
++
++ pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
++ if (lnkctl & PCI_EXP_LNKCTL_RCB)
++ return true;
++
++ return false;
++}
++
+ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ {
+ int pos;
+@@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
+
+ /* Initialize Link Control Register */
+- if (pcie_cap_has_lnkctl(dev))
++ if (pcie_cap_has_lnkctl(dev)) {
++
++ /*
++ * If the Root Port supports Read Completion Boundary of
++ * 128, set RCB to 128. Otherwise, clear it.
++ */
++ hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
++ hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
++ if (pcie_root_rcb_set(dev))
++ hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
++
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+ ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
++ }
+
+ /* Find Advanced Error Reporting Enhanced Capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 0296d81..a813239 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+ if (test_bit(PWMF_EXPORTED, &pwm->flags))
+ pwm_unexport_child(parent, pwm);
+ }
++
++ put_device(parent);
+ }
+
+ static int __init pwm_sysfs_init(void)
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 030d002..5138a84 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2007,7 +2007,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
+
+ static int hpsa_slave_alloc(struct scsi_device *sdev)
+ {
+- struct hpsa_scsi_dev_t *sd;
++ struct hpsa_scsi_dev_t *sd = NULL;
+ unsigned long flags;
+ struct ctlr_info *h;
+
+@@ -2024,7 +2024,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
+ sd->target = sdev_id(sdev);
+ sd->lun = sdev->lun;
+ }
+- } else
++ }
++ if (!sd)
+ sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
+ sdev_id(sdev), sdev->lun);
+
+@@ -3805,6 +3806,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ sizeof(this_device->vendor));
+ memcpy(this_device->model, &inq_buff[16],
+ sizeof(this_device->model));
++ this_device->rev = inq_buff[2];
+ memset(this_device->device_id, 0,
+ sizeof(this_device->device_id));
+ hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
+@@ -3887,10 +3889,14 @@ static void figure_bus_target_lun(struct ctlr_info *h,
+
+ if (!is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* physical device, target and lun filled in later */
+- if (is_hba_lunid(lunaddrbytes))
++ if (is_hba_lunid(lunaddrbytes)) {
++ int bus = HPSA_HBA_BUS;
++
++ if (!device->rev)
++ bus = HPSA_LEGACY_HBA_BUS;
+ hpsa_set_bus_target_lun(device,
+- HPSA_HBA_BUS, 0, lunid & 0x3fff);
+- else
++ bus, 0, lunid & 0x3fff);
++ } else
+ /* defer target, lun assignment for physical devices */
+ hpsa_set_bus_target_lun(device,
+ HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index a1487e6..9d45dde 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t {
+ u64 sas_address;
+ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
+ unsigned char model[16]; /* bytes 16-31 of inquiry data */
++ unsigned char rev; /* byte 2 of inquiry data */
+ unsigned char raid_level; /* from inquiry page 0xC1 */
+ unsigned char volume_offline; /* discovered via TUR or VPD */
+ u16 queue_depth; /* max queue_depth for this device */
+@@ -403,6 +404,7 @@ struct offline_device_entry {
+ #define HPSA_RAID_VOLUME_BUS 1
+ #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
+ #define HPSA_HBA_BUS 0
++#define HPSA_LEGACY_HBA_BUS 3
+
+ /*
+ Send the command to the hardware
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 04ce7cf..50c7167 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -308,7 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+ fc_stats = &lport->host_stats;
+ memset(fc_stats, 0, sizeof(struct fc_host_statistics));
+
+- fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ;
++ fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
+
+ for_each_possible_cpu(cpu) {
+ struct fc_stats *stats;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index a78415d..78be4ae 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -329,11 +329,11 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
+ if (!real)
+ goto bug;
+
++ /* Handle recursion */
++ real = d_real(real, inode, open_flags);
++
+ if (!inode || inode == d_inode(real))
+ return real;
+-
+- /* Handle recursion */
+- return d_real(real, inode, open_flags);
+ bug:
+ WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
+ inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 573c5a1..0a0b2d5 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -256,7 +256,9 @@
+ #endif
+ #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+
+-#if GCC_VERSION >= 50000
++#if GCC_VERSION >= 70000
++#define KASAN_ABI_VERSION 5
++#elif GCC_VERSION >= 50000
+ #define KASAN_ABI_VERSION 4
+ #elif GCC_VERSION >= 40902
+ #define KASAN_ABI_VERSION 3
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 01e8443..d47cc4a 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -364,16 +364,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
+ }
+
+ /*
+- * Get the offset in PAGE_SIZE.
+- * (TODO: hugepage should have ->index in PAGE_SIZE)
++ * Get index of the page with in radix-tree
++ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ */
+-static inline pgoff_t page_to_pgoff(struct page *page)
++static inline pgoff_t page_to_index(struct page *page)
+ {
+ pgoff_t pgoff;
+
+- if (unlikely(PageHeadHuge(page)))
+- return page->index << compound_order(page);
+-
+ if (likely(!PageTransTail(page)))
+ return page->index;
+
+@@ -387,6 +384,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
+ }
+
+ /*
++ * Get the offset in PAGE_SIZE.
++ * (TODO: hugepage should have ->index in PAGE_SIZE)
++ */
++static inline pgoff_t page_to_pgoff(struct page *page)
++{
++ if (unlikely(PageHeadHuge(page)))
++ return page->index << compound_order(page);
++
++ return page_to_index(page);
++}
++
++/*
+ * Return byte-offset into filesystem object for page.
+ */
+ static inline loff_t page_offset(struct page *page)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 0ab8359..03f3df0 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1896,6 +1896,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
+ return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+ }
+
++static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
++{
++ while (1) {
++ if (!pci_is_pcie(dev))
++ break;
++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
++ return dev;
++ if (!dev->bus->self)
++ break;
++ dev = dev->bus->self;
++ }
++ return NULL;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index d6d071f..3af60ee 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -640,7 +640,7 @@
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+-#define KEY_DATA 0x275
++#define KEY_DATA 0x277
+
+ #define BTN_TRIGGER_HAPPY 0x2c0
+ #define BTN_TRIGGER_HAPPY1 0x2c0
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 0082fce..85c5a88 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2173,6 +2173,7 @@ static int rcu_nocb_kthread(void *arg)
+ cl++;
+ c++;
+ local_bh_enable();
++ cond_resched_rcu_qs();
+ list = next;
+ }
+ trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
+index e5c2181f..03f4545 100644
+--- a/mm/kasan/kasan.h
++++ b/mm/kasan/kasan.h
+@@ -53,6 +53,9 @@ struct kasan_global {
+ #if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+ #endif
++#if KASAN_ABI_VERSION >= 5
++ char *odr_indicator;
++#endif
+ };
+
+ /**
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 728d779..87e1a7ca 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = {
+ .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
+ };
+
++#ifdef CONFIG_SYSFS
+ static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+@@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = {
+ .attrs = khugepaged_attr,
+ .name = "khugepaged",
+ };
++#endif /* CONFIG_SYSFS */
+
+ #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
+
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 14645be..9c91acc 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
+ */
+ spin_lock_irq(zone_lru_lock(zone));
+
+- nr_pages = hpage_nr_pages(page);
+- if (!TestClearPageMlocked(page))
++ if (!TestClearPageMlocked(page)) {
++ /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
++ nr_pages = 1;
+ goto unlock_out;
++ }
+
++ nr_pages = hpage_nr_pages(page);
+ __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
+
+ if (__munlock_isolate_lru_page(page, true)) {
+diff --git a/mm/truncate.c b/mm/truncate.c
+index a01cce4..8d8c62d 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+
+ if (!trylock_page(page))
+ continue;
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+ if (PageWriteback(page)) {
+ unlock_page(page);
+ continue;
+@@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ }
+
+ lock_page(page);
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+ wait_on_page_writeback(page);
+ truncate_inode_page(mapping, page);
+ unlock_page(page);
+@@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ if (!trylock_page(page))
+ continue;
+
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+
+ /* Middle of THP: skip */
+ if (PageTransTail(page)) {
+@@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ }
+
+ lock_page(page);
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+ if (page->mapping != mapping) {
+ unlock_page(page);
+ continue;
+diff --git a/mm/workingset.c b/mm/workingset.c
+index 617475f..fb1f918 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
+ local_irq_enable();
+
+- if (memcg_kmem_enabled()) {
++ if (sc->memcg) {
+ pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+ LRU_ALL_FILE);
+ } else {
+diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
+index 2333777..8af1611 100644
+--- a/net/batman-adv/tp_meter.c
++++ b/net/batman-adv/tp_meter.c
+@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (unlikely(!primary_if)) {
+ err = BATADV_TP_REASON_DST_UNREACHABLE;
++ tp_vars->reason = err;
+ goto out;
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 0bf6709..6fb4314 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+
+ WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
+
+- kvm_notify_acked_irq(vcpu->kvm, 0,
+- intid - VGIC_NR_PRIVATE_IRQS);
++ /* Only SPIs require notification */
++ if (vgic_valid_spi(vcpu->kvm, intid))
++ kvm_notify_acked_irq(vcpu->kvm, 0,
++ intid - VGIC_NR_PRIVATE_IRQS);
+ }
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 9f0dae3..5c9f974 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+
+ WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
+
+- kvm_notify_acked_irq(vcpu->kvm, 0,
+- intid - VGIC_NR_PRIVATE_IRQS);
++ /* Only SPIs require notification */
++ if (vgic_valid_spi(vcpu->kvm, intid))
++ kvm_notify_acked_irq(vcpu->kvm, 0,
++ intid - VGIC_NR_PRIVATE_IRQS);
+ }
+
+ /*
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1950782..690d15e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2852,10 +2852,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+
+ ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
+ if (ret < 0) {
+- ops->destroy(dev);
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
+ mutex_unlock(&kvm->lock);
++ ops->destroy(dev);
+ return ret;
+ }
+
diff --git a/4.8.12/4420_grsecurity-3.1-4.8.12-201612062306.patch b/4.8.13/4420_grsecurity-3.1-4.8.13-201612082118.patch
index 5929283..a2b5f41 100644
--- a/4.8.12/4420_grsecurity-3.1-4.8.12-201612062306.patch
+++ b/4.8.13/4420_grsecurity-3.1-4.8.13-201612082118.patch
@@ -407,7 +407,7 @@ index ffab8b5..b8fcd61 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index 7b0c92f..8fa3c72 100644
+index b38abe9..edadcba 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -5301,10 +5301,10 @@ index 37e47a9..f8597fc 100644
}
#endif
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
-index 6cd2612..56d72e5c 100644
+index 9cc8667..5edbcff 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
-@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
+@@ -64,7 +64,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
/*
* Function pointers to optional machine specific functions
*/
@@ -5313,7 +5313,7 @@ index 6cd2612..56d72e5c 100644
EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-@@ -109,7 +109,7 @@ void machine_shutdown(void)
+@@ -110,7 +110,7 @@ void machine_shutdown(void)
* activity (executing tasks, handling interrupts). smp_send_stop()
* achieves this.
*/
@@ -5322,7 +5322,7 @@ index 6cd2612..56d72e5c 100644
{
local_irq_disable();
smp_send_stop();
-@@ -122,12 +122,13 @@ void machine_halt(void)
+@@ -123,12 +123,13 @@ void machine_halt(void)
* achieves this. When the system power is turned off, it will take all CPUs
* with it.
*/
@@ -5337,7 +5337,7 @@ index 6cd2612..56d72e5c 100644
}
/*
-@@ -139,7 +140,7 @@ void machine_power_off(void)
+@@ -140,7 +141,7 @@ void machine_power_off(void)
* executing pre-reset code, and using RAM that the primary CPU's code wishes
* to use. Implementing such co-ordination would be essentially impossible.
*/
@@ -5362,10 +5362,10 @@ index d34fd72..8b6faee 100644
/* orig_sp is the saved pt_regs, find the elr */
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
-index 771a01a7f..db6d9cc 100644
+index 9595d3d..7ee5abb 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
-@@ -511,7 +511,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
+@@ -512,7 +512,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
__show_regs(regs);
}
@@ -19051,7 +19051,7 @@ index b28200d..e93e14d 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index d0efb5c..10f0a95 100644
+index a4e070a..6804f87 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1545,7 +1545,7 @@ static void __init pmu_check_apic(void)
@@ -19081,7 +19081,7 @@ index d0efb5c..10f0a95 100644
}
return get_desc_base(desc);
-@@ -2419,7 +2419,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+@@ -2413,7 +2413,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
break;
perf_callchain_store(entry, frame.return_address);
@@ -41996,7 +41996,7 @@ index 223a770..295a507 100644
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
-index e207b33..145ebf0 100644
+index 1e007a9..67c25db 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4689,7 +4689,7 @@ int ata_sas_port_init(struct ata_port *ap)
@@ -46692,10 +46692,10 @@ index e443073..2ce0ad5 100644
int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
-index 10b5ddf..ed2f78d 100644
+index 1ed085f..76d5db1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
-@@ -519,7 +519,7 @@ static int amdgpu_atpx_init(void)
+@@ -524,7 +524,7 @@ static int amdgpu_atpx_init(void)
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
@@ -48649,10 +48649,10 @@ index 1c2aec3..f807515 100644
/**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index e26f889..5d197b7 100644
+index 35d385d..7ed8f57 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -15592,13 +15592,13 @@ struct intel_quirk {
+@@ -15593,13 +15593,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -48669,7 +48669,7 @@ index e26f889..5d197b7 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -15606,18 +15606,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -15607,18 +15607,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -48700,7 +48700,7 @@ index e26f889..5d197b7 100644
.hook = quirk_invert_brightness,
},
};
-@@ -15700,7 +15702,7 @@ static void intel_init_quirks(struct drm_device *dev)
+@@ -15701,7 +15703,7 @@ static void intel_init_quirks(struct drm_device *dev)
q->hook(dev);
}
for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
@@ -49638,10 +49638,10 @@ index b928c17..e5d9400 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
-index ddef0d4..c4f3351 100644
+index 34b4ace..2f6e0e6 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
-@@ -516,7 +516,7 @@ static int radeon_atpx_init(void)
+@@ -521,7 +521,7 @@ static int radeon_atpx_init(void)
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
@@ -50932,7 +50932,7 @@ index 56dd261..493d7e0 100644
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
-index a1c086b..b45a999 100644
+index a1c086b..b205fcb 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -183,6 +183,7 @@ static struct clocksource hyperv_cs_tsc = {
@@ -50963,7 +50963,7 @@ index a1c086b..b45a999 100644
hypercall_msr.enable = 1;
- hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
-+ hypercall_msr.guest_physical_address = __phys_to_pfn(__pa(ktla_ktva((unsigned long)hv_hypercall_page)));
++ hypercall_msr.guest_physical_address = __phys_to_pfn(slow_virt_to_phys((ktla_ktva((unsigned long)hv_hypercall_page))));
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/* Confirm that hypercall page did get setup. */
@@ -71550,7 +71550,7 @@ index 70d7ad8..66f87d6 100644
* Boxes that should not use MSI for PCIe PME signaling.
*/
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 93f280d..a349035 100644
+index f6eff4a..9ef8afb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -180,7 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -74473,7 +74473,7 @@ index 7028dd3..7392dc6 100644
int rc = -ENODEV;
struct net_device *netdev = NULL;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
-index 030d002..cbf90d1 100644
+index 5138a84..de71237 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -942,10 +942,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
@@ -74498,7 +74498,7 @@ index 030d002..cbf90d1 100644
}
}
-@@ -7020,17 +7020,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
+@@ -7026,17 +7026,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
@@ -74519,7 +74519,7 @@ index 030d002..cbf90d1 100644
(h->interrupts_enabled == 0);
}
-@@ -7958,7 +7958,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
+@@ -7964,7 +7964,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
if (prod_index < 0)
return prod_index;
h->product_name = products[prod_index].product_name;
@@ -74528,7 +74528,7 @@ index 030d002..cbf90d1 100644
h->needs_abort_tags_swizzled =
ctlr_needs_abort_tags_swizzled(h->board_id);
-@@ -8357,7 +8357,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
+@@ -8363,7 +8363,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
unsigned long flags;
u32 lockup_detected;
@@ -74537,7 +74537,7 @@ index 030d002..cbf90d1 100644
spin_lock_irqsave(&h->lock, flags);
lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
if (!lockup_detected) {
-@@ -8695,7 +8695,7 @@ reinit_after_soft_reset:
+@@ -8701,7 +8701,7 @@ reinit_after_soft_reset:
}
/* make sure the board interrupts are off */
@@ -74546,7 +74546,7 @@ index 030d002..cbf90d1 100644
rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
if (rc)
-@@ -8748,7 +8748,7 @@ reinit_after_soft_reset:
+@@ -8754,7 +8754,7 @@ reinit_after_soft_reset:
* fake ones to scoop up any residual completions.
*/
spin_lock_irqsave(&h->lock, flags);
@@ -74555,7 +74555,7 @@ index 030d002..cbf90d1 100644
spin_unlock_irqrestore(&h->lock, flags);
hpsa_free_irqs(h);
rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
-@@ -8778,9 +8778,9 @@ reinit_after_soft_reset:
+@@ -8784,9 +8784,9 @@ reinit_after_soft_reset:
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
"Waiting for stale completions to drain.\n");
@@ -74567,7 +74567,7 @@ index 030d002..cbf90d1 100644
rc = controller_reset_failed(h->cfgtable);
if (rc)
-@@ -8807,7 +8807,7 @@ reinit_after_soft_reset:
+@@ -8813,7 +8813,7 @@ reinit_after_soft_reset:
/* Turn the interrupts on so we can service requests */
@@ -74576,7 +74576,7 @@ index 030d002..cbf90d1 100644
hpsa_hba_inquiry(h);
-@@ -8833,7 +8833,7 @@ reinit_after_soft_reset:
+@@ -8839,7 +8839,7 @@ reinit_after_soft_reset:
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_performant_mode(h);
@@ -74585,7 +74585,7 @@ index 030d002..cbf90d1 100644
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
-@@ -8968,7 +8968,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+@@ -8974,7 +8974,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
* To write all data in the battery backed cache to disks
*/
hpsa_flush_cache(h);
@@ -74594,7 +74594,7 @@ index 030d002..cbf90d1 100644
hpsa_free_irqs(h); /* init_one 4 */
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
}
-@@ -9110,7 +9110,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+@@ -9116,7 +9116,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
CFGTBL_Trans_enable_directed_msix |
(trans_support & (CFGTBL_Trans_io_accel1 |
CFGTBL_Trans_io_accel2));
@@ -74603,7 +74603,7 @@ index 030d002..cbf90d1 100644
/* This is a bit complicated. There are 8 registers on
* the controller which we write to to tell it 8 different
-@@ -9152,7 +9152,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+@@ -9158,7 +9158,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
* perform the superfluous readl() after each command submission.
*/
if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
@@ -74612,7 +74612,7 @@ index 030d002..cbf90d1 100644
/* Controller spec: zero out this buffer. */
for (i = 0; i < h->nreply_queues; i++)
-@@ -9182,12 +9182,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
+@@ -9188,12 +9188,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
* enable outbound interrupt coalescing in accelerator mode;
*/
if (trans_support & CFGTBL_Trans_io_accel1) {
@@ -74628,10 +74628,10 @@ index 030d002..cbf90d1 100644
writel(4, &h->cfgtable->HostWrite.CoalIntCount);
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
-index a1487e6..53a2c5d 100644
+index 9d45dde..622095a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
-@@ -179,7 +179,7 @@ struct ctlr_info {
+@@ -180,7 +180,7 @@ struct ctlr_info {
unsigned int msix_vector;
unsigned int msi_vector;
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
@@ -74640,7 +74640,7 @@ index a1487e6..53a2c5d 100644
/* queue and queue Info */
unsigned int Qdepth;
-@@ -579,38 +579,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
+@@ -581,38 +581,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
}
static struct access_method SA5_access = {
@@ -116847,7 +116847,7 @@ index 767377e..2e4b2e8 100644
if (res < 0) {
free_page((unsigned long) buf);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index a78415d..557e8db 100644
+index 78be4ae..37a3935 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -148,8 +148,8 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -133305,7 +133305,7 @@ index f964ef7..0679632 100644
asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
/*
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
-index 573c5a1..b902c3f 100644
+index 0a0b2d5..3ae2773 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -116,9 +116,9 @@
@@ -137705,7 +137705,7 @@ index 113ee62..70198a7 100644
spinlock_t lock ____cacheline_aligned;
unsigned int processed;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
-index 01e8443..3a4d158 100644
+index d47cc4a..e1c3886 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -215,7 +215,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
@@ -147795,7 +147795,7 @@ index 6d86ab6..7046dff 100644
}
mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
-index 0082fce..29572cb 100644
+index 85c5a88..4479872 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1174,7 +1174,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
@@ -147816,7 +147816,7 @@ index 0082fce..29572cb 100644
rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
-@@ -2177,8 +2177,8 @@ static int rcu_nocb_kthread(void *arg)
+@@ -2178,8 +2178,8 @@ static int rcu_nocb_kthread(void *arg)
}
trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
smp_mb__before_atomic(); /* _add after CB invocation. */
@@ -147827,7 +147827,7 @@ index 0082fce..29572cb 100644
rdp->n_nocbs_invoked += c;
}
return 0;
-@@ -2533,9 +2533,9 @@ static void rcu_sysidle_enter(int irq)
+@@ -2534,9 +2534,9 @@ static void rcu_sysidle_enter(int irq)
j = jiffies;
WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
smp_mb__before_atomic();
@@ -147839,7 +147839,7 @@ index 0082fce..29572cb 100644
}
/*
-@@ -2606,9 +2606,9 @@ static void rcu_sysidle_exit(int irq)
+@@ -2607,9 +2607,9 @@ static void rcu_sysidle_exit(int irq)
/* Record end of idle period. */
smp_mb__before_atomic();
@@ -147851,7 +147851,7 @@ index 0082fce..29572cb 100644
/*
* If we are the timekeeping CPU, we are permitted to be non-idle
-@@ -2654,7 +2654,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+@@ -2655,7 +2655,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
/* Pick up current idle and NMI-nesting counter and check. */
@@ -153452,7 +153452,7 @@ index f7ee04a..41da9dc 100644
err = -EPERM;
goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
-index 14645be..e2c7aa1 100644
+index 9c91acc..d377a3b 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -14,6 +14,7 @@
@@ -153463,7 +153463,7 @@ index 14645be..e2c7aa1 100644
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
-@@ -573,7 +574,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+@@ -576,7 +577,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
{
unsigned long nstart, end, tmp;
struct vm_area_struct * vma, * prev;
@@ -153472,7 +153472,7 @@ index 14645be..e2c7aa1 100644
VM_BUG_ON(offset_in_page(start));
VM_BUG_ON(len != PAGE_ALIGN(len));
-@@ -582,6 +583,9 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+@@ -585,6 +586,9 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
return -EINVAL;
if (end == start)
return 0;
@@ -153482,7 +153482,7 @@ index 14645be..e2c7aa1 100644
vma = find_vma(current->mm, start);
if (!vma || vma->vm_start > start)
return -ENOMEM;
-@@ -591,8 +595,14 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+@@ -594,8 +598,14 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
prev = vma;
for (nstart = start ; ; ) {
@@ -153498,7 +153498,7 @@ index 14645be..e2c7aa1 100644
newflags |= flags;
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
-@@ -641,6 +651,10 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
+@@ -644,6 +654,10 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
locked += current->mm->locked_vm;
/* check against resource limits */
@@ -153509,7 +153509,7 @@ index 14645be..e2c7aa1 100644
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = apply_vma_lock_flags(start, len, flags);
-@@ -722,6 +736,11 @@ static int apply_mlockall_flags(int flags)
+@@ -725,6 +739,11 @@ static int apply_mlockall_flags(int flags)
for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
vm_flags_t newflags;
@@ -153521,7 +153521,7 @@ index 14645be..e2c7aa1 100644
newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
newflags |= to_add;
-@@ -754,6 +773,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+@@ -757,6 +776,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
return -EINTR;
ret = -ENOMEM;
@@ -162014,10 +162014,23 @@ index 37874e2..d5cd498 100644
+ atomic_read_unchecked(&sp->sk_drops));
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
-index 7370ad2..7002ca8 100644
+index 7370ad2..7bbfa23 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
-@@ -1073,7 +1073,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
+@@ -447,8 +447,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+
+ if (__ipv6_addr_needs_scope_id(addr_type))
+ iif = skb->dev->ifindex;
+- else
+- iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
++ else {
++ dst = skb_dst(skb);
++ iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
++ }
+
+ /*
+ * Must not send error if the source does not uniquely
+@@ -1073,7 +1075,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
{
@@ -170235,10 +170248,10 @@ index 0000000..7514850
+fi
diff --git a/scripts/gcc-plugins/initify_plugin.c b/scripts/gcc-plugins/initify_plugin.c
new file mode 100644
-index 0000000..bdd5d64
+index 0000000..46bd9b9
--- /dev/null
+++ b/scripts/gcc-plugins/initify_plugin.c
-@@ -0,0 +1,1811 @@
+@@ -0,0 +1,1809 @@
+/*
+ * Copyright 2015-2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2
@@ -170288,7 +170301,7 @@ index 0000000..bdd5d64
+__visible int plugin_is_GPL_compatible;
+
+static struct plugin_info initify_plugin_info = {
-+ .version = "20161115",
++ .version = "20161208",
+ .help = "disable\tturn off the initify plugin\n"
+ "verbose\tprint all initified strings and all"
+ " functions which should be __init/__exit\n"
@@ -171715,10 +171728,8 @@ index 0000000..bdd5d64
+ arg = chain_index(i, arg_list);
+ gcc_assert(arg != NULL_TREE);
+
-+ if (has_capture_use_local_var(arg)) {
-+ error("%qE captures its %u (%qD) parameter, please remove it from the nocapture attribute.", current_function_decl, i + 1, arg);
-+ gcc_unreachable();
-+ }
++ if (has_capture_use_local_var(arg))
++ warning(0, "%qE captures its %u (%qD) parameter, please remove it from the nocapture attribute.", current_function_decl, i + 1, arg);
+ }
+}
+
@@ -171866,10 +171877,10 @@ index 0000000..bdd5d64
+ if (!section_name)
+ return true;
+
-+ if (!strcmp(section_name, ".ref.text\000"))
-+ return true;
++ if (!strcmp(section_name, ".ref.text"))
++ return false;
+
-+ if (!strcmp(section_name, ".meminit.text\000"))
++ if (!strcmp(section_name, ".meminit.text"))
+ return false;
+
+ inform(DECL_SOURCE_LOCATION(fndecl), "Section of %qE: %s\n", fndecl, section_name);
@@ -221173,7 +221184,7 @@ index 0a578fe..b81f62d 100644
})
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 1950782..098c717 100644
+index 690d15e..2d4e146 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -93,12 +93,17 @@ LIST_HEAD(vm_list);
diff --git a/4.8.12/4425_grsec_remove_EI_PAX.patch b/4.8.13/4425_grsec_remove_EI_PAX.patch
index 594598a..594598a 100644
--- a/4.8.12/4425_grsec_remove_EI_PAX.patch
+++ b/4.8.13/4425_grsec_remove_EI_PAX.patch
diff --git a/4.8.12/4427_force_XATTR_PAX_tmpfs.patch b/4.8.13/4427_force_XATTR_PAX_tmpfs.patch
index 2562d2f..2562d2f 100644
--- a/4.8.12/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.8.13/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.8.12/4430_grsec-remove-localversion-grsec.patch b/4.8.13/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.8.12/4430_grsec-remove-localversion-grsec.patch
+++ b/4.8.13/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.8.12/4435_grsec-mute-warnings.patch b/4.8.13/4435_grsec-mute-warnings.patch
index 8929222..8929222 100644
--- a/4.8.12/4435_grsec-mute-warnings.patch
+++ b/4.8.13/4435_grsec-mute-warnings.patch
diff --git a/4.8.12/4440_grsec-remove-protected-paths.patch b/4.8.13/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.8.12/4440_grsec-remove-protected-paths.patch
+++ b/4.8.13/4440_grsec-remove-protected-paths.patch
diff --git a/4.8.12/4450_grsec-kconfig-default-gids.patch b/4.8.13/4450_grsec-kconfig-default-gids.patch
index 6fd0511..6fd0511 100644
--- a/4.8.12/4450_grsec-kconfig-default-gids.patch
+++ b/4.8.13/4450_grsec-kconfig-default-gids.patch
diff --git a/4.8.12/4465_selinux-avc_audit-log-curr_ip.patch b/4.8.13/4465_selinux-avc_audit-log-curr_ip.patch
index 7248385..7248385 100644
--- a/4.8.12/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.8.13/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.8.12/4470_disable-compat_vdso.patch b/4.8.13/4470_disable-compat_vdso.patch
index 1e4b84a..1e4b84a 100644
--- a/4.8.12/4470_disable-compat_vdso.patch
+++ b/4.8.13/4470_disable-compat_vdso.patch
diff --git a/4.8.12/4475_emutramp_default_on.patch b/4.8.13/4475_emutramp_default_on.patch
index 7b468ee..7b468ee 100644
--- a/4.8.12/4475_emutramp_default_on.patch
+++ b/4.8.13/4475_emutramp_default_on.patch