Diffstat (limited to '4.8.12/1010_linux-4.8.11.patch')
-rw-r--r-- 4.8.12/1010_linux-4.8.11.patch 2351
1 file changed, 2351 insertions, 0 deletions
diff --git a/4.8.12/1010_linux-4.8.11.patch b/4.8.12/1010_linux-4.8.11.patch
new file mode 100644
index 0000000..5c67d71
--- /dev/null
+++ b/4.8.12/1010_linux-4.8.11.patch
@@ -0,0 +1,2351 @@
+diff --git a/Makefile b/Makefile
+index 7cf2b49..2b1bcba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -fno-strict-aliasing -fno-common \
+ -Werror-implicit-function-declaration \
+ -Wno-format-security \
+- -std=gnu89
++ -std=gnu89 $(call cc-option,-fno-PIE)
++
+
+ KBUILD_AFLAGS_KERNEL :=
+ KBUILD_CFLAGS_KERNEL :=
+-KBUILD_AFLAGS := -D__ASSEMBLY__
++KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS_MODULE := -DMODULE
+ KBUILD_CFLAGS_MODULE := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+@@ -621,6 +622,7 @@ include arch/$(SRCARCH)/Makefile
+
+ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
+ KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS += -Os
+diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
+index dec4b07..3799396 100644
+--- a/arch/arm/boot/dts/imx53-qsb.dts
++++ b/arch/arm/boot/dts/imx53-qsb.dts
+@@ -64,8 +64,8 @@
+ };
+
+ ldo3_reg: ldo3 {
+- regulator-min-microvolt = <600000>;
+- regulator-max-microvolt = <1800000>;
++ regulator-min-microvolt = <1725000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+@@ -76,8 +76,8 @@
+ };
+
+ ldo5_reg: ldo5 {
+- regulator-min-microvolt = <1725000>;
+- regulator-max-microvolt = <3300000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+@@ -100,14 +100,14 @@
+ };
+
+ ldo9_reg: ldo9 {
+- regulator-min-microvolt = <1200000>;
++ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+
+ ldo10_reg: ldo10 {
+- regulator-min-microvolt = <1250000>;
+- regulator-max-microvolt = <3650000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <3600000>;
+ regulator-always-on;
+ };
+ };
+diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
+index 2065f46..38b6a2b 100644
+--- a/arch/arm64/include/asm/perf_event.h
++++ b/arch/arm64/include/asm/perf_event.h
+@@ -46,7 +46,15 @@
+ #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
+ #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+
+-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
++/*
++ * PMUv3 event types: required events
++ */
++#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
++#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
++#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
++#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
++#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
++#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+
+ /*
+ * Event filters for PMUv3
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 838ccf1..2c4df15 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -30,17 +30,9 @@
+
+ /*
+ * ARMv8 PMUv3 Performance Events handling code.
+- * Common event types.
++ * Common event types (some are defined in asm/perf_event.h).
+ */
+
+-/* Required events. */
+-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
+-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
+-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
+-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
+-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
+-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+-
+ /* At least one of the following is required. */
+ #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
+ #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index e51367d..31c144f 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -602,8 +602,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+
+ idx = ARMV8_PMU_CYCLE_IDX;
+ } else {
+- BUG();
++ return false;
+ }
++ } else if (r->CRn == 0 && r->CRm == 9) {
++ /* PMCCNTR */
++ if (pmu_access_event_counter_el0_disabled(vcpu))
++ return false;
++
++ idx = ARMV8_PMU_CYCLE_IDX;
+ } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+ /* PMEVCNTRn_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+@@ -611,7 +617,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+ } else {
+- BUG();
++ return false;
+ }
+
+ if (!pmu_counter_idx_valid(vcpu, idx))
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 7ac8e6e..8d586cf 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ opal_configure_cores();
+
+- /* Enable AIL if supported, and we are in hypervisor mode */
+- if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+- early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+- unsigned long lpcr = mfspr(SPRN_LPCR);
+- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+- }
++ /* AIL on native is done in cpu_ready_for_interrupts() */
+ }
+ }
+
+ static void cpu_ready_for_interrupts(void)
+ {
++ /*
++ * Enable AIL if supported, and we are in hypervisor mode. This
++ * is called once for every processor.
++ *
++ * If we are not in hypervisor mode the job is done once for
++ * the whole partition in configure_exceptions().
++ */
++ if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
++ early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
++ unsigned long lpcr = mfspr(SPRN_LPCR);
++ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
++ }
++
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+ }
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index b81fe2d..1e81a37 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_SMP
+ unsigned bits;
+ int cpu = smp_processor_id();
+- unsigned int socket_id, core_complex_id;
+
+ bits = c->x86_coreid_bits;
+ /* Low order bits define the core id (index of core in socket) */
+@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
+ if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
+ return;
+
+- socket_id = (c->apicid >> bits) - 1;
+- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
+-
+- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
++ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+ #endif
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 46f74d4..2fff657 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ struct kvm_shared_msrs *locals
+ = container_of(urn, struct kvm_shared_msrs, urn);
+ struct kvm_shared_msr_values *values;
++ unsigned long flags;
+
++ /*
++ * Disabling irqs at this point since the following code could be
++ * interrupted and executed through kvm_arch_hardware_disable()
++ */
++ local_irq_save(flags);
++ if (locals->registered) {
++ locals->registered = false;
++ user_return_notifier_unregister(urn);
++ }
++ local_irq_restore(flags);
+ for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
+ values = &locals->values[slot];
+ if (values->host != values->curr) {
+@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
+ values->curr = values->host;
+ }
+ }
+- locals->registered = false;
+- user_return_notifier_unregister(urn);
+ }
+
+ static void shared_msr_update(unsigned slot, u32 msr)
+@@ -3372,6 +3381,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ };
+ case KVM_SET_VAPIC_ADDR: {
+ struct kvm_vapic_addr va;
++ int idx;
+
+ r = -EINVAL;
+ if (!lapic_in_kernel(vcpu))
+@@ -3379,7 +3389,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index ac58c16..555b9fa 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
+
+ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
+ KBUILD_CFLAGS += -m$(BITS)
++KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index e44944f..2932a5b 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
++ dpm_wait_for_children(dev, async);
++
+ if (async_error)
+ goto Complete;
+
+@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+- dpm_wait_for_children(dev, async);
+-
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
+
+ __pm_runtime_disable(dev, false);
+
++ dpm_wait_for_children(dev, async);
++
+ if (async_error)
+ goto Complete;
+
+@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+- dpm_wait_for_children(dev, async);
+-
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
+index 19f9b62..7a6acc3 100644
+--- a/drivers/clk/imx/clk-pllv3.c
++++ b/drivers/clk/imx/clk-pllv3.c
+@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+- return (parent_rate * div) + (u32)temp64;
++ return parent_rate * div + (unsigned long)temp64;
+ }
+
+ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+- return parent_rate * div + parent_rate * mfn / mfd;
++ temp64 = (u64)parent_rate;
++ temp64 *= mfn;
++ do_div(temp64, mfd);
++
++ return parent_rate * div + (unsigned long)temp64;
+ }
+
+ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
+index 3a51fff..9adaf48 100644
+--- a/drivers/clk/mmp/clk-of-mmp2.c
++++ b/drivers/clk/mmp/clk-of-mmp2.c
+@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
+index 87f2317..f110c02 100644
+--- a/drivers/clk/mmp/clk-of-pxa168.c
++++ b/drivers/clk/mmp/clk-of-pxa168.c
+@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
+index e22a67f..64d1ef4 100644
+--- a/drivers/clk/mmp/clk-of-pxa910.c
++++ b/drivers/clk/mmp/clk-of-pxa910.c
+@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apmu_base = of_iomap(np, 1);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apmu_base) {
+ pr_err("failed to map apmu registers\n");
+ return;
+ }
+@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
+ }
+
+ pxa_unit->apbcp_base = of_iomap(np, 3);
+- if (!pxa_unit->mpmu_base) {
++ if (!pxa_unit->apbcp_base) {
+ pr_err("failed to map apbcp registers\n");
+ return;
+ }
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index b304421..2cde379 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -4542,6 +4542,15 @@ static int __init caam_algapi_init(void)
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /*
++ * Check support for AES modes not available
++ * on LP devices.
++ */
++ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
++ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_XTS)
++ continue;
++
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 02f2a56..47d08b9 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ break;
+ }
+
+- memcpy(reg_val, chip->reg_output, NBANK(chip));
+ mutex_lock(&chip->i2c_lock);
++ memcpy(reg_val, chip->reg_output, NBANK(chip));
+ for(bank=0; bank<NBANK(chip); bank++) {
+ unsigned bankmask = mask[bank / sizeof(*mask)] >>
+ ((bank % sizeof(*mask)) * 8);
+ if(bankmask) {
+ unsigned bankval = bits[bank / sizeof(*bits)] >>
+ ((bank % sizeof(*bits)) * 8);
++ bankval &= bankmask;
+ reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
+ }
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index b2dee10..15704aa 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -2667,8 +2667,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+- /* Flush direction if something changed behind our back */
+- if (chip->get_direction) {
++ /*
++ * If it's fast: flush the direction setting if something changed
++ * behind our back
++ */
++ if (!chip->can_sleep && chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
+
+ if (dir)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 700c56b..e443073 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -492,6 +492,7 @@ struct amdgpu_bo {
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
++ unsigned prime_shared_count;
+ /* list of all virtual address to which this bo
+ * is associated to
+ */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 651115d..c02db01f6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
+ entry->priority = min(info[i].bo_priority,
+ AMDGPU_BO_LIST_MAX_PRIORITY);
+ entry->tv.bo = &entry->robj->tbo;
+- entry->tv.shared = true;
++ entry->tv.shared = !entry->robj->prime_shared_count;
+
+ if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+ gds_obj = entry->robj;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+index 7700dc2..3826d5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ if (ret)
+ return ERR_PTR(ret);
+
++ bo->prime_shared_count = 1;
+ return &bo->gem_base;
+ }
+
+ int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+ {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+- int ret = 0;
++ long ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
++ /*
++ * Wait for all shared fences to complete before we switch to future
++ * use of exclusive fence on this prime shared bo.
++ */
++ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
++ MAX_SCHEDULE_TIMEOUT);
++ if (unlikely(ret < 0)) {
++ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
++ amdgpu_bo_unreserve(bo);
++ return ret;
++ }
++
+ /* pin buffer into GTT */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
++ if (likely(ret == 0))
++ bo->prime_shared_count++;
++
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+ return;
+
+ amdgpu_bo_unpin(bo);
++ if (bo->prime_shared_count)
++ bo->prime_shared_count--;
+ amdgpu_bo_unreserve(bo);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 1f8af87..cf25607 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ if (!child)
+ return;
+
+- aux_channel = child->raw[25];
++ aux_channel = child->common.aux_channel;
+ ddc_pin = child->common.ddc_pin;
+
+ is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+@@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+ return false;
+ }
+
+-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
++static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
++ enum port port)
+ {
+ static const struct {
+ u16 dp, hdmi;
+@@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+- int i;
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+- if (!dev_priv->vbt.child_dev_num)
++ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
++ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ return false;
+
++ if (p_child->common.dvo_port == port_mapping[port].dp)
++ return true;
++
++ /* Only accept an HDMI dvo_port as DP++ if it has an AUX channel */
++ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
++ p_child->common.aux_channel != 0)
++ return true;
++
++ return false;
++}
++
++bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
++ enum port port)
++{
++ int i;
++
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+
+- if ((p_child->common.dvo_port == port_mapping[port].dp ||
+- p_child->common.dvo_port == port_mapping[port].hdmi) &&
+- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
++ if (child_dev_is_dp_dual_mode(p_child, port))
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 3051182..b8aeb28 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4323,21 +4323,11 @@ static enum drm_connector_status
+ intel_dp_detect(struct drm_connector *connector, bool force)
+ {
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+- struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ enum drm_connector_status status = connector->status;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+- if (intel_dp->is_mst) {
+- /* MST devices are disconnected from a monitor POV */
+- intel_dp_unset_edid(intel_dp);
+- if (intel_encoder->type != INTEL_OUTPUT_EDP)
+- intel_encoder->type = INTEL_OUTPUT_DP;
+- return connector_status_disconnected;
+- }
+-
+ /* If full detect is not performed yet, do a full detect */
+ if (!intel_dp->detect_done)
+ status = intel_dp_long_pulse(intel_dp->attached_connector);
+diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
+index 68db962..8886cab1 100644
+--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
++++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
+@@ -280,7 +280,8 @@ struct common_child_dev_config {
+ u8 dp_support:1;
+ u8 tmds_support:1;
+ u8 support_reserved:5;
+- u8 not_common3[12];
++ u8 aux_channel;
++ u8 not_common3[11];
+ u8 iboost_level;
+ } __packed;
+
+diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
+index d223650..11edabf 100644
+--- a/drivers/i2c/Kconfig
++++ b/drivers/i2c/Kconfig
+@@ -59,7 +59,6 @@ config I2C_CHARDEV
+
+ config I2C_MUX
+ tristate "I2C bus multiplexing support"
+- depends on HAS_IOMEM
+ help
+ Say Y here if you want the I2C core to support the ability to
+ handle multiplexed I2C bus topologies, by presenting each
+diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
+index e280c8e..96de9ce 100644
+--- a/drivers/i2c/muxes/Kconfig
++++ b/drivers/i2c/muxes/Kconfig
+@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
+
+ config I2C_MUX_REG
+ tristate "Register-based I2C multiplexer"
++ depends on HAS_IOMEM
+ help
+ If you say yes to this option, support will be included for a
+ register based I2C multiplexer. This driver provides access to
+diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
+index 3278ebf..7e6f300 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
+@@ -247,9 +247,9 @@ static int pca954x_probe(struct i2c_client *client,
+ /* discard unconfigured channels */
+ break;
+ idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
+- data->deselect |= (idle_disconnect_pd
+- || idle_disconnect_dt) << num;
+ }
++ data->deselect |= (idle_disconnect_pd ||
++ idle_disconnect_dt) << num;
+
+ ret = i2c_mux_add_adapter(muxc, force, num, class);
+
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index c995255..71c7c4c 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -80,6 +80,8 @@ static struct ib_cm {
+ __be32 random_id_operand;
+ struct list_head timewait_list;
+ struct workqueue_struct *wq;
++ /* Sync on cm port state changes */
++ spinlock_t state_lock;
+ } cm;
+
+ /* Counter indexes ordered by attribute ID */
+@@ -161,6 +163,8 @@ struct cm_port {
+ struct ib_mad_agent *mad_agent;
+ struct kobject port_obj;
+ u8 port_num;
++ struct list_head cm_priv_prim_list;
++ struct list_head cm_priv_altr_list;
+ struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
+ };
+
+@@ -241,6 +245,12 @@ struct cm_id_private {
+ u8 service_timeout;
+ u8 target_ack_delay;
+
++ struct list_head prim_list;
++ struct list_head altr_list;
++ /* Indicates that the send port mad is registered and av is set */
++ int prim_send_port_not_ready;
++ int altr_send_port_not_ready;
++
+ struct list_head work_list;
+ atomic_t work_count;
+ };
+@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+ struct ib_mad_agent *mad_agent;
+ struct ib_mad_send_buf *m;
+ struct ib_ah *ah;
++ struct cm_av *av;
++ unsigned long flags, flags2;
++ int ret = 0;
+
++ /* don't let the port be released till the agent is down */
++ spin_lock_irqsave(&cm.state_lock, flags2);
++ spin_lock_irqsave(&cm.lock, flags);
++ if (!cm_id_priv->prim_send_port_not_ready)
++ av = &cm_id_priv->av;
++ else if (!cm_id_priv->altr_send_port_not_ready &&
++ (cm_id_priv->alt_av.port))
++ av = &cm_id_priv->alt_av;
++ else {
++ pr_info("%s: not valid CM id\n", __func__);
++ ret = -ENODEV;
++ spin_unlock_irqrestore(&cm.lock, flags);
++ goto out;
++ }
++ spin_unlock_irqrestore(&cm.lock, flags);
++ /* Make sure the port hasn't released the mad yet */
+ mad_agent = cm_id_priv->av.port->mad_agent;
+- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
+- if (IS_ERR(ah))
+- return PTR_ERR(ah);
++ if (!mad_agent) {
++ pr_info("%s: not a valid MAD agent\n", __func__);
++ ret = -ENODEV;
++ goto out;
++ }
++ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
++ if (IS_ERR(ah)) {
++ ret = PTR_ERR(ah);
++ goto out;
++ }
+
+ m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
+- cm_id_priv->av.pkey_index,
++ av->pkey_index,
+ 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
+ if (IS_ERR(m)) {
+ ib_destroy_ah(ah);
+- return PTR_ERR(m);
++ ret = PTR_ERR(m);
++ goto out;
+ }
+
+ /* Timeout set by caller if response is expected. */
+@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
+ atomic_inc(&cm_id_priv->refcount);
+ m->context[0] = cm_id_priv;
+ *msg = m;
+- return 0;
++
++out:
++ spin_unlock_irqrestore(&cm.state_lock, flags2);
++ return ret;
+ }
+
+ static int cm_alloc_response_msg(struct cm_port *port,
+@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ grh, &av->ah_attr);
+ }
+
+-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
++static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
++ struct cm_id_private *cm_id_priv)
+ {
+ struct cm_device *cm_dev;
+ struct cm_port *port = NULL;
+@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+ &av->ah_attr);
+ av->timeout = path->packet_life_time + 1;
+
+- return 0;
++ spin_lock_irqsave(&cm.lock, flags);
++ if (&cm_id_priv->av == av)
++ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
++ else if (&cm_id_priv->alt_av == av)
++ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
++ else
++ ret = -EINVAL;
++
++ spin_unlock_irqrestore(&cm.lock, flags);
++
++ return ret;
+ }
+
+ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
+@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
+ spin_lock_init(&cm_id_priv->lock);
+ init_completion(&cm_id_priv->comp);
+ INIT_LIST_HEAD(&cm_id_priv->work_list);
++ INIT_LIST_HEAD(&cm_id_priv->prim_list);
++ INIT_LIST_HEAD(&cm_id_priv->altr_list);
+ atomic_set(&cm_id_priv->work_count, -1);
+ atomic_set(&cm_id_priv->refcount, 1);
+ return &cm_id_priv->id;
+@@ -892,6 +945,15 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
+ break;
+ }
+
++ spin_lock_irq(&cm.lock);
++ if (!list_empty(&cm_id_priv->altr_list) &&
++ (!cm_id_priv->altr_send_port_not_ready))
++ list_del(&cm_id_priv->altr_list);
++ if (!list_empty(&cm_id_priv->prim_list) &&
++ (!cm_id_priv->prim_send_port_not_ready))
++ list_del(&cm_id_priv->prim_list);
++ spin_unlock_irq(&cm.lock);
++
+ cm_free_id(cm_id->local_id);
+ cm_deref_id(cm_id_priv);
+ wait_for_completion(&cm_id_priv->comp);
+@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ goto out;
+ }
+
+- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
++ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
++ cm_id_priv);
+ if (ret)
+ goto error1;
+ if (param->alternate_path) {
+ ret = cm_init_av_by_path(param->alternate_path,
+- &cm_id_priv->alt_av);
++ &cm_id_priv->alt_av, cm_id_priv);
+ if (ret)
+ goto error1;
+ }
+@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
+ dev_put(gid_attr.ndev);
+ }
+ work->path[0].gid_type = gid_attr.gid_type;
+- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
++ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
++ cm_id_priv);
+ }
+ if (ret) {
+ int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
+@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
+ goto rejected;
+ }
+ if (req_msg->alt_local_lid) {
+- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
++ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
++ cm_id_priv);
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ &work->path[0].sgid,
+@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
+ goto out;
+ }
+
+- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
++ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
++ cm_id_priv);
+ if (ret)
+ goto out;
+ cm_id_priv->alt_av.timeout =
+@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
+ cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
++ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
++ cm_id_priv);
+ ret = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!ret)
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
+ return -EINVAL;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
++ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
+ if (ret)
+ goto out;
+
+@@ -3468,7 +3535,9 @@ static int cm_establish(struct ib_cm_id *cm_id)
+ static int cm_migrate(struct ib_cm_id *cm_id)
+ {
+ struct cm_id_private *cm_id_priv;
++ struct cm_av tmp_av;
+ unsigned long flags;
++ int tmp_send_port_not_ready;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
+ (cm_id->lap_state == IB_CM_LAP_UNINIT ||
+ cm_id->lap_state == IB_CM_LAP_IDLE)) {
+ cm_id->lap_state = IB_CM_LAP_IDLE;
++ /* Swap address vector */
++ tmp_av = cm_id_priv->av;
+ cm_id_priv->av = cm_id_priv->alt_av;
++ cm_id_priv->alt_av = tmp_av;
++ /* Swap port send ready state */
++ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
++ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
++ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
+ } else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
+ port->cm_dev = cm_dev;
+ port->port_num = i;
+
++ INIT_LIST_HEAD(&port->cm_priv_prim_list);
++ INIT_LIST_HEAD(&port->cm_priv_altr_list);
++
+ ret = cm_create_port_fs(port);
+ if (ret)
+ goto error1;
+@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ {
+ struct cm_device *cm_dev = client_data;
+ struct cm_port *port;
++ struct cm_id_private *cm_id_priv;
++ struct ib_mad_agent *cur_mad_agent;
+ struct ib_port_modify port_modify = {
+ .clr_port_cap_mask = IB_PORT_CM_SUP
+ };
+@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+
+ port = cm_dev->port[i-1];
+ ib_modify_port(ib_device, port->port_num, 0, &port_modify);
++ /* Mark all the cm_ids as not valid */
++ spin_lock_irq(&cm.lock);
++ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
++ cm_id_priv->altr_send_port_not_ready = 1;
++ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
++ cm_id_priv->prim_send_port_not_ready = 1;
++ spin_unlock_irq(&cm.lock);
+ /*
+ * We flush the queue here after the going_down set, this
+ * verify that no new works will be queued in the recv handler,
+ * after that we can call the unregister_mad_agent
+ */
+ flush_workqueue(cm.wq);
+- ib_unregister_mad_agent(port->mad_agent);
++ spin_lock_irq(&cm.state_lock);
++ cur_mad_agent = port->mad_agent;
++ port->mad_agent = NULL;
++ spin_unlock_irq(&cm.state_lock);
++ ib_unregister_mad_agent(cur_mad_agent);
+ cm_remove_port_fs(port);
+ }
++
+ device_unregister(cm_dev->device);
+ kfree(cm_dev);
+ }
+@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
+ INIT_LIST_HEAD(&cm.device_list);
+ rwlock_init(&cm.device_lock);
+ spin_lock_init(&cm.lock);
++ spin_lock_init(&cm.state_lock);
+ cm.listen_service_table = RB_ROOT;
+ cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+ cm.remote_id_table = RB_ROOT;
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index c68746c..bdab61d 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -174,7 +174,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+
+ cur_base = addr & PAGE_MASK;
+
+- if (npages == 0) {
++ if (npages == 0 || npages > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 0012fa5..44b1104 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ container_of(uobj, struct ib_uqp_object, uevent.uobject);
+
+ idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
+- if (qp != qp->real_qp) {
+- ib_close_qp(qp);
+- } else {
++ if (qp == qp->real_qp)
+ ib_uverbs_detach_umcast(qp, uqp);
+- ib_destroy_qp(qp);
+- }
++ ib_destroy_qp(qp);
+ ib_uverbs_release_uevent(file, &uqp->uevent);
+ kfree(uqp);
+ }
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index bcf76c3..e362998 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -87,7 +87,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ qp->s_flags |= RVT_S_WAIT_RNR;
+- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
++ priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
+ add_timer(&priv->s_rnr_timer);
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index 1694037..8f59a4f 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -1152,7 +1152,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
+ rb_node = hfi1_mmu_rb_extract(pq->handler,
+ (unsigned long)iovec->iov.iov_base,
+ iovec->iov.iov_len);
+- if (rb_node && !IS_ERR(rb_node))
++ if (rb_node)
+ node = container_of(rb_node, struct sdma_mmu_node, rb);
+ else
+ rb_node = NULL;
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index 5fc6233..b9bf075 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (ah_attr->sl & 7) << 13;
+ ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
++ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
++ if (ret < 0)
++ return ERR_PTR(ret);
++ ah->av.eth.gid_index = ret;
+ ah->av.eth.vlan = cpu_to_be16(vlan_tag);
+ ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
+ if (ah_attr->static_rate) {
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 5df63da..efb6414 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
+ if (context)
+ if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
+ err = -EFAULT;
+- goto err_dbmap;
++ goto err_cq_free;
+ }
+
+ return &cq->ibcq;
+
++err_cq_free:
++ mlx4_cq_free(dev->dev, &cq->mcq);
++
+ err_dbmap:
+ if (context)
+ mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index e4fac929..ebe43cb 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -917,8 +917,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
+ if (err)
+ goto err_create;
+ } else {
+- /* for now choose 64 bytes till we have a proper interface */
+- cqe_size = 64;
++ cqe_size = cache_line_size() == 128 ? 128 : 64;
+ err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
+ &index, &inlen);
+ if (err)
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index bff8707a..19f8820 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2100,14 +2100,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ {
+ struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct ib_event ibev;
+-
++ bool fatal = false;
+ u8 port = 0;
+
+ switch (event) {
+ case MLX5_DEV_EVENT_SYS_ERROR:
+- ibdev->ib_active = false;
+ ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
++ fatal = true;
+ break;
+
+ case MLX5_DEV_EVENT_PORT_UP:
+@@ -2154,6 +2154,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+
+ if (ibdev->ib_active)
+ ib_dispatch_event(&ibev);
++
++ if (fatal)
++ ibdev->ib_active = false;
+ }
+
+ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
+@@ -2835,7 +2838,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+ }
+ err = init_node_data(dev);
+ if (err)
+- goto err_dealloc;
++ goto err_free_port;
+
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+@@ -2845,7 +2848,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ err = mlx5_enable_roce(dev);
+ if (err)
+- goto err_dealloc;
++ goto err_free_port;
+ }
+
+ err = create_dev_resources(&dev->devr);
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index affc3f6..19d590d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2037,8 +2037,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
+
+ mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
+ qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+- to_mcq(init_attr->recv_cq)->mcq.cqn,
+- to_mcq(init_attr->send_cq)->mcq.cqn);
++ init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
++ init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
+
+ qp->trans_qp.xrcdn = xrcdn;
+
+@@ -4702,6 +4702,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+ udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
++ if (init_attr->log_ind_tbl_size >
++ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
++ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
++ init_attr->log_ind_tbl_size,
++ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
++ return ERR_PTR(-EINVAL);
++ }
++
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
+index 33076a5..04ebbb5 100644
+--- a/drivers/infiniband/sw/rdmavt/dma.c
++++ b/drivers/infiniband/sw/rdmavt/dma.c
+@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+- if (offset + size > PAGE_SIZE)
+- return BAD_DMA_ADDRESS;
+-
+ addr = (u64)page_address(page);
+ if (addr)
+ addr += offset;
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index eedf2f1..7f5d735 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ {
+ int err;
+ struct socket *sock;
+- struct udp_port_cfg udp_cfg;
+- struct udp_tunnel_sock_cfg tnl_cfg;
+-
+- memset(&udp_cfg, 0, sizeof(udp_cfg));
++ struct udp_port_cfg udp_cfg = {0};
++ struct udp_tunnel_sock_cfg tnl_cfg = {0};
+
+ if (ipv6) {
+ udp_cfg.family = AF_INET6;
+@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ return ERR_PTR(err);
+ }
+
+- tnl_cfg.sk_user_data = NULL;
+ tnl_cfg.encap_type = 1;
+ tnl_cfg.encap_rcv = rxe_udp_encap_recv;
+- tnl_cfg.encap_destroy = NULL;
+
+ /* Setup UDP tunnel */
+ setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 22ba24f..f724a7e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
+ if (qp->sq.queue) {
+ __rxe_do_task(&qp->comp.task);
+ __rxe_do_task(&qp->req.task);
++ rxe_queue_reset(qp->sq.queue);
+ }
+
+ /* cleanup attributes */
+@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
+ {
+ qp->req.state = QP_STATE_ERROR;
+ qp->resp.state = QP_STATE_ERROR;
++ qp->attr.qp_state = IB_QPS_ERR;
+
+ /* drain work and packet queues */
+ rxe_run_task(&qp->resp.task, 1);
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
+index 0827425..d14bf49 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.c
++++ b/drivers/infiniband/sw/rxe/rxe_queue.c
+@@ -84,6 +84,15 @@ int do_mmap_info(struct rxe_dev *rxe,
+ return -EINVAL;
+ }
+
++inline void rxe_queue_reset(struct rxe_queue *q)
++{
++ /* queue is composed of a header and the memory
++ * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h;
++ * reset only the queue itself and not the management header
++ */
++ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
++}
++
+ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size)
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
+index 239fd60..8c8641c 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.h
++++ b/drivers/infiniband/sw/rxe/rxe_queue.h
+@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
+ size_t buf_size,
+ struct rxe_mmap_info **ip_p);
+
++void rxe_queue_reset(struct rxe_queue *q);
++
+ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size);
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 13a848a..43bb166 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -695,7 +695,8 @@ int rxe_requester(void *arg)
+ qp->req.wqe_index);
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+- goto complete;
++ __rxe_do_task(&qp->comp.task);
++ return 0;
+ }
+ payload = mtu;
+ }
+@@ -744,13 +745,17 @@ int rxe_requester(void *arg)
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ wqe->state = wqe_state_error;
+
+-complete:
+- if (qp_type(qp) != IB_QPT_RC) {
+- while (rxe_completer(qp) == 0)
+- ;
+- }
+-
+- return 0;
++ /*
++ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
++ * ---------8<---------8<-------------
++ * ...Note that if a completion error occurs, a Work Completion
++ * will always be generated, even if the signaling
++ * indicator requests an Unsignaled Completion.
++ * ---------8<---------8<-------------
++ */
++ wqe->wr.send_flags |= IB_SEND_SIGNALED;
++ __rxe_do_task(&qp->comp.task);
++ return -EAGAIN;
+
+ exit:
+ return -EAGAIN;
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 41b1138..70c646b 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
+ for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
+ lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
+
+- /* Put the device into reset state */
+- writel(0, lpss->priv + LPSS_PRIV_RESETS);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 3ac486a..c57e407 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
+ clones[i]);
+ }
+
++ put_device(dev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(mfd_clone_cell);
+diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
+index 94c7cc0..00dd7ff 100644
+--- a/drivers/mfd/stmpe.c
++++ b/drivers/mfd/stmpe.c
+@@ -761,6 +761,8 @@ static int stmpe1801_reset(struct stmpe *stmpe)
+ if (ret < 0)
+ return ret;
+
++ msleep(10);
++
+ timeout = jiffies + msecs_to_jiffies(100);
+ while (time_before(jiffies, timeout)) {
+ ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 1b5f531..bf3fd34 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2010,23 +2010,33 @@ static struct virtio_device_id id_table[] = {
+ { 0 },
+ };
+
++#define VIRTNET_FEATURES \
++ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
++ VIRTIO_NET_F_MAC, \
++ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
++ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
++ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
++ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
++ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
++ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
++ VIRTIO_NET_F_CTRL_MAC_ADDR, \
++ VIRTIO_NET_F_MTU
++
+ static unsigned int features[] = {
+- VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+- VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+- VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
+- VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+- VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+- VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+- VIRTIO_NET_F_CTRL_MAC_ADDR,
++ VIRTNET_FEATURES,
++};
++
++static unsigned int features_legacy[] = {
++ VIRTNET_FEATURES,
++ VIRTIO_NET_F_GSO,
+ VIRTIO_F_ANY_LAYOUT,
+- VIRTIO_NET_F_MTU,
+ };
+
+ static struct virtio_driver virtio_net_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
++ .feature_table_legacy = features_legacy,
++ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 4fdc3da..ea67ae9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
++ } else {
++ /* In theory, we wouldn't have to stop a running sched
++ * scan in order to start another one (for
++ * net-detect). But in practice this doesn't seem to
++ * work properly, so stop any running sched_scan now.
++ */
++ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
++ if (ret)
++ return ret;
+ }
+
+ /* rfkill release can be either for wowlan or netdetect */
+@@ -2088,6 +2097,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+ iwl_mvm_update_changed_regdom(mvm);
+
+ if (mvm->net_detect) {
++ /* If this is a non-unified image, we restart the FW,
++ * so no need to stop the netdetect scan. If that
++ * fails, continue and try to get the wake-up reasons,
++ * but trigger a HW restart by keeping a failure code
++ * in ret.
++ */
++ if (unified_image)
++ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
++ false);
++
+ iwl_mvm_query_netdetect_reasons(mvm, vif);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+@@ -2271,7 +2290,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+ {
+ struct iwl_mvm *mvm = inode->i_private;
+- int remaining_time = 10;
++ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
++ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ mvm->d3_test_active = false;
+
+@@ -2282,17 +2302,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+- ieee80211_restart_hw(mvm->hw);
++ if (!unified_image) {
++ int remaining_time = 10;
+
+- /* wait for restart and disconnect all interfaces */
+- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+- remaining_time > 0) {
+- remaining_time--;
+- msleep(1000);
+- }
++ ieee80211_restart_hw(mvm->hw);
+
+- if (remaining_time == 0)
+- IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
++ /* wait for restart and disconnect all interfaces */
++ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
++ remaining_time > 0) {
++ remaining_time--;
++ msleep(1000);
++ }
++
++ if (remaining_time == 0)
++ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
++ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 5dd77e3..90a1f4a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -4097,7 +4097,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
+ u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+ int ret;
+
+@@ -4119,7 +4118,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ }
+
+ if (notif->sync)
+- ret = wait_event_timeout(notif_waitq,
++ ret = wait_event_timeout(mvm->rx_sync_waitq,
+ atomic_read(&mvm->queue_sync_counter) == 0,
+ HZ);
+ WARN_ON_ONCE(!ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 6a615bb..e9cb970 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -932,6 +932,7 @@ struct iwl_mvm {
+ /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+ spinlock_t d0i3_tx_lock;
+ wait_queue_head_t d0i3_exit_waitq;
++ wait_queue_head_t rx_sync_waitq;
+
+ /* BT-Coex */
+ struct iwl_bt_coex_profile_notif last_bt_notif;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 55d9096..30bbdec 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -618,6 +618,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ spin_lock_init(&mvm->refs_lock);
+ skb_queue_head_init(&mvm->d0i3_tx);
+ init_waitqueue_head(&mvm->d0i3_exit_waitq);
++ init_waitqueue_head(&mvm->rx_sync_waitq);
+
+ atomic_set(&mvm->queue_sync_counter, 0);
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index afb7eb6..2b994be 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -545,7 +545,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ "Received expired RX queue sync message\n");
+ return;
+ }
+- atomic_dec(&mvm->queue_sync_counter);
++ if (!atomic_dec_return(&mvm->queue_sync_counter))
++ wake_up(&mvm->rx_sync_waitq);
+ }
+
+ switch (internal_notif->type) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index dac120f..3707ec6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -1185,6 +1185,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+
+ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+ {
++ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
++ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
++
+ /* This looks a bit arbitrary, but the idea is that if we run
+ * out of possible simultaneous scans and the userspace is
+ * trying to run a scan type that is already running, we
+@@ -1211,12 +1214,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+ case IWL_MVM_SCAN_NETDETECT:
+- /* No need to stop anything for net-detect since the
+- * firmware is restarted anyway. This way, any sched
+- * scans that were running will be restarted when we
+- * resume.
+- */
+- return 0;
++ /* For non-unified images, there's no need to stop
++ * anything for net-detect since the firmware is
++ * restarted anyway. This way, any sched scans that
++ * were running will be restarted when we resume.
++ */
++ if (!unified_image)
++ return 0;
++
++ /* If this is a unified image and we ran out of scans,
++ * we need to stop something. Prefer stopping regular
++ * scans, because the results are useless at this
++ * point, and we should be able to keep running
++ * another scheduled scan while suspended.
++ */
++ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
++ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
++ true);
++ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
++ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
++ true);
++
++ /* fall through, something is wrong if no scan was
++ * running but we ran out of scans.
++ */
+ default:
+ WARN_ON(1);
+ break;
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 78cf9a7..13842ca 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -526,48 +526,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+
+ #ifdef CONFIG_ACPI
+-#define SPL_METHOD "SPLC"
+-#define SPL_DOMAINTYPE_MODULE BIT(0)
+-#define SPL_DOMAINTYPE_WIFI BIT(1)
+-#define SPL_DOMAINTYPE_WIGIG BIT(2)
+-#define SPL_DOMAINTYPE_RFEM BIT(3)
++#define ACPI_SPLC_METHOD "SPLC"
++#define ACPI_SPLC_DOMAIN_WIFI (0x07)
+
+-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
++static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
+ {
+- union acpi_object *limits, *domain_type, *power_limit;
+-
+- if (splx->type != ACPI_TYPE_PACKAGE ||
+- splx->package.count != 2 ||
+- splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+- splx->package.elements[0].integer.value != 0) {
+- IWL_ERR(trans, "Unsupported splx structure\n");
++ union acpi_object *data_pkg, *dflt_pwr_limit;
++ int i;
++
++ /* We need at least two elements, one for the revision and one
++ * for the data itself. Also check that the revision is
++ * supported (currently only revision 0).
++ */
++ if (splc->type != ACPI_TYPE_PACKAGE ||
++ splc->package.count < 2 ||
++ splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
++ splc->package.elements[0].integer.value != 0) {
++ IWL_DEBUG_INFO(trans,
++ "Unsupported structure returned by the SPLC method. Ignoring.\n");
+ return 0;
+ }
+
+- limits = &splx->package.elements[1];
+- if (limits->type != ACPI_TYPE_PACKAGE ||
+- limits->package.count < 2 ||
+- limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+- limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+- IWL_ERR(trans, "Invalid limits element\n");
+- return 0;
++ /* loop through all the packages to find the one for WiFi */
++ for (i = 1; i < splc->package.count; i++) {
++ union acpi_object *domain;
++
++ data_pkg = &splc->package.elements[i];
++
++ /* Skip anything that is not a package with the right
++ * amount of elements (i.e. at least 2 integers).
++ */
++ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
++ data_pkg->package.count < 2 ||
++ data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
++ data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
++ continue;
++
++ domain = &data_pkg->package.elements[0];
++ if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
++ break;
++
++ data_pkg = NULL;
+ }
+
+- domain_type = &limits->package.elements[0];
+- power_limit = &limits->package.elements[1];
+- if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+- IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
++ if (!data_pkg) {
++ IWL_DEBUG_INFO(trans,
++ "No element for the WiFi domain returned by the SPLC method.\n");
+ return 0;
+ }
+
+- return power_limit->integer.value;
++ dflt_pwr_limit = &data_pkg->package.elements[1];
++ return dflt_pwr_limit->integer.value;
+ }
+
+ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+ {
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+- struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
++ struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+@@ -578,23 +594,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+ }
+
+ /* Get the method's handle */
+- status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
++ status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
++ &handle);
+ if (ACPI_FAILURE(status)) {
+- IWL_DEBUG_INFO(trans, "SPL method not found\n");
++ IWL_DEBUG_INFO(trans, "SPLC method not found\n");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+- status = acpi_evaluate_object(handle, NULL, NULL, &splx);
++ status = acpi_evaluate_object(handle, NULL, NULL, &splc);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
+ return;
+ }
+
+- trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
++ trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
+ trans->dflt_pwr_limit);
+- kfree(splx.pointer);
++ kfree(splc.pointer);
+ }
+
+ #else /* CONFIG_ACPI */
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 18650dc..478bba5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -522,6 +522,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, u32 txq_id)
+ {
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ txq->need_update = false;
+@@ -536,6 +537,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ return ret;
+
+ spin_lock_init(&txq->lock);
++
++ if (txq_id == trans_pcie->cmd_queue) {
++ static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
++
++ lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
++ }
++
+ __skb_queue_head_init(&txq->overflow_q);
+
+ /*
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
+index ec2e9c5..22394fe 100644
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -109,6 +109,7 @@
+ /* OMAP_RTC_OSC_REG bit fields: */
+ #define OMAP_RTC_OSC_32KCLK_EN BIT(6)
+ #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
++#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
+
+ /* OMAP_RTC_IRQWAKEEN bit fields: */
+ #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
+@@ -646,8 +647,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
+ */
+ if (rtc->has_ext_clk) {
+ reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
+- rtc_write(rtc, OMAP_RTC_OSC_REG,
+- reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
++ reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
++ reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
++ rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
+ }
+
+ rtc->type->lock(rtc);
+diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
+index d059ad4..97ee1b4 100644
+--- a/drivers/uwb/lc-rc.c
++++ b/drivers/uwb/lc-rc.c
+@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
+- if (dev)
++ if (dev) {
+ rc = dev_get_drvdata(dev);
++ put_device(dev);
++ }
++
+ return rc;
+ }
+
+@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
+ if (dev) {
+ rc = dev_get_drvdata(dev);
+ __uwb_rc_get(rc);
++ put_device(dev);
+ }
++
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
+@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
+
+ dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
+ find_rc_grandpa);
+- if (dev)
++ if (dev) {
+ rc = dev_get_drvdata(dev);
++ put_device(dev);
++ }
++
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
+@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
+- if (dev)
++ if (dev) {
+ rc = dev_get_drvdata(dev);
++ put_device(dev);
++ }
+
+ return rc;
+ }
+diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
+index c1304b8..678e937 100644
+--- a/drivers/uwb/pal.c
++++ b/drivers/uwb/pal.c
+@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
+
+ dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+
++ put_device(dev);
++
+ return (dev != NULL);
+ }
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index ea31931..7bd21aa 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -235,6 +235,7 @@ struct ext4_io_submit {
+ #define EXT4_MAX_BLOCK_SIZE 65536
+ #define EXT4_MIN_BLOCK_LOG_SIZE 10
+ #define EXT4_MAX_BLOCK_LOG_SIZE 16
++#define EXT4_MAX_CLUSTER_LOG_SIZE 30
+ #ifdef __KERNEL__
+ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
+ #else
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3ec8708..ec89f50 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3518,7 +3518,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+- "Unsupported filesystem blocksize %d", blocksize);
++ "Unsupported filesystem blocksize %d (%d log_block_size)",
++ blocksize, le32_to_cpu(es->s_log_block_size));
++ goto failed_mount;
++ }
++ if (le32_to_cpu(es->s_log_block_size) >
++ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++ ext4_msg(sb, KERN_ERR,
++ "Invalid log block size: %u",
++ le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+
+@@ -3650,6 +3658,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ "block size (%d)", clustersize, blocksize);
+ goto failed_mount;
+ }
++ if (le32_to_cpu(es->s_log_cluster_size) >
++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++ ext4_msg(sb, KERN_ERR,
++ "Invalid log cluster size: %u",
++ le32_to_cpu(es->s_log_cluster_size));
++ goto failed_mount;
++ }
+ sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+ le32_to_cpu(es->s_log_block_size);
+ sbi->s_clusters_per_group =
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 3988b43..a621dd9 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
+ {
+ struct inode *inode = page->mapping->host;
+
++ /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
++ if (!copied)
++ goto unlock;
++
+ if (!PageUptodate(page)) {
+ /* Zero any unwritten bytes at the end of the page */
+ size_t endoff = (pos + copied) & ~PAGE_MASK;
+@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
+
+ fuse_write_update_size(inode, pos + copied);
+ set_page_dirty(page);
++
++unlock:
+ unlock_page(page);
+ put_page(page);
+
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index ab02a45..e5d1934 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -25,6 +25,7 @@ struct svc_xprt_ops {
+ void (*xpo_detach)(struct svc_xprt *);
+ void (*xpo_free)(struct svc_xprt *);
+ int (*xpo_secure_port)(struct svc_rqst *);
++ void (*xpo_kill_temp_xprt)(struct svc_xprt *);
+ };
+
+ struct svc_xprt_class {
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 9530fcd..9d592c6 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+
+ } else if (new->flags & IRQF_TRIGGER_MASK) {
+ unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+- unsigned int omsk = irq_settings_get_trigger_mask(desc);
++ unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
+
+ if (nmsk != omsk)
+ /* hope the handler works with current trigger mode */
+ pr_warn("irq %d uses trigger mode %u; requested %u\n",
+- irq, nmsk, omsk);
++ irq, omsk, nmsk);
+ }
+
+ *old_ptr = new;
+diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
+index 084452e..bdff5ed 100644
+--- a/kernel/power/suspend_test.c
++++ b/kernel/power/suspend_test.c
+@@ -203,8 +203,10 @@ static int __init test_suspend(void)
+
+ /* RTCs have initialized by now too ... can we use one? */
+ dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
+- if (dev)
++ if (dev) {
+ rtc = rtc_class_open(dev_name(dev));
++ put_device(dev);
++ }
+ if (!rtc) {
+ printk(warn_no_rtc);
+ return 0;
+diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
+index d0a1617..979e7bf 100644
+--- a/kernel/trace/Makefile
++++ b/kernel/trace/Makefile
+@@ -1,8 +1,4 @@
+
+-# We are fully aware of the dangers of __builtin_return_address()
+-FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
+-KBUILD_CFLAGS += $(FRAME_CFLAGS)
+-
+ # Do not instrument the tracer itself:
+
+ ifdef CONFIG_FUNCTION_TRACER
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 84752c8..b1d7f1b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1856,6 +1856,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+
+ /* Update rec->flags */
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ /* We need to update only differences of filter_hash */
+ in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+ in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+@@ -1878,6 +1882,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+
+ /* Roll back what we did above */
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (rec == end)
+ goto err_out;
+
+@@ -2391,6 +2399,10 @@ void __weak ftrace_replace_code(int enable)
+ return;
+
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ failed = __ftrace_replace_code(rec, enable);
+ if (failed) {
+ ftrace_bug(failed, rec);
+@@ -2757,7 +2769,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ struct dyn_ftrace *rec;
+
+ do_for_each_ftrace_rec(pg, rec) {
+- if (FTRACE_WARN_ON_ONCE(rec->flags))
++ if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
+ pr_warn(" %pS flags:%lx\n",
+ (void *)rec->ip, rec->flags);
+ } while_for_each_ftrace_rec();
+@@ -3592,6 +3604,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
+ goto out_unlock;
+
+ do_for_each_ftrace_rec(pg, rec) {
++
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
+ ret = enter_record(hash, rec, clear_filter);
+ if (ret < 0) {
+@@ -3787,6 +3803,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+
+ do_for_each_ftrace_rec(pg, rec) {
+
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (!ftrace_match_record(rec, &func_g, NULL, 0))
+ continue;
+
+@@ -4679,6 +4698,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
+
+ do_for_each_ftrace_rec(pg, rec) {
+
++ if (rec->flags & FTRACE_FL_DISABLED)
++ continue;
++
+ if (ftrace_match_record(rec, &func_g, NULL, 0)) {
+ /* if it is in the array */
+ exists = false;
+diff --git a/mm/Makefile b/mm/Makefile
+index 2ca1faf..295bd7a 100644
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -21,9 +21,6 @@ KCOV_INSTRUMENT_memcontrol.o := n
+ KCOV_INSTRUMENT_mmzone.o := n
+ KCOV_INSTRUMENT_vmstat.o := n
+
+-# Since __builtin_frame_address does work as used, disable the warning.
+-CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+-
+ mmu-y := nommu.o
+ mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
+ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 8e999ff..8af9d25 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1549,24 +1549,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct sock *sk = sock->sk;
+ struct bcm_sock *bo = bcm_sk(sk);
++ int ret = 0;
+
+ if (len < sizeof(*addr))
+ return -EINVAL;
+
+- if (bo->bound)
+- return -EISCONN;
++ lock_sock(sk);
++
++ if (bo->bound) {
++ ret = -EISCONN;
++ goto fail;
++ }
+
+ /* bind a device to this socket */
+ if (addr->can_ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(&init_net, addr->can_ifindex);
+- if (!dev)
+- return -ENODEV;
+-
++ if (!dev) {
++ ret = -ENODEV;
++ goto fail;
++ }
+ if (dev->type != ARPHRD_CAN) {
+ dev_put(dev);
+- return -ENODEV;
++ ret = -ENODEV;
++ goto fail;
+ }
+
+ bo->ifindex = dev->ifindex;
+@@ -1577,17 +1584,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
+ bo->ifindex = 0;
+ }
+
+- bo->bound = 1;
+-
+ if (proc_dir) {
+ /* unique socket address as filename */
+ sprintf(bo->procname, "%lu", sock_i_ino(sk));
+ bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
+ proc_dir,
+ &bcm_proc_fops, sk);
++ if (!bo->bcm_proc_read) {
++ ret = -ENOMEM;
++ goto fail;
++ }
+ }
+
+- return 0;
++ bo->bound = 1;
++
++fail:
++ release_sock(sk);
++
++ return ret;
+ }
+
+ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 0af2669..584ac76 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -143,7 +143,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
+ if (!(set->flags & NFT_SET_TIMEOUT))
+ return -EINVAL;
+- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
++ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
++ tb[NFTA_DYNSET_TIMEOUT])));
+ }
+
+ priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
+@@ -230,7 +231,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ goto nla_put_failure;
+ if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
+ goto nla_put_failure;
+- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
++ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
++ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
+ NFTA_DYNSET_PAD))
+ goto nla_put_failure;
+ if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index c3f6523..3bc1d61 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
+ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
+ {
+ struct svc_xprt *xprt;
+- struct svc_sock *svsk;
+- struct socket *sock;
+ struct list_head *le, *next;
+ LIST_HEAD(to_be_closed);
+- struct linger no_linger = {
+- .l_onoff = 1,
+- .l_linger = 0,
+- };
+
+ spin_lock_bh(&serv->sv_lock);
+ list_for_each_safe(le, next, &serv->sv_tempsocks) {
+@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
+ list_del_init(le);
+ xprt = list_entry(le, struct svc_xprt, xpt_list);
+ dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
+- svsk = container_of(xprt, struct svc_sock, sk_xprt);
+- sock = svsk->sk_sock;
+- kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
+- (char *)&no_linger, sizeof(no_linger));
++ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
+ svc_close_xprt(xprt);
+ }
+ }
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 57625f6..a4bc982 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
+ return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
+ }
+
++static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
++{
++ struct svc_sock *svsk;
++ struct socket *sock;
++ struct linger no_linger = {
++ .l_onoff = 1,
++ .l_linger = 0,
++ };
++
++ svsk = container_of(xprt, struct svc_sock, sk_xprt);
++ sock = svsk->sk_sock;
++ kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
++ (char *)&no_linger, sizeof(no_linger));
++}
++
+ /*
+ * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
+ */
+@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
+ return NULL;
+ }
+
++static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
++{
++}
++
+ static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
+ struct net *net,
+ struct sockaddr *sa, int salen,
+@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
+ .xpo_has_wspace = svc_udp_has_wspace,
+ .xpo_accept = svc_udp_accept,
+ .xpo_secure_port = svc_sock_secure_port,
++ .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
+ };
+
+ static struct svc_xprt_class svc_udp_class = {
+@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
+ .xpo_has_wspace = svc_tcp_has_wspace,
+ .xpo_accept = svc_tcp_accept,
+ .xpo_secure_port = svc_sock_secure_port,
++ .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
+ };
+
+ static struct svc_xprt_class svc_tcp_class = {
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 924271c..a55b809 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
+ static void svc_rdma_free(struct svc_xprt *xprt);
+ static int svc_rdma_has_wspace(struct svc_xprt *xprt);
+ static int svc_rdma_secure_port(struct svc_rqst *);
++static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
+
+ static struct svc_xprt_ops svc_rdma_ops = {
+ .xpo_create = svc_rdma_create,
+@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
+ .xpo_has_wspace = svc_rdma_has_wspace,
+ .xpo_accept = svc_rdma_accept,
+ .xpo_secure_port = svc_rdma_secure_port,
++ .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
+ };
+
+ struct svc_xprt_class svc_rdma_class = {
+@@ -1285,6 +1287,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
+ return 1;
+ }
+
++static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
++{
++}
++
+ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
+ {
+ struct ib_send_wr *bad_wr, *n_wr;
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 973e8c1..17867e7 100755
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 26e866f..1628180 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6905,8 +6905,6 @@ static const struct hda_fixup alc662_fixups[] = {
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x40f000f0 }, /* disabled */
+ { 0x16, 0x40f000f0 }, /* disabled */
+- { 0x18, 0x01014011 }, /* LO */
+- { 0x1a, 0x01014012 }, /* LO */
+ { }
+ }
+ },
+diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
+index 6a23302..4d9d320 100644
+--- a/sound/pci/hda/thinkpad_helper.c
++++ b/sound/pci/hda/thinkpad_helper.c
+@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
+ static bool is_thinkpad(struct hda_codec *codec)
+ {
+ return (codec->core.subsystem_id >> 16 == 0x17aa) &&
+- (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068"));
++ (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
++ acpi_dev_found("IBM0068"));
+ }
+
+ static void update_tpacpi_mute_led(void *private_data, int enabled)
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 9e5276d..2ddc034 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
+ snd_usb_endpoint_free(ep);
+
+ mutex_destroy(&chip->mutex);
+- dev_set_drvdata(&chip->dev->dev, NULL);
++ if (!atomic_read(&chip->shutdown))
++ dev_set_drvdata(&chip->dev->dev, NULL);
+ kfree(chip);
+ return 0;
+ }
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index de15dbc..7214913 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -1596,18 +1596,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
+ if (prog)
+ ui_progress__update(prog, 1);
+
++ hists->nr_entries++;
++ if (!he->filtered) {
++ hists->nr_non_filtered_entries++;
++ hists__calc_col_len(hists, he);
++ }
++
+ if (!he->leaf) {
+ hists__hierarchy_output_resort(hists, prog,
+ &he->hroot_in,
+ &he->hroot_out,
+ min_callchain_hits,
+ use_callchain);
+- hists->nr_entries++;
+- if (!he->filtered) {
+- hists->nr_non_filtered_entries++;
+- hists__calc_col_len(hists, he);
+- }
+-
+ continue;
+ }
+
+diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
+index 6e9c40e..69ccce3 100644
+--- a/virt/kvm/arm/pmu.c
++++ b/virt/kvm/arm/pmu.c
+@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+ continue;
+ type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+ & ARMV8_PMU_EVTYPE_EVENT;
+- if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
++ if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
+ && (enable & BIT(i))) {
+ reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+ reg = lower_32_bits(reg);
+@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
+
+ /* Software increment event doesn't need to be backed by a perf event */
+- if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
++ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
++ select_idx != ARMV8_PMU_CYCLE_IDX)
+ return;
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+ attr.exclude_hv = 1; /* Don't count EL2 events */
+ attr.exclude_host = 1; /* Don't count host events */
+- attr.config = eventsel;
++ attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
++ ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
+
+ counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+ /* The initial sample period (overflow count) of an event. */