author     Mike Pagano <mpagano@gentoo.org>  2023-08-30 10:56:38 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2023-08-30 10:56:38 -0400
commit     7f358e4ee5f57264ec5ddc6741ea2aea1e5c6a3f (patch)
tree       0d08163312e7f4bdacd0ae9fb27b587f6d6a5f9f
parent     Linux patch 5.4.254 (diff)
download   linux-patches-7f358e4ee5f57264ec5ddc6741ea2aea1e5c6a3f.tar.gz
           linux-patches-7f358e4ee5f57264ec5ddc6741ea2aea1e5c6a3f.tar.bz2
           linux-patches-7f358e4ee5f57264ec5ddc6741ea2aea1e5c6a3f.zip
Linux patch 5.4.255 (tag: 5.4-262)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |    4
-rw-r--r--  1254_linux-5.4.255.patch | 8213
2 files changed, 8217 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 3c468d97..250d3166 100644
--- a/0000_README
+++ b/0000_README
@@ -1059,6 +1059,10 @@ Patch: 1253_linux-5.4.254.patch
From: https://www.kernel.org
Desc: Linux 5.4.254
+Patch: 1254_linux-5.4.255.patch
+From: https://www.kernel.org
+Desc: Linux 5.4.255
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1254_linux-5.4.255.patch b/1254_linux-5.4.255.patch
new file mode 100644
index 00000000..baf01c99
--- /dev/null
+++ b/1254_linux-5.4.255.patch
@@ -0,0 +1,8213 @@
+diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
+index 64aeee1009cab..fdc9c99437d1a 100644
+--- a/Documentation/admin-guide/sysctl/vm.rst
++++ b/Documentation/admin-guide/sysctl/vm.rst
+@@ -61,6 +61,7 @@ Currently, these files are in /proc/sys/vm:
+ - overcommit_memory
+ - overcommit_ratio
+ - page-cluster
++- page_lock_unfairness
+ - panic_on_oom
+ - percpu_pagelist_fraction
+ - stat_interval
+@@ -741,6 +742,14 @@ extra faults and I/O delays for following faults if they would have been part of
+ that consecutive pages readahead would have brought in.
+
+
++page_lock_unfairness
++====================
++
++This value determines the number of times that the page lock can be
++stolen from under a waiter. After the lock is stolen the number of times
++specified in this file (default is 5), the "fair lock handoff" semantics
++will apply, and the waiter will only be awakened if the lock can be taken.
++
+ panic_on_oom
+ ============
+
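The new tunable behaves like any other vm sysctl, so it can be inspected through procfs. A minimal userspace sketch, assuming a kernel carrying this patch:

    #include <stdio.h>

    /* Read the current page-lock steal budget; returns -1 if the
     * sysctl is absent (kernel predates this patch). */
    static int read_page_lock_unfairness(void)
    {
            FILE *f = fopen("/proc/sys/vm/page_lock_unfairness", "r");
            int val = -1;

            if (!f)
                    return -1;
            if (fscanf(f, "%d", &val) != 1)
                    val = -1;
            fclose(f);
            return val;
    }

    int main(void)
    {
            /* Default is 5: five steals before fair handoff kicks in. */
            printf("page_lock_unfairness = %d\n", read_page_lock_unfairness());
            return 0;
    }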
+diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
+index 2c2ec99b50886..78bef529464fa 100644
+--- a/Documentation/power/runtime_pm.rst
++++ b/Documentation/power/runtime_pm.rst
+@@ -382,6 +382,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
+ nonzero, increment the counter and return 1; otherwise return 0 without
+ changing the counter
+
++ `int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);`
++ - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
++ runtime PM status is RPM_ACTIVE, and either ign_usage_count is true
++ or the device's usage_count is non-zero, increment the counter and
++ return 1; otherwise return 0 without changing the counter
++
+ `void pm_runtime_put_noidle(struct device *dev);`
+ - decrement the device's usage counter
+
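As a usage illustration of the helper documented above (the surrounding function is hypothetical, not part of this patch), a driver that only wants to touch hardware that is already powered might do:

    #include <linux/pm_runtime.h>

    /* Hypothetical helper: access registers only if the device is already
     * RPM_ACTIVE, never waking it from runtime suspend. */
    static void example_poll_if_active(struct device *dev)
    {
            /* ign_usage_count == true: pin any RPM_ACTIVE device */
            if (pm_runtime_get_if_active(dev, true) <= 0)
                    return; /* suspended (0) or runtime PM disabled (-EINVAL) */

            /* ... hardware access is safe here ... */

            pm_runtime_put(dev); /* balance the reference taken above */
    }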
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 34d3497f11772..2040c2f76dcf7 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1101,7 +1101,7 @@ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER
+ M: William Breathitt Gray <vilhelm.gray@gmail.com>
+ L: linux-iio@vger.kernel.org
+ S: Maintained
+-F: drivers/iio/adc/stx104.c
++F: drivers/iio/addac/stx104.c
+
+ APM DRIVER
+ M: Jiri Kosina <jikos@kernel.org>
+diff --git a/Makefile b/Makefile
+index bf7299823095f..041adebe7da2d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 254
++SUBLEVEL = 255
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
+index 8257630f7a491..42700d7f8bf74 100644
+--- a/arch/arm/boot/dts/imx23.dtsi
++++ b/arch/arm/boot/dts/imx23.dtsi
+@@ -59,7 +59,7 @@
+ reg = <0x80000000 0x2000>;
+ };
+
+- dma_apbh: dma-apbh@80004000 {
++ dma_apbh: dma-controller@80004000 {
+ compatible = "fsl,imx23-dma-apbh";
+ reg = <0x80004000 0x2000>;
+ interrupts = <0 14 20 0
+diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
+index e14d8ef0158b8..235c69bd181fe 100644
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -78,7 +78,7 @@
+ status = "disabled";
+ };
+
+- dma_apbh: dma-apbh@80004000 {
++ dma_apbh: dma-controller@80004000 {
+ compatible = "fsl,imx28-dma-apbh";
+ reg = <0x80004000 0x2000>;
+ interrupts = <82 83 84 85
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index bb02923bc2e5b..861392ff70861 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -160,7 +160,7 @@
+ interrupt-parent = <&gpc>;
+ ranges;
+
+- dma_apbh: dma-apbh@110000 {
++ dma_apbh: dma-controller@110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index 790cc88c8b1ae..3dc1e97e145cd 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -211,7 +211,7 @@
+ power-domains = <&pd_pu>;
+ };
+
+- dma_apbh: dma-apbh@1804000 {
++ dma_apbh: dma-controller@1804000 {
+ compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x01804000 0x2000>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+@@ -958,6 +958,8 @@
+ <&clks IMX6SX_CLK_USDHC1>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++				fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+@@ -970,6 +972,8 @@
+ <&clks IMX6SX_CLK_USDHC2>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++				fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+@@ -982,6 +986,8 @@
+ <&clks IMX6SX_CLK_USDHC3>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++				fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index 05390cc2a3b3b..5b677b66162ac 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -174,7 +174,7 @@
+ <0x00a06000 0x2000>;
+ };
+
+- dma_apbh: dma-apbh@1804000 {
++ dma_apbh: dma-controller@1804000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x01804000 0x2000>;
+ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index e5151a7849d6b..791530124fb0a 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -1133,6 +1133,8 @@
+ <&clks IMX7D_USDHC1_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-step = <2>;
++ fsl,tuning-start-tap = <20>;
+ status = "disabled";
+ };
+
+@@ -1145,6 +1147,8 @@
+ <&clks IMX7D_USDHC2_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-step = <2>;
++ fsl,tuning-start-tap = <20>;
+ status = "disabled";
+ };
+
+@@ -1157,6 +1161,8 @@
+ <&clks IMX7D_USDHC3_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-step = <2>;
++ fsl,tuning-start-tap = <20>;
+ status = "disabled";
+ };
+
+@@ -1192,14 +1198,13 @@
+ };
+ };
+
+- dma_apbh: dma-apbh@33000000 {
++ dma_apbh: dma-controller@33000000 {
+ compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x33000000 0x2000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+- interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
+ #dma-cells = <1>;
+ dma-channels = <4>;
+ clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
+diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
+index 3e26b0c7391b8..ae4a2f52e3c4d 100644
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -124,7 +124,24 @@
+ #define cpu_has_tx39_cache __opt(MIPS_CPU_TX39_CACHE)
+ #endif
+ #ifndef cpu_has_octeon_cache
+-#define cpu_has_octeon_cache 0
++#define cpu_has_octeon_cache \
++({ \
++ int __res; \
++ \
++ switch (boot_cpu_type()) { \
++ case CPU_CAVIUM_OCTEON: \
++ case CPU_CAVIUM_OCTEON_PLUS: \
++ case CPU_CAVIUM_OCTEON2: \
++ case CPU_CAVIUM_OCTEON3: \
++ __res = 1; \
++ break; \
++ \
++ default: \
++ __res = 0; \
++ } \
++ \
++ __res; \
++})
+ #endif
+ /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
+ #ifndef cpu_has_fpu
+@@ -341,7 +358,7 @@
+ ({ \
+ int __res; \
+ \
+- switch (current_cpu_type()) { \
++ switch (boot_cpu_type()) { \
+ case CPU_M14KC: \
+ case CPU_74K: \
+ case CPU_1074K: \
+diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
+index 1e1247add1cf8..908e96e3a3117 100644
+--- a/arch/mips/include/asm/dec/prom.h
++++ b/arch/mips/include/asm/dec/prom.h
+@@ -70,7 +70,7 @@ static inline bool prom_is_rex(u32 magic)
+ */
+ typedef struct {
+ int pagesize;
+- unsigned char bitmap[0];
++ unsigned char bitmap[];
+ } memmap;
+
+
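The bitmap change above is the standard migration from a GCC zero-length array to a C99 flexible array member; the allocation idiom is unchanged, but bounds checkers can now see the array's real extent. A minimal sketch using the kernel's overflow-safe size helper:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    typedef struct {
            int pagesize;
            unsigned char bitmap[]; /* flexible array member, sized at alloc */
    } memmap;

    static memmap *alloc_memmap(size_t bitmap_bytes)
    {
            memmap *m;

            /* struct_size() adds the header size and checks for overflow */
            m = kmalloc(struct_size(m, bitmap, bitmap_bytes), GFP_KERNEL);
            return m;
    }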
+diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
+index 84f794782c62f..7defca2f8e8bf 100644
+--- a/arch/powerpc/kernel/rtas_flash.c
++++ b/arch/powerpc/kernel/rtas_flash.c
+@@ -710,9 +710,9 @@ static int __init rtas_flash_init(void)
+ if (!rtas_validate_flash_data.buf)
+ return -ENOMEM;
+
+- flash_block_cache = kmem_cache_create("rtas_flash_cache",
+- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+- NULL);
++ flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
++ RTAS_BLK_SIZE, RTAS_BLK_SIZE,
++ 0, 0, RTAS_BLK_SIZE, NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __func__);
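The rtas_flash change matters because blocks from this cache are later copied to and from userspace; with CONFIG_HARDENED_USERCOPY, copies out of a slab cache must fall inside a declared whitelist. A reduced sketch of the API (cache name and size here are illustrative, not from this patch):

    #include <linux/init.h>
    #include <linux/slab.h>

    #define EXAMPLE_BLK_SIZE 4096 /* illustrative; the patch uses RTAS_BLK_SIZE */

    static struct kmem_cache *example_cache;

    static int __init example_cache_init(void)
    {
            /* Args: name, size, align, flags, useroffset, usersize, ctor.
             * useroffset = 0 and usersize = size whitelist the whole
             * object for copy_{to,from}_user(). */
            example_cache = kmem_cache_create_usercopy("example_cache",
                            EXAMPLE_BLK_SIZE, EXAMPLE_BLK_SIZE, 0,
                            0, EXAMPLE_BLK_SIZE, NULL);
            return example_cache ? 0 : -ENOMEM;
    }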
+diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
+index 6577897673dda..22f1a7c3f4362 100644
+--- a/arch/powerpc/mm/kasan/Makefile
++++ b/arch/powerpc/mm/kasan/Makefile
+@@ -1,5 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+ KASAN_SANITIZE := n
++KCOV_INSTRUMENT := n
+
+ obj-$(CONFIG_PPC32) += kasan_init_32.o
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 046782df37a6d..d8162f6baa5d5 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -805,6 +805,14 @@ void __init fpu__init_system_xstate(void)
+ fpu__init_prepare_fx_sw_frame();
+ setup_init_fpu_buf();
+ setup_xstate_comp();
++
++ /*
++ * CPU capabilities initialization runs before FPU init. So
++ * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
++ * functional, set the feature bit so depending code works.
++ */
++ setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
++
+ print_xstate_offset_size();
+
+ pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
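The forced X86_FEATURE_OSXSAVE bit brings the kernel's view in line with what CPUID already reports once CR4.OSXSAVE is set; the same state is visible from userspace via CPUID leaf 1, ECX bit 27. A small standalone check (GCC/Clang on x86):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;

            /* CPUID.1:ECX[27] mirrors CR4.OSXSAVE: the OS enabled XSAVE */
            printf("OSXSAVE: %s\n", (ecx & bit_OSXSAVE) ? "yes" : "no");
            return 0;
    }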
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index d5c2d86fbecd4..7f93ac63b5b64 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1048,8 +1048,10 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
+ int retval;
+
+ if (rpmflags & RPM_GET_PUT) {
+- if (!atomic_dec_and_test(&dev->power.usage_count))
++ if (!atomic_dec_and_test(&dev->power.usage_count)) {
++ trace_rpm_usage_rcuidle(dev, rpmflags);
+ return 0;
++ }
+ }
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+@@ -1080,8 +1082,10 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
+ int retval;
+
+ if (rpmflags & RPM_GET_PUT) {
+- if (!atomic_dec_and_test(&dev->power.usage_count))
++ if (!atomic_dec_and_test(&dev->power.usage_count)) {
++ trace_rpm_usage_rcuidle(dev, rpmflags);
+ return 0;
++ }
+ }
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+@@ -1125,28 +1129,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
+ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
+
+ /**
+- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
++ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
+ * @dev: Device to handle.
+ *
+ * Return -EINVAL if runtime PM is disabled for the device.
+ *
+- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
+- * and the runtime PM usage counter is nonzero, increment the counter and
+- * return 1. Otherwise return 0 without changing the counter.
++ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
++ * ign_usage_count is true or the device's usage_count is non-zero, increment
++ * the counter and return 1. Otherwise return 0 without changing the counter.
++ *
++ * If ign_usage_count is true, the function can be used to prevent suspending
++ * the device when its runtime PM status is RPM_ACTIVE.
++ *
++ * If ign_usage_count is false, the function can be used to prevent suspending
++ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
++ * is non-zero.
++ *
++ * The caller is responsible for putting the device's usage count when the
++ * return value is greater than zero.
+ */
+-int pm_runtime_get_if_in_use(struct device *dev)
++int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+ {
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+- retval = dev->power.disable_depth > 0 ? -EINVAL :
+- dev->power.runtime_status == RPM_ACTIVE
+- && atomic_inc_not_zero(&dev->power.usage_count);
++ if (dev->power.disable_depth > 0) {
++ retval = -EINVAL;
++ } else if (dev->power.runtime_status != RPM_ACTIVE) {
++ retval = 0;
++ } else if (ign_usage_count) {
++ retval = 1;
++ atomic_inc(&dev->power.usage_count);
++ } else {
++ retval = atomic_inc_not_zero(&dev->power.usage_count);
++ }
++ trace_rpm_usage_rcuidle(dev, 0);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
++
+ return retval;
+ }
+-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
++EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
+
+ /**
+ * __pm_runtime_set_status - Set runtime PM status of a device.
+@@ -1476,6 +1499,8 @@ void pm_runtime_allow(struct device *dev)
+ dev->power.runtime_auto = true;
+ if (atomic_dec_and_test(&dev->power.usage_count))
+ rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
++ else
++ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+@@ -1543,6 +1568,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+ if (!old_use || old_delay >= 0) {
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
++ } else {
++ trace_rpm_usage_rcuidle(dev, 0);
+ }
+ }
+
+diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
+index ac9b31c57967d..6d934c35a7700 100644
+--- a/drivers/base/regmap/regmap-i2c.c
++++ b/drivers/base/regmap/regmap-i2c.c
+@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ static struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .write = regmap_i2c_smbus_i2c_write,
+ .read = regmap_i2c_smbus_i2c_read,
+- .max_raw_read = I2C_SMBUS_BLOCK_MAX,
+- .max_raw_write = I2C_SMBUS_BLOCK_MAX,
++ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
++ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
+ };
+
+ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 44aeceaccfa48..e1a9838c96655 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1809,7 +1809,7 @@ static int sysc_reset(struct sysc *ddata)
+
+ sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
+
+- if (ddata->legacy_mode || sysc_offset < 0 ||
++ if (ddata->legacy_mode ||
+ ddata->cap->regbits->srst_shift < 0 ||
+ ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ return 0;
+@@ -1819,9 +1819,13 @@ static int sysc_reset(struct sysc *ddata)
+ if (ddata->pre_reset_quirk)
+ ddata->pre_reset_quirk(ddata);
+
+- sysc_val = sysc_read_sysconfig(ddata);
+- sysc_val |= sysc_mask;
+- sysc_write(ddata, sysc_offset, sysc_val);
++ if (sysc_offset >= 0) {
++ sysc_val = sysc_read_sysconfig(ddata);
++ sysc_val |= sysc_mask;
++ sysc_write(ddata, sysc_offset, sysc_val);
++ /* Flush posted write */
++ sysc_val = sysc_read_sysconfig(ddata);
++ }
+
+ if (ddata->cfg.srst_udelay)
+ usleep_range(ddata->cfg.srst_udelay,
+diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
+index 4fb4fd4b06bda..737aa70e2cb3d 100644
+--- a/drivers/clk/clk-devres.c
++++ b/drivers/clk/clk-devres.c
+@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
+ struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id)
+ {
+- struct clk **ptr, *clk;
++ struct devm_clk_state *state;
++ struct clk *clk;
+
+- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+- if (!ptr)
++ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
++ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ clk = of_clk_get_by_name(np, con_id);
+ if (!IS_ERR(clk)) {
+- *ptr = clk;
+- devres_add(dev, ptr);
++ state->clk = clk;
++ devres_add(dev, state);
+ } else {
+- devres_free(ptr);
++ devres_free(state);
+ }
+
+ return clk;
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 6713cfb1995c6..7e7356970d5fc 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
+ */
+ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+ {
++ LIST_HEAD(signalled);
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+ if (!timeline_fence_signaled(&pt->base))
+ break;
+
+- list_del_init(&pt->link);
++ dma_fence_get(&pt->base);
++
++ list_move_tail(&pt->link, &signalled);
+ rb_erase(&pt->node, &obj->pt_tree);
+
+- /*
+- * A signal callback may release the last reference to this
+- * fence, causing it to be freed. That operation has to be
+- * last to avoid a use after free inside this loop, and must
+- * be after we remove the fence from the timeline in order to
+- * prevent deadlocking on timeline->lock inside
+- * timeline_fence_release().
+- */
+ dma_fence_signal_locked(&pt->base);
+ }
+
+ spin_unlock_irq(&obj->lock);
++
++ list_for_each_entry_safe(pt, next, &signalled, link) {
++ list_del_init(&pt->link);
++ dma_fence_put(&pt->base);
++ }
+ }
+
+ /**
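The sw_sync fix above is an instance of a general pattern: take a reference and move finished entries to a private list while the lock is held, then drop the references only after unlocking, since a release callback may need the same lock. Stripped to its shape (types and callbacks here are illustrative):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head link;
    };

    static void drain_done_items(spinlock_t *lock, struct list_head *pending,
                                 bool (*done)(struct item *),
                                 void (*put)(struct item *))
    {
            LIST_HEAD(finished);
            struct item *it, *next;

            spin_lock_irq(lock);
            list_for_each_entry_safe(it, next, pending, link)
                    if (done(it))
                            list_move_tail(&it->link, &finished);
            spin_unlock_irq(lock);

            /* Final puts happen unlocked: put() may free the item, and its
             * release path may itself take 'lock'. */
            list_for_each_entry_safe(it, next, &finished, link) {
                    list_del_init(&it->link);
                    put(it);
            }
    }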
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 7eeb98fe50ed7..0e478d4d830c9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1575,15 +1575,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
++ if (r > 0 && fence->error)
++ r = fence->error;
++
+ dma_fence_put(fence);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+-
+- if (fence->error)
+- return fence->error;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index fa3acf60e7bd2..c4c99bc7f2890 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2902,7 +2902,9 @@ static void dcn10_wait_for_mpcc_disconnect(
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
++ if (pipe_ctx->stream_res.tg &&
++ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
++ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ hubp->funcs->set_blank(hubp, true);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 7b54606783821..ba64dad1d7c9e 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -271,7 +271,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ {
+ struct drm_radeon_cs *cs = data;
+ uint64_t *chunk_array_ptr;
+- unsigned size, i;
++ u64 size;
++ unsigned i;
+ u32 ring = RADEON_CS_RING_GFX;
+ s32 priority = 0;
+
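The radeon change widens size from unsigned to u64 because it is later multiplied by sizeof(u64) with a userspace-supplied chunk count, and 32-bit arithmetic can wrap before any bounds check runs. The hazard in miniature (values chosen for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t nchunks = 0x20000001u; /* untrusted count from userspace */

            /* 32-bit math wraps: 0x20000001 * 8 == 8 (mod 2^32) ... */
            uint32_t bad = nchunks * (uint32_t)sizeof(uint64_t);
            /* ... while 64-bit math keeps the true value for bounds checks */
            uint64_t good = (uint64_t)nchunks * sizeof(uint64_t);

            printf("32-bit: %u, 64-bit: %llu\n", bad, (unsigned long long)good);
            return 0;
    }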
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 479516bbb61bf..64842926aff64 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -581,6 +581,7 @@
+ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
+
+ #define USB_VENDOR_ID_HP 0x03f0
++#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index e5dcc47586ee4..83c3322fcf187 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 70cd9fc7fb869..cae34c55ae08b 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -240,13 +240,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset)
+ {
+ u32 val;
++ unsigned long flags;
+
+ if (iproc_i2c->idm_base) {
+- spin_lock(&iproc_i2c->idm_lock);
++ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ val = readl(iproc_i2c->base + offset);
+- spin_unlock(&iproc_i2c->idm_lock);
++ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ } else {
+ val = readl(iproc_i2c->base + offset);
+ }
+@@ -257,12 +258,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset, u32 val)
+ {
++ unsigned long flags;
++
+ if (iproc_i2c->idm_base) {
+- spin_lock(&iproc_i2c->idm_lock);
++ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ writel(val, iproc_i2c->base + offset);
+- spin_unlock(&iproc_i2c->idm_lock);
++ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ } else {
+ writel(val, iproc_i2c->base + offset);
+ }
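The iproc change swaps spin_lock() for spin_lock_irqsave() because these register accessors can be reached from hard-IRQ context; without disabling local interrupts, an interrupt handler could spin forever on a lock its own CPU already holds. The canonical shape:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void touch_shared_window(void)
    {
            unsigned long flags;

            /* Disables local interrupts while held, so an interrupt on
             * this CPU cannot deadlock against its own context. */
            spin_lock_irqsave(&example_lock, flags);
            /* ... program the indirect register window ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }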
+diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
+index 5bd51853b15ec..3c0da322ece74 100644
+--- a/drivers/iio/Kconfig
++++ b/drivers/iio/Kconfig
+@@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
+
+ source "drivers/iio/accel/Kconfig"
+ source "drivers/iio/adc/Kconfig"
++source "drivers/iio/addac/Kconfig"
+ source "drivers/iio/afe/Kconfig"
+ source "drivers/iio/amplifiers/Kconfig"
+ source "drivers/iio/chemical/Kconfig"
+diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
+index bff682ad1cfbb..96fd43b2ef7c1 100644
+--- a/drivers/iio/Makefile
++++ b/drivers/iio/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
+
+ obj-y += accel/
+ obj-y += adc/
++obj-y += addac/
+ obj-y += afe/
+ obj-y += amplifiers/
+ obj-y += buffer/
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index cb57880842991..b39d5ad157449 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -840,22 +840,6 @@ config STMPE_ADC
+ Say yes here to build support for ST Microelectronics STMPE
+ built-in ADC block (stmpe811).
+
+-config STX104
+- tristate "Apex Embedded Systems STX104 driver"
+- depends on PC104 && X86
+- select ISA_BUS_API
+- select GPIOLIB
+- help
+- Say yes here to build support for the Apex Embedded Systems STX104
+- integrated analog PC/104 card.
+-
+- This driver supports the 16 channels of single-ended (8 channels of
+- differential) analog inputs, 2 channels of analog output, 4 digital
+- inputs, and 4 digital outputs provided by the STX104.
+-
+- The base port addresses for the devices may be configured via the base
+- array module parameter.
+-
+ config SUN4I_GPADC
+ tristate "Support for the Allwinner SoCs GPADC"
+ depends on IIO
+diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
+index ef9cc485fb674..d0b11502102ed 100644
+--- a/drivers/iio/adc/Makefile
++++ b/drivers/iio/adc/Makefile
+@@ -72,7 +72,6 @@ obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
+ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+ obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
+ obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
+-obj-$(CONFIG_STX104) += stx104.o
+ obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
+ obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
+ obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c
+deleted file mode 100644
+index f87bbc711ccc0..0000000000000
+--- a/drivers/iio/adc/stx104.c
++++ /dev/null
+@@ -1,375 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * IIO driver for the Apex Embedded Systems STX104
+- * Copyright (C) 2016 William Breathitt Gray
+- */
+-#include <linux/bitops.h>
+-#include <linux/device.h>
+-#include <linux/errno.h>
+-#include <linux/gpio/driver.h>
+-#include <linux/iio/iio.h>
+-#include <linux/iio/types.h>
+-#include <linux/io.h>
+-#include <linux/ioport.h>
+-#include <linux/isa.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include <linux/spinlock.h>
+-
+-#define STX104_OUT_CHAN(chan) { \
+- .type = IIO_VOLTAGE, \
+- .channel = chan, \
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+- .indexed = 1, \
+- .output = 1 \
+-}
+-#define STX104_IN_CHAN(chan, diff) { \
+- .type = IIO_VOLTAGE, \
+- .channel = chan, \
+- .channel2 = chan, \
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
+- BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), \
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+- .indexed = 1, \
+- .differential = diff \
+-}
+-
+-#define STX104_NUM_OUT_CHAN 2
+-
+-#define STX104_EXTENT 16
+-
+-static unsigned int base[max_num_isa_dev(STX104_EXTENT)];
+-static unsigned int num_stx104;
+-module_param_hw_array(base, uint, ioport, &num_stx104, 0);
+-MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
+-
+-/**
+- * struct stx104_iio - IIO device private data structure
+- * @chan_out_states: channels' output states
+- * @base: base port address of the IIO device
+- */
+-struct stx104_iio {
+- unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
+- unsigned int base;
+-};
+-
+-/**
+- * struct stx104_gpio - GPIO device private data structure
+- * @chip: instance of the gpio_chip
+- * @lock: synchronization lock to prevent I/O race conditions
+- * @base: base port address of the GPIO device
+- * @out_state: output bits state
+- */
+-struct stx104_gpio {
+- struct gpio_chip chip;
+- spinlock_t lock;
+- unsigned int base;
+- unsigned int out_state;
+-};
+-
+-static int stx104_read_raw(struct iio_dev *indio_dev,
+- struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+-{
+- struct stx104_iio *const priv = iio_priv(indio_dev);
+- unsigned int adc_config;
+- int adbu;
+- int gain;
+-
+- switch (mask) {
+- case IIO_CHAN_INFO_HARDWAREGAIN:
+- /* get gain configuration */
+- adc_config = inb(priv->base + 11);
+- gain = adc_config & 0x3;
+-
+- *val = 1 << gain;
+- return IIO_VAL_INT;
+- case IIO_CHAN_INFO_RAW:
+- if (chan->output) {
+- *val = priv->chan_out_states[chan->channel];
+- return IIO_VAL_INT;
+- }
+-
+- /* select ADC channel */
+- outb(chan->channel | (chan->channel << 4), priv->base + 2);
+-
+- /* trigger ADC sample capture and wait for completion */
+- outb(0, priv->base);
+- while (inb(priv->base + 8) & BIT(7));
+-
+- *val = inw(priv->base);
+- return IIO_VAL_INT;
+- case IIO_CHAN_INFO_OFFSET:
+- /* get ADC bipolar/unipolar configuration */
+- adc_config = inb(priv->base + 11);
+- adbu = !(adc_config & BIT(2));
+-
+- *val = -32768 * adbu;
+- return IIO_VAL_INT;
+- case IIO_CHAN_INFO_SCALE:
+- /* get ADC bipolar/unipolar and gain configuration */
+- adc_config = inb(priv->base + 11);
+- adbu = !(adc_config & BIT(2));
+- gain = adc_config & 0x3;
+-
+- *val = 5;
+- *val2 = 15 - adbu + gain;
+- return IIO_VAL_FRACTIONAL_LOG2;
+- }
+-
+- return -EINVAL;
+-}
+-
+-static int stx104_write_raw(struct iio_dev *indio_dev,
+- struct iio_chan_spec const *chan, int val, int val2, long mask)
+-{
+- struct stx104_iio *const priv = iio_priv(indio_dev);
+-
+- switch (mask) {
+- case IIO_CHAN_INFO_HARDWAREGAIN:
+- /* Only four gain states (x1, x2, x4, x8) */
+- switch (val) {
+- case 1:
+- outb(0, priv->base + 11);
+- break;
+- case 2:
+- outb(1, priv->base + 11);
+- break;
+- case 4:
+- outb(2, priv->base + 11);
+- break;
+- case 8:
+- outb(3, priv->base + 11);
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- return 0;
+- case IIO_CHAN_INFO_RAW:
+- if (chan->output) {
+- /* DAC can only accept up to a 16-bit value */
+- if ((unsigned int)val > 65535)
+- return -EINVAL;
+-
+- priv->chan_out_states[chan->channel] = val;
+- outw(val, priv->base + 4 + 2 * chan->channel);
+-
+- return 0;
+- }
+- return -EINVAL;
+- }
+-
+- return -EINVAL;
+-}
+-
+-static const struct iio_info stx104_info = {
+- .read_raw = stx104_read_raw,
+- .write_raw = stx104_write_raw
+-};
+-
+-/* single-ended input channels configuration */
+-static const struct iio_chan_spec stx104_channels_sing[] = {
+- STX104_OUT_CHAN(0), STX104_OUT_CHAN(1),
+- STX104_IN_CHAN(0, 0), STX104_IN_CHAN(1, 0), STX104_IN_CHAN(2, 0),
+- STX104_IN_CHAN(3, 0), STX104_IN_CHAN(4, 0), STX104_IN_CHAN(5, 0),
+- STX104_IN_CHAN(6, 0), STX104_IN_CHAN(7, 0), STX104_IN_CHAN(8, 0),
+- STX104_IN_CHAN(9, 0), STX104_IN_CHAN(10, 0), STX104_IN_CHAN(11, 0),
+- STX104_IN_CHAN(12, 0), STX104_IN_CHAN(13, 0), STX104_IN_CHAN(14, 0),
+- STX104_IN_CHAN(15, 0)
+-};
+-/* differential input channels configuration */
+-static const struct iio_chan_spec stx104_channels_diff[] = {
+- STX104_OUT_CHAN(0), STX104_OUT_CHAN(1),
+- STX104_IN_CHAN(0, 1), STX104_IN_CHAN(1, 1), STX104_IN_CHAN(2, 1),
+- STX104_IN_CHAN(3, 1), STX104_IN_CHAN(4, 1), STX104_IN_CHAN(5, 1),
+- STX104_IN_CHAN(6, 1), STX104_IN_CHAN(7, 1)
+-};
+-
+-static int stx104_gpio_get_direction(struct gpio_chip *chip,
+- unsigned int offset)
+-{
+- /* GPIO 0-3 are input only, while the rest are output only */
+- if (offset < 4)
+- return 1;
+-
+- return 0;
+-}
+-
+-static int stx104_gpio_direction_input(struct gpio_chip *chip,
+- unsigned int offset)
+-{
+- if (offset >= 4)
+- return -EINVAL;
+-
+- return 0;
+-}
+-
+-static int stx104_gpio_direction_output(struct gpio_chip *chip,
+- unsigned int offset, int value)
+-{
+- if (offset < 4)
+- return -EINVAL;
+-
+- chip->set(chip, offset, value);
+- return 0;
+-}
+-
+-static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
+-{
+- struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+-
+- if (offset >= 4)
+- return -EINVAL;
+-
+- return !!(inb(stx104gpio->base) & BIT(offset));
+-}
+-
+-static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+- unsigned long *bits)
+-{
+- struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+-
+- *bits = inb(stx104gpio->base);
+-
+- return 0;
+-}
+-
+-static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
+- int value)
+-{
+- struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+- const unsigned int mask = BIT(offset) >> 4;
+- unsigned long flags;
+-
+- if (offset < 4)
+- return;
+-
+- spin_lock_irqsave(&stx104gpio->lock, flags);
+-
+- if (value)
+- stx104gpio->out_state |= mask;
+- else
+- stx104gpio->out_state &= ~mask;
+-
+- outb(stx104gpio->out_state, stx104gpio->base);
+-
+- spin_unlock_irqrestore(&stx104gpio->lock, flags);
+-}
+-
+-#define STX104_NGPIO 8
+-static const char *stx104_names[STX104_NGPIO] = {
+- "DIN0", "DIN1", "DIN2", "DIN3", "DOUT0", "DOUT1", "DOUT2", "DOUT3"
+-};
+-
+-static void stx104_gpio_set_multiple(struct gpio_chip *chip,
+- unsigned long *mask, unsigned long *bits)
+-{
+- struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+- unsigned long flags;
+-
+- /* verify masked GPIO are output */
+- if (!(*mask & 0xF0))
+- return;
+-
+- *mask >>= 4;
+- *bits >>= 4;
+-
+- spin_lock_irqsave(&stx104gpio->lock, flags);
+-
+- stx104gpio->out_state &= ~*mask;
+- stx104gpio->out_state |= *mask & *bits;
+- outb(stx104gpio->out_state, stx104gpio->base);
+-
+- spin_unlock_irqrestore(&stx104gpio->lock, flags);
+-}
+-
+-static int stx104_probe(struct device *dev, unsigned int id)
+-{
+- struct iio_dev *indio_dev;
+- struct stx104_iio *priv;
+- struct stx104_gpio *stx104gpio;
+- int err;
+-
+- indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+- if (!indio_dev)
+- return -ENOMEM;
+-
+- stx104gpio = devm_kzalloc(dev, sizeof(*stx104gpio), GFP_KERNEL);
+- if (!stx104gpio)
+- return -ENOMEM;
+-
+- if (!devm_request_region(dev, base[id], STX104_EXTENT,
+- dev_name(dev))) {
+- dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+- base[id], base[id] + STX104_EXTENT);
+- return -EBUSY;
+- }
+-
+- indio_dev->info = &stx104_info;
+- indio_dev->modes = INDIO_DIRECT_MODE;
+-
+- /* determine if differential inputs */
+- if (inb(base[id] + 8) & BIT(5)) {
+- indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff);
+- indio_dev->channels = stx104_channels_diff;
+- } else {
+- indio_dev->num_channels = ARRAY_SIZE(stx104_channels_sing);
+- indio_dev->channels = stx104_channels_sing;
+- }
+-
+- indio_dev->name = dev_name(dev);
+- indio_dev->dev.parent = dev;
+-
+- priv = iio_priv(indio_dev);
+- priv->base = base[id];
+-
+- /* configure device for software trigger operation */
+- outb(0, base[id] + 9);
+-
+- /* initialize gain setting to x1 */
+- outb(0, base[id] + 11);
+-
+- /* initialize DAC output to 0V */
+- outw(0, base[id] + 4);
+- outw(0, base[id] + 6);
+-
+- stx104gpio->chip.label = dev_name(dev);
+- stx104gpio->chip.parent = dev;
+- stx104gpio->chip.owner = THIS_MODULE;
+- stx104gpio->chip.base = -1;
+- stx104gpio->chip.ngpio = STX104_NGPIO;
+- stx104gpio->chip.names = stx104_names;
+- stx104gpio->chip.get_direction = stx104_gpio_get_direction;
+- stx104gpio->chip.direction_input = stx104_gpio_direction_input;
+- stx104gpio->chip.direction_output = stx104_gpio_direction_output;
+- stx104gpio->chip.get = stx104_gpio_get;
+- stx104gpio->chip.get_multiple = stx104_gpio_get_multiple;
+- stx104gpio->chip.set = stx104_gpio_set;
+- stx104gpio->chip.set_multiple = stx104_gpio_set_multiple;
+- stx104gpio->base = base[id] + 3;
+- stx104gpio->out_state = 0x0;
+-
+- spin_lock_init(&stx104gpio->lock);
+-
+- err = devm_gpiochip_add_data(dev, &stx104gpio->chip, stx104gpio);
+- if (err) {
+- dev_err(dev, "GPIO registering failed (%d)\n", err);
+- return err;
+- }
+-
+- return devm_iio_device_register(dev, indio_dev);
+-}
+-
+-static struct isa_driver stx104_driver = {
+- .probe = stx104_probe,
+- .driver = {
+- .name = "stx104"
+- },
+-};
+-
+-module_isa_driver(stx104_driver, num_stx104);
+-
+-MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+-MODULE_DESCRIPTION("Apex Embedded Systems STX104 IIO driver");
+-MODULE_LICENSE("GPL v2");
+diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
+new file mode 100644
+index 0000000000000..1f598670e84fb
+--- /dev/null
++++ b/drivers/iio/addac/Kconfig
+@@ -0,0 +1,24 @@
++#
++# ADC DAC drivers
++#
++# When adding new entries keep the list in alphabetical order
++
++menu "Analog to digital and digital to analog converters"
++
++config STX104
++ tristate "Apex Embedded Systems STX104 driver"
++ depends on PC104 && X86
++ select ISA_BUS_API
++ select GPIOLIB
++ help
++ Say yes here to build support for the Apex Embedded Systems STX104
++ integrated analog PC/104 card.
++
++ This driver supports the 16 channels of single-ended (8 channels of
++ differential) analog inputs, 2 channels of analog output, 4 digital
++ inputs, and 4 digital outputs provided by the STX104.
++
++ The base port addresses for the devices may be configured via the base
++ array module parameter.
++
++endmenu
+diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile
+new file mode 100644
+index 0000000000000..8629145233544
+--- /dev/null
++++ b/drivers/iio/addac/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0
++#
++# Makefile for industrial I/O ADDAC drivers
++#
++
++# When adding new entries keep the list in alphabetical order
++obj-$(CONFIG_STX104) += stx104.o
+diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c
+new file mode 100644
+index 0000000000000..8237ae4263cbe
+--- /dev/null
++++ b/drivers/iio/addac/stx104.c
+@@ -0,0 +1,415 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * IIO driver for the Apex Embedded Systems STX104
++ * Copyright (C) 2016 William Breathitt Gray
++ */
++#include <linux/bitops.h>
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/gpio/driver.h>
++#include <linux/iio/iio.h>
++#include <linux/iio/types.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/isa.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/mutex.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#define STX104_OUT_CHAN(chan) { \
++ .type = IIO_VOLTAGE, \
++ .channel = chan, \
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
++ .indexed = 1, \
++ .output = 1 \
++}
++#define STX104_IN_CHAN(chan, diff) { \
++ .type = IIO_VOLTAGE, \
++ .channel = chan, \
++ .channel2 = chan, \
++ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
++ BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), \
++ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
++ .indexed = 1, \
++ .differential = diff \
++}
++
++#define STX104_NUM_OUT_CHAN 2
++
++#define STX104_EXTENT 16
++
++static unsigned int base[max_num_isa_dev(STX104_EXTENT)];
++static unsigned int num_stx104;
++module_param_hw_array(base, uint, ioport, &num_stx104, 0);
++MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
++
++/**
++ * struct stx104_reg - device register structure
++ * @ssr_ad: Software Strobe Register and ADC Data
++ * @achan: ADC Channel
++ * @dio: Digital I/O
++ * @dac: DAC Channels
++ * @cir_asr: Clear Interrupts and ADC Status
++ * @acr: ADC Control
++ * @pccr_fsh: Pacer Clock Control and FIFO Status MSB
++ * @acfg: ADC Configuration
++ */
++struct stx104_reg {
++ u16 ssr_ad;
++ u8 achan;
++ u8 dio;
++ u16 dac[2];
++ u8 cir_asr;
++ u8 acr;
++ u8 pccr_fsh;
++ u8 acfg;
++};
++
++/**
++ * struct stx104_iio - IIO device private data structure
++ * @lock: synchronization lock to prevent I/O race conditions
++ * @chan_out_states: channels' output states
++ * @reg: I/O address offset for the device registers
++ */
++struct stx104_iio {
++ struct mutex lock;
++ unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
++ struct stx104_reg __iomem *reg;
++};
++
++/**
++ * struct stx104_gpio - GPIO device private data structure
++ * @chip: instance of the gpio_chip
++ * @lock: synchronization lock to prevent I/O race conditions
++ * @base: base port address of the GPIO device
++ * @out_state: output bits state
++ */
++struct stx104_gpio {
++ struct gpio_chip chip;
++ spinlock_t lock;
++ u8 __iomem *base;
++ unsigned int out_state;
++};
++
++static int stx104_read_raw(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
++{
++ struct stx104_iio *const priv = iio_priv(indio_dev);
++ struct stx104_reg __iomem *const reg = priv->reg;
++ unsigned int adc_config;
++ int adbu;
++ int gain;
++
++ switch (mask) {
++ case IIO_CHAN_INFO_HARDWAREGAIN:
++ /* get gain configuration */
++ adc_config = ioread8(&reg->acfg);
++ gain = adc_config & 0x3;
++
++ *val = 1 << gain;
++ return IIO_VAL_INT;
++ case IIO_CHAN_INFO_RAW:
++ if (chan->output) {
++ *val = priv->chan_out_states[chan->channel];
++ return IIO_VAL_INT;
++ }
++
++ mutex_lock(&priv->lock);
++
++ /* select ADC channel */
++ iowrite8(chan->channel | (chan->channel << 4), &reg->achan);
++
++ /* trigger ADC sample capture by writing to the 8-bit
++ * Software Strobe Register and wait for completion
++ */
++ iowrite8(0, &reg->ssr_ad);
++ while (ioread8(&reg->cir_asr) & BIT(7));
++
++ *val = ioread16(&reg->ssr_ad);
++
++ mutex_unlock(&priv->lock);
++ return IIO_VAL_INT;
++ case IIO_CHAN_INFO_OFFSET:
++ /* get ADC bipolar/unipolar configuration */
++ adc_config = ioread8(&reg->acfg);
++ adbu = !(adc_config & BIT(2));
++
++ *val = -32768 * adbu;
++ return IIO_VAL_INT;
++ case IIO_CHAN_INFO_SCALE:
++ /* get ADC bipolar/unipolar and gain configuration */
++ adc_config = ioread8(&reg->acfg);
++ adbu = !(adc_config & BIT(2));
++ gain = adc_config & 0x3;
++
++ *val = 5;
++ *val2 = 15 - adbu + gain;
++ return IIO_VAL_FRACTIONAL_LOG2;
++ }
++
++ return -EINVAL;
++}
++
++static int stx104_write_raw(struct iio_dev *indio_dev,
++ struct iio_chan_spec const *chan, int val, int val2, long mask)
++{
++ struct stx104_iio *const priv = iio_priv(indio_dev);
++
++ switch (mask) {
++ case IIO_CHAN_INFO_HARDWAREGAIN:
++ /* Only four gain states (x1, x2, x4, x8) */
++ switch (val) {
++ case 1:
++ iowrite8(0, &priv->reg->acfg);
++ break;
++ case 2:
++ iowrite8(1, &priv->reg->acfg);
++ break;
++ case 4:
++ iowrite8(2, &priv->reg->acfg);
++ break;
++ case 8:
++ iowrite8(3, &priv->reg->acfg);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++ case IIO_CHAN_INFO_RAW:
++ if (chan->output) {
++ /* DAC can only accept up to a 16-bit value */
++ if ((unsigned int)val > 65535)
++ return -EINVAL;
++
++ mutex_lock(&priv->lock);
++
++ priv->chan_out_states[chan->channel] = val;
++ iowrite16(val, &priv->reg->dac[chan->channel]);
++
++ mutex_unlock(&priv->lock);
++ return 0;
++ }
++ return -EINVAL;
++ }
++
++ return -EINVAL;
++}
++
++static const struct iio_info stx104_info = {
++ .read_raw = stx104_read_raw,
++ .write_raw = stx104_write_raw
++};
++
++/* single-ended input channels configuration */
++static const struct iio_chan_spec stx104_channels_sing[] = {
++ STX104_OUT_CHAN(0), STX104_OUT_CHAN(1),
++ STX104_IN_CHAN(0, 0), STX104_IN_CHAN(1, 0), STX104_IN_CHAN(2, 0),
++ STX104_IN_CHAN(3, 0), STX104_IN_CHAN(4, 0), STX104_IN_CHAN(5, 0),
++ STX104_IN_CHAN(6, 0), STX104_IN_CHAN(7, 0), STX104_IN_CHAN(8, 0),
++ STX104_IN_CHAN(9, 0), STX104_IN_CHAN(10, 0), STX104_IN_CHAN(11, 0),
++ STX104_IN_CHAN(12, 0), STX104_IN_CHAN(13, 0), STX104_IN_CHAN(14, 0),
++ STX104_IN_CHAN(15, 0)
++};
++/* differential input channels configuration */
++static const struct iio_chan_spec stx104_channels_diff[] = {
++ STX104_OUT_CHAN(0), STX104_OUT_CHAN(1),
++ STX104_IN_CHAN(0, 1), STX104_IN_CHAN(1, 1), STX104_IN_CHAN(2, 1),
++ STX104_IN_CHAN(3, 1), STX104_IN_CHAN(4, 1), STX104_IN_CHAN(5, 1),
++ STX104_IN_CHAN(6, 1), STX104_IN_CHAN(7, 1)
++};
++
++static int stx104_gpio_get_direction(struct gpio_chip *chip,
++ unsigned int offset)
++{
++ /* GPIO 0-3 are input only, while the rest are output only */
++ if (offset < 4)
++ return 1;
++
++ return 0;
++}
++
++static int stx104_gpio_direction_input(struct gpio_chip *chip,
++ unsigned int offset)
++{
++ if (offset >= 4)
++ return -EINVAL;
++
++ return 0;
++}
++
++static int stx104_gpio_direction_output(struct gpio_chip *chip,
++ unsigned int offset, int value)
++{
++ if (offset < 4)
++ return -EINVAL;
++
++ chip->set(chip, offset, value);
++ return 0;
++}
++
++static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
++{
++ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
++
++ if (offset >= 4)
++ return -EINVAL;
++
++ return !!(ioread8(stx104gpio->base) & BIT(offset));
++}
++
++static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
++ unsigned long *bits)
++{
++ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
++
++ *bits = ioread8(stx104gpio->base);
++
++ return 0;
++}
++
++static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
++ int value)
++{
++ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
++ const unsigned int mask = BIT(offset) >> 4;
++ unsigned long flags;
++
++ if (offset < 4)
++ return;
++
++ spin_lock_irqsave(&stx104gpio->lock, flags);
++
++ if (value)
++ stx104gpio->out_state |= mask;
++ else
++ stx104gpio->out_state &= ~mask;
++
++ iowrite8(stx104gpio->out_state, stx104gpio->base);
++
++ spin_unlock_irqrestore(&stx104gpio->lock, flags);
++}
++
++#define STX104_NGPIO 8
++static const char *stx104_names[STX104_NGPIO] = {
++ "DIN0", "DIN1", "DIN2", "DIN3", "DOUT0", "DOUT1", "DOUT2", "DOUT3"
++};
++
++static void stx104_gpio_set_multiple(struct gpio_chip *chip,
++ unsigned long *mask, unsigned long *bits)
++{
++ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
++ unsigned long flags;
++
++ /* verify masked GPIO are output */
++ if (!(*mask & 0xF0))
++ return;
++
++ *mask >>= 4;
++ *bits >>= 4;
++
++ spin_lock_irqsave(&stx104gpio->lock, flags);
++
++ stx104gpio->out_state &= ~*mask;
++ stx104gpio->out_state |= *mask & *bits;
++ iowrite8(stx104gpio->out_state, stx104gpio->base);
++
++ spin_unlock_irqrestore(&stx104gpio->lock, flags);
++}
++
++static int stx104_probe(struct device *dev, unsigned int id)
++{
++ struct iio_dev *indio_dev;
++ struct stx104_iio *priv;
++ struct stx104_gpio *stx104gpio;
++ int err;
++
++ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
++ if (!indio_dev)
++ return -ENOMEM;
++
++ stx104gpio = devm_kzalloc(dev, sizeof(*stx104gpio), GFP_KERNEL);
++ if (!stx104gpio)
++ return -ENOMEM;
++
++ if (!devm_request_region(dev, base[id], STX104_EXTENT,
++ dev_name(dev))) {
++ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
++ base[id], base[id] + STX104_EXTENT);
++ return -EBUSY;
++ }
++
++ priv = iio_priv(indio_dev);
++ priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT);
++ if (!priv->reg)
++ return -ENOMEM;
++
++ indio_dev->info = &stx104_info;
++ indio_dev->modes = INDIO_DIRECT_MODE;
++
++ /* determine if differential inputs */
++ if (ioread8(&priv->reg->cir_asr) & BIT(5)) {
++ indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff);
++ indio_dev->channels = stx104_channels_diff;
++ } else {
++ indio_dev->num_channels = ARRAY_SIZE(stx104_channels_sing);
++ indio_dev->channels = stx104_channels_sing;
++ }
++
++ indio_dev->name = dev_name(dev);
++ indio_dev->dev.parent = dev;
++
++ mutex_init(&priv->lock);
++
++ /* configure device for software trigger operation */
++ iowrite8(0, &priv->reg->acr);
++
++ /* initialize gain setting to x1 */
++ iowrite8(0, &priv->reg->acfg);
++
++ /* initialize DAC output to 0V */
++ iowrite16(0, &priv->reg->dac[0]);
++ iowrite16(0, &priv->reg->dac[1]);
++
++ stx104gpio->chip.label = dev_name(dev);
++ stx104gpio->chip.parent = dev;
++ stx104gpio->chip.owner = THIS_MODULE;
++ stx104gpio->chip.base = -1;
++ stx104gpio->chip.ngpio = STX104_NGPIO;
++ stx104gpio->chip.names = stx104_names;
++ stx104gpio->chip.get_direction = stx104_gpio_get_direction;
++ stx104gpio->chip.direction_input = stx104_gpio_direction_input;
++ stx104gpio->chip.direction_output = stx104_gpio_direction_output;
++ stx104gpio->chip.get = stx104_gpio_get;
++ stx104gpio->chip.get_multiple = stx104_gpio_get_multiple;
++ stx104gpio->chip.set = stx104_gpio_set;
++ stx104gpio->chip.set_multiple = stx104_gpio_set_multiple;
++ stx104gpio->base = &priv->reg->dio;
++ stx104gpio->out_state = 0x0;
++
++ spin_lock_init(&stx104gpio->lock);
++
++ err = devm_gpiochip_add_data(dev, &stx104gpio->chip, stx104gpio);
++ if (err) {
++ dev_err(dev, "GPIO registering failed (%d)\n", err);
++ return err;
++ }
++
++ return devm_iio_device_register(dev, indio_dev);
++}
++
++static struct isa_driver stx104_driver = {
++ .probe = stx104_probe,
++ .driver = {
++ .name = "stx104"
++ },
++};
++
++module_isa_driver(stx104_driver, num_stx104);
++
++MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
++MODULE_DESCRIPTION("Apex Embedded Systems STX104 IIO driver");
++MODULE_LICENSE("GPL v2");
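The rewritten stx104 driver maps the ISA port range once and lays a register struct over it, so accesses go through named fields with ioread/iowrite instead of inb/outb offset arithmetic. The idiom, reduced to a sketch (this two-field layout is a stand-in for the real struct stx104_reg above):

    #include <linux/io.h>
    #include <linux/types.h>

    struct example_reg {
            u16 data; /* offset 0x0 */
            u8  chan; /* offset 0x2 */
    };

    static int example_read_channel(void __iomem *port_base, u8 channel)
    {
            struct example_reg __iomem *reg = port_base;

            iowrite8(channel, &reg->chan); /* named field, not base + 2 */
            return ioread16(&reg->data);
    }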
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index e63c48a1602fd..be3fa1ac4261c 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -19,39 +19,13 @@
+ #include <linux/of.h>
+ #include <linux/overflow.h>
+
++#include "internal.h"
++
+ static DEFINE_IDR(icc_idr);
+ static LIST_HEAD(icc_providers);
+ static DEFINE_MUTEX(icc_lock);
+ static struct dentry *icc_debugfs_dir;
+
+-/**
+- * struct icc_req - constraints that are attached to each node
+- * @req_node: entry in list of requests for the particular @node
+- * @node: the interconnect node to which this constraint applies
+- * @dev: reference to the device that sets the constraints
+- * @tag: path tag (optional)
+- * @avg_bw: an integer describing the average bandwidth in kBps
+- * @peak_bw: an integer describing the peak bandwidth in kBps
+- */
+-struct icc_req {
+- struct hlist_node req_node;
+- struct icc_node *node;
+- struct device *dev;
+- u32 tag;
+- u32 avg_bw;
+- u32 peak_bw;
+-};
+-
+-/**
+- * struct icc_path - interconnect path structure
+- * @num_nodes: number of hops (nodes)
+- * @reqs: array of the requests applicable to this path of nodes
+- */
+-struct icc_path {
+- size_t num_nodes;
+- struct icc_req reqs[];
+-};
+-
+ static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+ {
+ if (!n)
+@@ -117,6 +91,7 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+ hlist_add_head(&path->reqs[i].req_node, &node->req_list);
+ path->reqs[i].node = node;
+ path->reqs[i].dev = dev;
++ path->reqs[i].enabled = true;
+ /* reference to previous node was saved during path traversal */
+ node = node->reverse;
+ }
+@@ -201,6 +176,7 @@ static int aggregate_requests(struct icc_node *node)
+ {
+ struct icc_provider *p = node->provider;
+ struct icc_req *r;
++ u32 avg_bw, peak_bw;
+
+ node->avg_bw = 0;
+ node->peak_bw = 0;
+@@ -208,9 +184,17 @@ static int aggregate_requests(struct icc_node *node)
+ if (p->pre_aggregate)
+ p->pre_aggregate(node);
+
+- hlist_for_each_entry(r, &node->req_list, req_node)
+- p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
++ hlist_for_each_entry(r, &node->req_list, req_node) {
++ if (r->enabled) {
++ avg_bw = r->avg_bw;
++ peak_bw = r->peak_bw;
++ } else {
++ avg_bw = 0;
++ peak_bw = 0;
++ }
++ p->aggregate(node, r->tag, avg_bw, peak_bw,
+ &node->avg_bw, &node->peak_bw);
++ }
+
+ return 0;
+ }
+@@ -475,6 +459,39 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ }
+ EXPORT_SYMBOL_GPL(icc_set_bw);
+
++static int __icc_enable(struct icc_path *path, bool enable)
++{
++ int i;
++
++ if (!path)
++ return 0;
++
++ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
++ return -EINVAL;
++
++ mutex_lock(&icc_lock);
++
++ for (i = 0; i < path->num_nodes; i++)
++ path->reqs[i].enabled = enable;
++
++ mutex_unlock(&icc_lock);
++
++ return icc_set_bw(path, path->reqs[0].avg_bw,
++ path->reqs[0].peak_bw);
++}
++
++int icc_enable(struct icc_path *path)
++{
++ return __icc_enable(path, true);
++}
++EXPORT_SYMBOL_GPL(icc_enable);
++
++int icc_disable(struct icc_path *path)
++{
++ return __icc_enable(path, false);
++}
++EXPORT_SYMBOL_GPL(icc_disable);
++
+ /**
+ * icc_get() - return a handle for path between two endpoints
+ * @dev: the device requesting the path
+diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
+new file mode 100644
+index 0000000000000..5c923c444f444
+--- /dev/null
++++ b/drivers/interconnect/internal.h
+@@ -0,0 +1,42 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Interconnect framework internal structs
++ *
++ * Copyright (c) 2019, Linaro Ltd.
++ * Author: Georgi Djakov <georgi.djakov@linaro.org>
++ */
++
++#ifndef __DRIVERS_INTERCONNECT_INTERNAL_H
++#define __DRIVERS_INTERCONNECT_INTERNAL_H
++
++/**
++ * struct icc_req - constraints that are attached to each node
++ * @req_node: entry in list of requests for the particular @node
++ * @node: the interconnect node to which this constraint applies
++ * @dev: reference to the device that sets the constraints
++ * @enabled: indicates whether the path with this request is enabled
++ * @tag: path tag (optional)
++ * @avg_bw: an integer describing the average bandwidth in kBps
++ * @peak_bw: an integer describing the peak bandwidth in kBps
++ */
++struct icc_req {
++ struct hlist_node req_node;
++ struct icc_node *node;
++ struct device *dev;
++ bool enabled;
++ u32 tag;
++ u32 avg_bw;
++ u32 peak_bw;
++};
++
++/**
++ * struct icc_path - interconnect path structure
++ * @num_nodes: number of hops (nodes)
++ * @reqs: array of the requests applicable to this path of nodes
++ */
++struct icc_path {
++ size_t num_nodes;
++ struct icc_req reqs[];
++};
++
++#endif
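With the new per-request enabled flag, a consumer can park its path at zero bandwidth during suspend without losing the values it asked for. A hedged consumer-side sketch (function names are illustrative, and the icc_enable/icc_disable declarations are assumed to land in linux/interconnect.h alongside this change):

    #include <linux/interconnect.h>

    /* Hypothetical suspend/resume hooks for an interconnect consumer. */
    static int example_suspend_path(struct icc_path *path)
    {
            /* Aggregates this path's requests as 0 kBps, but keeps the
             * stored avg_bw/peak_bw for later. */
            return icc_disable(path);
    }

    static int example_resume_path(struct icc_path *path)
    {
            /* Re-applies the previously requested bandwidth. */
            return icc_enable(path);
    }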
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 76e9d3e2f9f20..15eef44efd030 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -886,8 +886,8 @@ struct amd_ir_data {
+ */
+ struct irq_cfg *cfg;
+ int ga_vector;
+- int ga_root_ptr;
+- int ga_tag;
++ u64 ga_root_ptr;
++ u32 ga_tag;
+ };
+
+ struct amd_irte_ops {
+diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
+index f3985469c2211..caebafed49bb4 100644
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -48,7 +48,7 @@ void __iomem *mips_gic_base;
+
+ DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
+
+-static DEFINE_SPINLOCK(gic_lock);
++static DEFINE_RAW_SPINLOCK(gic_lock);
+ static struct irq_domain *gic_irq_domain;
+ static struct irq_domain *gic_ipi_domain;
+ static int gic_shared_intrs;
+@@ -207,7 +207,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+
+ irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+
+- spin_lock_irqsave(&gic_lock, flags);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_FALLING:
+ pol = GIC_POL_FALLING_EDGE;
+@@ -247,7 +247,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
+ else
+ irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+ handle_level_irq, NULL);
+- spin_unlock_irqrestore(&gic_lock, flags);
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+ }
+@@ -265,7 +265,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ return -EINVAL;
+
+ /* Assumption : cpumask refers to a single CPU */
+- spin_lock_irqsave(&gic_lock, flags);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+
+ /* Re-route this IRQ */
+ write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
+@@ -276,7 +276,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+- spin_unlock_irqrestore(&gic_lock, flags);
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return IRQ_SET_MASK_OK;
+ }
+@@ -354,12 +354,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = false;
+
+- spin_lock_irqsave(&gic_lock, flags);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_rmask(BIT(intr));
+ }
+- spin_unlock_irqrestore(&gic_lock, flags);
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+ }
+
+ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+@@ -372,32 +372,45 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = true;
+
+- spin_lock_irqsave(&gic_lock, flags);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_smask(BIT(intr));
+ }
+- spin_unlock_irqrestore(&gic_lock, flags);
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+ }
+
+-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
++static void gic_all_vpes_irq_cpu_online(void)
+ {
+- struct gic_all_vpes_chip_data *cd;
+- unsigned int intr;
++ static const unsigned int local_intrs[] = {
++ GIC_LOCAL_INT_TIMER,
++ GIC_LOCAL_INT_PERFCTR,
++ GIC_LOCAL_INT_FDC,
++ };
++ unsigned long flags;
++ int i;
+
+- intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+- cd = irq_data_get_irq_chip_data(d);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+
+- write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+- if (cd->mask)
+- write_gic_vl_smask(BIT(intr));
++ for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
++ unsigned int intr = local_intrs[i];
++ struct gic_all_vpes_chip_data *cd;
++
++ if (!gic_local_irq_is_routable(intr))
++ continue;
++ cd = &gic_all_vpes_chip_data[intr];
++ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
++ if (cd->mask)
++ write_gic_vl_smask(BIT(intr));
++ }
++
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+ }
+
+ static struct irq_chip gic_all_vpes_local_irq_controller = {
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq_all_vpes,
+ .irq_unmask = gic_unmask_local_irq_all_vpes,
+- .irq_cpu_online = gic_all_vpes_irq_cpu_online,
+ };
+
+ static void __gic_irq_dispatch(void)
+@@ -421,11 +434,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
+
+ data = irq_get_irq_data(virq);
+
+- spin_lock_irqsave(&gic_lock, flags);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+ write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
+ write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+- spin_unlock_irqrestore(&gic_lock, flags);
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+ }
+@@ -476,6 +489,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ intr = GIC_HWIRQ_TO_LOCAL(hwirq);
+ map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+
++ /*
++ * If adding support for more per-cpu interrupts, keep the
++ * array in gic_all_vpes_irq_cpu_online() in sync.
++ */
+ switch (intr) {
+ case GIC_LOCAL_INT_TIMER:
+ /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+@@ -514,12 +531,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ if (!gic_local_irq_is_routable(intr))
+ return -EPERM;
+
+- spin_lock_irqsave(&gic_lock, flags);
++ raw_spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
+ }
+- spin_unlock_irqrestore(&gic_lock, flags);
++ raw_spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
+ }
+@@ -662,8 +679,8 @@ static int gic_cpu_startup(unsigned int cpu)
+ /* Clear all local IRQ masks (ie. disable all local interrupts) */
+ write_gic_vl_rmask(~0);
+
+- /* Invoke irq_cpu_online callbacks to enable desired interrupts */
+- irq_cpu_online();
++ /* Enable desired interrupts */
++ gic_all_vpes_irq_cpu_online();
+
+ return 0;
+ }
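The gic_lock conversion above matters on PREEMPT_RT, where plain spinlock_t becomes a sleeping lock while raw_spinlock_t keeps spinning, so only the latter is safe in the hard-IRQ and CPU-bringup paths that program the GIC. A minimal kernel-module sketch of the same locking pattern (illustrative only: demo_lock and demo_events are invented names, and it assumes a kernel build tree):

// SPDX-License-Identifier: GPL-2.0
/* Sketch of the raw_spinlock_t pattern used above: raw_spin_lock_irqsave()
 * disables local interrupts and never sleeps, even on PREEMPT_RT.
 */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned long demo_events;

static int __init demo_init(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_events++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	pr_info("demo: events=%lu\n", demo_events);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");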
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index d5e774d830215..f4d670ec30bcb 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -318,6 +318,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ switch (evt) {
+ case NETDEV_CHANGENAME:
++ if (netif_carrier_ok(dev))
++ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
++ fallthrough;
+ case NETDEV_REGISTER:
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index d7911c623edde..81157801a3dc6 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -31,11 +31,11 @@
+ #define DEFAULT_BUFFER_SECTORS 128
+ #define DEFAULT_JOURNAL_WATERMARK 50
+ #define DEFAULT_SYNC_MSEC 10000
+-#define DEFAULT_MAX_JOURNAL_SECTORS 131072
++#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
+ #define MIN_LOG2_INTERLEAVE_SECTORS 3
+ #define MAX_LOG2_INTERLEAVE_SECTORS 31
+ #define METADATA_WORKQUEUE_MAX_ACTIVE 16
+-#define RECALC_SECTORS 8192
++#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
+ #define RECALC_WRITE_SUPER 16
+ #define BITMAP_BLOCK_SIZE 4096 /* don't change it */
+ #define BITMAP_FLUSH_INTERVAL (10 * HZ)
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+index fd8de027e83e3..6117efb425c7b 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+@@ -759,6 +759,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
+ return -EINVAL;
+
+ if (*nplanes) {
++ if (*nplanes != q_data->fmt->num_planes)
++ return -EINVAL;
+ for (i = 0; i < *nplanes; i++)
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
+index acf64723f9381..650e198a270e4 100644
+--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
++++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
+@@ -529,15 +529,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
+ int vpu_load_firmware(struct platform_device *pdev)
+ {
+ struct mtk_vpu *vpu;
+- struct device *dev = &pdev->dev;
++ struct device *dev;
+ struct vpu_run *run;
+ int ret;
+
+ if (!pdev) {
+- dev_err(dev, "VPU platform device is invalid\n");
++ pr_err("VPU platform device is invalid\n");
+ return -EINVAL;
+ }
+
++ dev = &pdev->dev;
++
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 5757b72f53043..c54b2a23285cf 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1969,14 +1969,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ mmc_blk_urgent_bkops(mq, mqrq);
+ }
+
+-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
++static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
+ {
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(&mq->lock, flags);
+
+- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
++ mq->in_flight[issue_type] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+@@ -1988,6 +1988,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+
+ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+ {
++ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+@@ -2003,7 +2004,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+ else
+ blk_mq_complete_request(req);
+
+- mmc_blk_mq_dec_in_flight(mq, req);
++ mmc_blk_mq_dec_in_flight(mq, issue_type);
+ }
+
+ void mmc_blk_mq_recovery(struct mmc_queue *mq)
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 148414d7f0c9d..d20943e433127 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -1408,8 +1408,8 @@ static int bcm2835_probe(struct platform_device *pdev)
+ host->max_clk = clk_get_rate(clk);
+
+ host->irq = platform_get_irq(pdev, 0);
+- if (host->irq <= 0) {
+- ret = -EINVAL;
++ if (host->irq < 0) {
++ ret = host->irq;
+ goto err;
+ }
+
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index 9548d022d52ba..86a8644af4504 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -50,9 +50,16 @@ struct f_sdhost_priv {
+ bool enable_cmd_dat_delay;
+ };
+
++static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++
++ return sdhci_pltfm_priv(pltfm_host);
++}
++
+ static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
+ {
+- struct f_sdhost_priv *priv = sdhci_priv(host);
++ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ u32 ctrl = 0;
+
+ usleep_range(2500, 3000);
+@@ -85,7 +92,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
+
+ static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
+ {
+- struct f_sdhost_priv *priv = sdhci_priv(host);
++ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ u32 ctl;
+
+ if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
+@@ -109,31 +116,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
++static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
++ .ops = &sdhci_f_sdh30_ops,
++ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
++ | SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
++ .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
++ | SDHCI_QUIRK2_TUNING_WORK_AROUND,
++};
++
+ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ {
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+- struct resource *res;
+- int irq, ctrl = 0, ret = 0;
++ int ctrl = 0, ret = 0;
+ struct f_sdhost_priv *priv;
++ struct sdhci_pltfm_host *pltfm_host;
+ u32 reg = 0;
+
+- irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return irq;
+-
+- host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
++ host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
++ sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+- priv = sdhci_priv(host);
++ pltfm_host = sdhci_priv(host);
++ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->dev = dev;
+
+- host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+- SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+- host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+- SDHCI_QUIRK2_TUNING_WORK_AROUND;
+-
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
+@@ -141,19 +149,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ if (ret)
+ goto err;
+
+- platform_set_drvdata(pdev, host);
+-
+- host->hw_name = "f_sdh30";
+- host->ops = &sdhci_f_sdh30_ops;
+- host->irq = irq;
+-
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(host->ioaddr)) {
+- ret = PTR_ERR(host->ioaddr);
+- goto err;
+- }
+-
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
+
+@@ -208,23 +203,22 @@ err_add_host:
+ err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+ err:
+- sdhci_free_host(host);
++ sdhci_pltfm_free(pdev);
++
+ return ret;
+ }
+
+ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+- struct f_sdhost_priv *priv = sdhci_priv(host);
+-
+- sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+- 0xffffffff);
++ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
++ struct clk *clk_iface = priv->clk_iface;
++ struct clk *clk = priv->clk;
+
+- clk_disable_unprepare(priv->clk_iface);
+- clk_disable_unprepare(priv->clk);
++ sdhci_pltfm_unregister(pdev);
+
+- sdhci_free_host(host);
+- platform_set_drvdata(pdev, NULL);
++ clk_disable_unprepare(clk_iface);
++ clk_disable_unprepare(clk);
+
+ return 0;
+ }
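The sdhci_f_sdh30 rework above hinges on how sdhci_pltfm_init() stacks two trailing allocations: sdhci_priv(host) now returns the struct sdhci_pltfm_host, and only sdhci_pltfm_priv() reaches the driver's own data, which is why every former sdhci_priv() access gains the sdhci_f_sdhost_priv() wrapper. A standalone userspace model of that nesting (all struct and function names below are hypothetical stand-ins, not the real SDHCI API):

#include <stdio.h>
#include <stdlib.h>

struct host       { long core_state;  char priv[]; };  /* like sdhci_host */
struct pltfm_host { long pltfm_state; char priv[]; };  /* like sdhci_pltfm_host */
struct drv_priv   { int enable_cmd_dat_delay; };       /* like f_sdhost_priv */

static void *host_priv(struct host *h)        { return h->priv; }
static void *pltfm_priv(struct pltfm_host *p) { return p->priv; }

int main(void)
{
	/* one allocation, two nested trailing private areas */
	struct host *h = calloc(1, sizeof(*h) + sizeof(struct pltfm_host) +
				   sizeof(struct drv_priv));
	struct pltfm_host *p;
	struct drv_priv *d;

	if (!h)
		return 1;
	p = host_priv(h);   /* first trailing area: the pltfm layer */
	d = pltfm_priv(p);  /* second trailing area: the driver data */
	d->enable_cmd_dat_delay = 1;
	printf("%d\n", d->enable_cmd_dat_delay);
	free(h);
	return 0;
}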
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index 519718bb246ce..0a67ad57e5c18 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -1314,8 +1314,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
+ return ret;
+
+ host->irq = platform_get_irq(pdev, 0);
+- if (host->irq <= 0) {
+- ret = -EINVAL;
++ if (host->irq < 0) {
++ ret = host->irq;
+ goto error_disable_mmc;
+ }
+
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 639f87ba1606b..c8fd3cb91789a 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1708,8 +1708,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+-
+- mmc_free_host(mmc);
+ return ret;
+ }
+
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 20114e1dde77e..6df78a36bafde 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
+ return NULL;
+ arp = (struct arp_pkt *)skb_network_header(skb);
+
+- /* Don't modify or load balance ARPs that do not originate locally
+- * (e.g.,arrive via a bridge).
++ /* Don't modify or load balance ARPs that do not originate
++ * from the bond itself or a VLAN directly above the bond.
+ */
+- if (!bond_slave_has_mac_rx(bond, arp->mac_src))
++ if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+ return NULL;
+
+ if (arp->op_code == htons(ARPOP_REPLY)) {
+diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
+index 282c53ef76d23..1bfede407270d 100644
+--- a/drivers/net/can/vxcan.c
++++ b/drivers/net/can/vxcan.c
+@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
+
+ nla_peer = data[VXCAN_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+- err = rtnl_nla_parse_ifla(peer_tb,
+- nla_data(nla_peer) +
+- sizeof(struct ifinfomsg),
+- nla_len(nla_peer) -
+- sizeof(struct ifinfomsg),
+- NULL);
++ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 393ee145ae066..ca705a0e0961c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2143,6 +2143,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+
+ /* If there is a GPIO connected to the reset pin, toggle it */
+ if (gpiod) {
++ /* If the switch has just been reset and not yet completed
++ * loading EEPROM, the reset may interrupt the I2C transaction
++ * mid-byte, causing the first EEPROM read after the reset
++ * to come from the wrong location, resulting in the switch
++ * booting into the wrong mode and becoming inoperable.
++ */
++ mv88e6xxx_g1_wait_eeprom_done(chip);
++
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 89a63fdbe0e39..1148370e2432d 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -1447,7 +1447,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
+ int err;
+
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+- if (!phy_dev || IS_ERR(phy_dev)) {
++ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 53495d39cc9c5..2fbec2acb606d 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -565,7 +565,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
+ };
+
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+- if (!phydev || IS_ERR(phydev)) {
++ if (IS_ERR(phydev)) {
+ dev_err(kdev, "failed to register fixed PHY device\n");
+ return -ENODEV;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index a20d9147d5f22..fde949a73cb57 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
+ unsigned long offset;
+
+ for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
+- asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
++ asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
+ }
+
+ /* replenish the buffers for a pool. note that we don't need to
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index e4d8d20baf3b9..37a29b5fc2afd 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -210,11 +210,11 @@ read_nvm_exit:
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+- * @words: number of words to write
+- * @data: buffer with words to write to the Shadow RAM
++ * @words: number of words to read
++ * @data: buffer for words to be read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
++ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+@@ -234,18 +234,18 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
++ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+- /* We can write only up to 4KB (one sector), in one AQ write */
++ /* We can read only up to 4KB (one sector), in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write fail error: tried to write %d words, limit is %d.\n",
++ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+- /* A single write cannot spread over two sectors */
++ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
++ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
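The reworded messages above describe three unchanged guards: the transfer must end inside the Shadow RAM, fit within one sector, and not straddle a sector boundary. A standalone sketch of the boundary test, assuming (as the comment above does) a sector of 4KB, i.e. 2048 16-bit words:

#include <stdbool.h>
#include <stdio.h>

#define SR_SECTOR_SIZE_IN_WORDS 2048	/* 4KB of 16-bit words, assumed */

/* Same idea as the i40e check: the first and last word of one AQ
 * transfer must fall into the same Shadow RAM sector.
 */
static bool spans_two_sectors(unsigned int offset, unsigned int words)
{
	return (offset + words - 1) / SR_SECTOR_SIZE_IN_WORDS !=
	       offset / SR_SECTOR_SIZE_IN_WORDS;
}

int main(void)
{
	printf("%d\n", spans_two_sectors(2040, 8));	/* 0: ends at word 2047 */
	printf("%d\n", spans_two_sectors(2040, 9));	/* 1: crosses into sector 1 */
	return 0;
}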
+diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
+index c39e921757ba9..3c501c67bdbb6 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
+@@ -1245,18 +1245,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ return;
+ }
+
+- spin_lock_init(&adapter->tmreg_lock);
+- INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+-
+- if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+- INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+- igb_ptp_overflow_check);
+-
+- adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+- adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+-
+- igb_ptp_reset(adapter);
+-
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+@@ -1266,6 +1254,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
+ dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+ adapter->netdev->name);
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
++
++ spin_lock_init(&adapter->tmreg_lock);
++ INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
++
++ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
++ INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
++ igb_ptp_overflow_check);
++
++ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
++ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
++
++ igb_ptp_reset(adapter);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 4a7609fd6dd07..5bc54ba68c831 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2430,9 +2430,10 @@ rx_frscfg:
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+- nix_find_link_frs(rvu, req, pcifunc);
+
+ linkcfg:
++ nix_find_link_frs(rvu, req, pcifunc);
++
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index 5fbabae2909ee..5fea2e4a93101 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -735,7 +735,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
+
+ write_pnet(&port->pnet, newnet);
+
+- ipvlan_migrate_l3s_hook(oldnet, newnet);
++ if (port->mode == IPVLAN_MODE_L3S)
++ ipvlan_migrate_l3s_hook(oldnet, newnet);
+ break;
+ }
+ case NETDEV_UNREGISTER:
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index f729f55f6a174..25fa3ef5b804f 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -317,6 +317,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
+ return sa;
+ }
+
++static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
++{
++ struct macsec_rx_sa *sa = NULL;
++ int an;
++
++ for (an = 0; an < MACSEC_NUM_AN; an++) {
++ sa = macsec_rxsa_get(rx_sc->sa[an]);
++ if (sa)
++ break;
++ }
++ return sa;
++}
++
+ static void free_rx_sc_rcu(struct rcu_head *head)
+ {
+ struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
+@@ -561,18 +574,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
+ skb->protocol = eth_hdr(skb)->h_proto;
+ }
+
++static unsigned int macsec_msdu_len(struct sk_buff *skb)
++{
++ struct macsec_dev *macsec = macsec_priv(skb->dev);
++ struct macsec_secy *secy = &macsec->secy;
++ bool sci_present = macsec_skb_cb(skb)->has_sci;
++
++ return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
++}
++
+ static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
+ struct macsec_tx_sa *tx_sa)
+ {
++ unsigned int msdu_len = macsec_msdu_len(skb);
+ struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
+
+ u64_stats_update_begin(&txsc_stats->syncp);
+ if (tx_sc->encrypt) {
+- txsc_stats->stats.OutOctetsEncrypted += skb->len;
++ txsc_stats->stats.OutOctetsEncrypted += msdu_len;
+ txsc_stats->stats.OutPktsEncrypted++;
+ this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
+ } else {
+- txsc_stats->stats.OutOctetsProtected += skb->len;
++ txsc_stats->stats.OutOctetsProtected += msdu_len;
+ txsc_stats->stats.OutPktsProtected++;
+ this_cpu_inc(tx_sa->stats->OutPktsProtected);
+ }
+@@ -602,9 +625,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
+ aead_request_free(macsec_skb_cb(skb)->req);
+
+ rcu_read_lock_bh();
+- macsec_encrypt_finish(skb, dev);
+ macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
+- len = skb->len;
++ /* packet is encrypted/protected so tx_bytes must be calculated */
++ len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
++ macsec_encrypt_finish(skb, dev);
+ ret = dev_queue_xmit(skb);
+ count_tx(dev, ret, len);
+ rcu_read_unlock_bh();
+@@ -760,6 +784,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+
+ macsec_skb_cb(skb)->req = req;
+ macsec_skb_cb(skb)->tx_sa = tx_sa;
++ macsec_skb_cb(skb)->has_sci = sci_present;
+ aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
+
+ dev_hold(skb->dev);
+@@ -800,15 +825,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsLate++;
+ u64_stats_update_end(&rxsc_stats->syncp);
++ DEV_STATS_INC(secy->netdev, rx_dropped);
+ return false;
+ }
+
+ if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
++ unsigned int msdu_len = macsec_msdu_len(skb);
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ if (hdr->tci_an & MACSEC_TCI_E)
+- rxsc_stats->stats.InOctetsDecrypted += skb->len;
++ rxsc_stats->stats.InOctetsDecrypted += msdu_len;
+ else
+- rxsc_stats->stats.InOctetsValidated += skb->len;
++ rxsc_stats->stats.InOctetsValidated += msdu_len;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ }
+
+@@ -821,6 +848,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsNotValid++;
+ u64_stats_update_end(&rxsc_stats->syncp);
++ this_cpu_inc(rx_sa->stats->InPktsNotValid);
++ DEV_STATS_INC(secy->netdev, rx_errors);
+ return false;
+ }
+
+@@ -906,9 +935,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
+
+ macsec_finalize_skb(skb, macsec->secy.icv_len,
+ macsec_extra_len(macsec_skb_cb(skb)->has_sci));
++ len = skb->len;
+ macsec_reset_skb(skb, macsec->secy.netdev);
+
+- len = skb->len;
+ if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
+ count_rx(dev, len);
+
+@@ -1050,6 +1079,7 @@ static void handle_not_macsec(struct sk_buff *skb)
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsNoTag++;
+ u64_stats_update_end(&secy_stats->syncp);
++ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ continue;
+ }
+
+@@ -1161,6 +1191,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsBadTag++;
+ u64_stats_update_end(&secy_stats->syncp);
++ DEV_STATS_INC(secy->netdev, rx_errors);
+ goto drop_nosa;
+ }
+
+@@ -1171,11 +1202,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ /* If validateFrames is Strict or the C bit in the
+ * SecTAG is set, discard
+ */
++ struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
+ if (hdr->tci_an & MACSEC_TCI_C ||
+ secy->validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsNotUsingSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
++ DEV_STATS_INC(secy->netdev, rx_errors);
++ if (active_rx_sa)
++ this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
+ goto drop_nosa;
+ }
+
+@@ -1185,6 +1220,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsUnusedSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
++ if (active_rx_sa)
++ this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
+ goto deliver;
+ }
+
+@@ -1202,6 +1239,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsLate++;
+ u64_stats_update_end(&rxsc_stats->syncp);
++ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ goto drop;
+ }
+ }
+@@ -1230,6 +1268,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ deliver:
+ macsec_finalize_skb(skb, secy->icv_len,
+ macsec_extra_len(macsec_skb_cb(skb)->has_sci));
++ len = skb->len;
+ macsec_reset_skb(skb, secy->netdev);
+
+ if (rx_sa)
+@@ -1237,12 +1276,11 @@ deliver:
+ macsec_rxsc_put(rx_sc);
+
+ skb_orphan(skb);
+- len = skb->len;
+ ret = gro_cells_receive(&macsec->gro_cells, skb);
+ if (ret == NET_RX_SUCCESS)
+ count_rx(dev, len);
+ else
+- macsec->secy.netdev->stats.rx_dropped++;
++ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+
+ rcu_read_unlock();
+
+@@ -1279,6 +1317,7 @@ nosci:
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsNoSCI++;
+ u64_stats_update_end(&secy_stats->syncp);
++ DEV_STATS_INC(macsec->secy.netdev, rx_errors);
+ continue;
+ }
+
+@@ -1297,7 +1336,7 @@ nosci:
+ secy_stats->stats.InPktsUnknownSCI++;
+ u64_stats_update_end(&secy_stats->syncp);
+ } else {
+- macsec->secy.netdev->stats.rx_dropped++;
++ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
+ }
+ }
+
+@@ -2731,21 +2770,21 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+
+ if (!secy->operational) {
+ kfree_skb(skb);
+- dev->stats.tx_dropped++;
++ DEV_STATS_INC(dev, tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
++ len = skb->len;
+ skb = macsec_encrypt(skb, dev);
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) != -EINPROGRESS)
+- dev->stats.tx_dropped++;
++ DEV_STATS_INC(dev, tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
+
+ macsec_encrypt_finish(skb, dev);
+- len = skb->len;
+ ret = dev_queue_xmit(skb);
+ count_tx(dev, ret, len);
+ return ret;
+@@ -2957,8 +2996,9 @@ static void macsec_get_stats64(struct net_device *dev,
+ s->tx_bytes += tmp.tx_bytes;
+ }
+
+- s->rx_dropped = dev->stats.rx_dropped;
+- s->tx_dropped = dev->stats.tx_dropped;
++ s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
++ s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
++ s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
+ }
+
+ static int macsec_get_iflink(const struct net_device *dev)
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 7be75a611e9e8..0e0bcc304d6c8 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -425,6 +425,17 @@ static int bcm5482_read_status(struct phy_device *phydev)
+ return err;
+ }
+
++static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
++{
++ return -EOPNOTSUPP;
++}
++
++static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
++ u16 val)
++{
++ return -EOPNOTSUPP;
++}
++
+ static int bcm5481_config_aneg(struct phy_device *phydev)
+ {
+ struct device_node *np = phydev->mdio.dev.of_node;
+@@ -696,6 +707,8 @@ static struct phy_driver broadcom_drivers[] = {
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54810",
+ /* PHY_GBIT_FEATURES */
++ .read_mmd = bcm54810_read_mmd,
++ .write_mmd = bcm54810_write_mmd,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .ack_interrupt = bcm_phy_ack_intr,
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 5c72e9ac4804d..4dc98832bbba6 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2194,7 +2194,9 @@ static void team_setup(struct net_device *dev)
+
+ dev->hw_features = TEAM_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_CTAG_RX |
+- NETIF_F_HW_VLAN_CTAG_FILTER;
++ NETIF_F_HW_VLAN_CTAG_FILTER |
++ NETIF_F_HW_VLAN_STAG_RX |
++ NETIF_F_HW_VLAN_STAG_FILTER;
+
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ dev->features |= dev->hw_features;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 683425e3a353c..a6445bba4f942 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1255,10 +1255,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
+
+ nla_peer = data[VETH_INFO_PEER];
+ ifmp = nla_data(nla_peer);
+- err = rtnl_nla_parse_ifla(peer_tb,
+- nla_data(nla_peer) + sizeof(struct ifinfomsg),
+- nla_len(nla_peer) - sizeof(struct ifinfomsg),
+- NULL);
++ err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6e520720beb59..f6a6678f43b9a 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3265,8 +3265,6 @@ static int virtnet_probe(struct virtio_device *vdev)
+ }
+ }
+
+- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+-
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+@@ -3279,6 +3277,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+
+ virtio_device_ready(vdev);
+
++ _virtnet_set_queues(vi, vi->curr_queue_pairs);
++
+ rtnl_unlock();
+
+ err = virtnet_cpu_notif_add(vi);
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 98be06ac2af24..f304bdefa8f5f 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -510,12 +510,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+- __pci_bus_size_bridges(dev->subordinate,
+- &add_list);
++ if (pci_is_root_bus(bus))
++ __pci_bus_size_bridges(dev->subordinate, &add_list);
+ }
+ }
+ }
+- __pci_bus_assign_resources(bus, &add_list, NULL);
++ if (pci_is_root_bus(bus))
++ __pci_bus_assign_resources(bus, &add_list, NULL);
++ else
++ pci_assign_unassigned_bridge_resources(bus->self);
+ }
+
+ acpiphp_sanitize_bus(bus);
+diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
+index 3a512513cb32f..6b311d6f8bf02 100644
+--- a/drivers/pcmcia/rsrc_nonstatic.c
++++ b/drivers/pcmcia/rsrc_nonstatic.c
+@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
+ q = p->next;
+ kfree(p);
+ }
++
++ kfree(data);
+ }
+
+
+diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
+index eda88cdf870df..8c3174d007507 100644
+--- a/drivers/pinctrl/pinctrl-rza2.c
++++ b/drivers/pinctrl/pinctrl-rza2.c
+@@ -14,6 +14,7 @@
+ #include <linux/gpio/driver.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of_device.h>
+ #include <linux/pinctrl/pinmux.h>
+
+@@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_gpio_range gpio_range;
+ int npins;
++ struct mutex mutex; /* serialize adding groups and functions */
+ };
+
+ #define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */
+@@ -359,10 +361,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
+ psel_val[i] = MUX_FUNC(value);
+ }
+
++ mutex_lock(&priv->mutex);
++
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
+- if (gsel < 0)
+- return gsel;
++ if (gsel < 0) {
++ ret = gsel;
++ goto unlock;
++ }
+
+ /*
+ * Register a single group function where the 'data' is an array PSEL
+@@ -391,6 +397,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
+ (*map)->data.mux.function = np->name;
+ *num_maps = 1;
+
++ mutex_unlock(&priv->mutex);
++
+ return 0;
+
+ remove_function:
+@@ -399,6 +407,9 @@ remove_function:
+ remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+
++unlock:
++ mutex_unlock(&priv->mutex);
++
+ dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
+
+ return ret;
+@@ -476,6 +487,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
++ mutex_init(&priv->mutex);
++
+ platform_set_drvdata(pdev, priv);
+
+ priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *
+diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
+index 711252e52d8e1..95a86e0dfd77a 100644
+--- a/drivers/scsi/raid_class.c
++++ b/drivers/scsi/raid_class.c
+@@ -209,54 +209,6 @@ raid_attr_ro_state(level);
+ raid_attr_ro_fn(resync);
+ raid_attr_ro_state_fn(state);
+
+-static void raid_component_release(struct device *dev)
+-{
+- struct raid_component *rc =
+- container_of(dev, struct raid_component, dev);
+- dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
+- put_device(rc->dev.parent);
+- kfree(rc);
+-}
+-
+-int raid_component_add(struct raid_template *r,struct device *raid_dev,
+- struct device *component_dev)
+-{
+- struct device *cdev =
+- attribute_container_find_class_device(&r->raid_attrs.ac,
+- raid_dev);
+- struct raid_component *rc;
+- struct raid_data *rd = dev_get_drvdata(cdev);
+- int err;
+-
+- rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+- if (!rc)
+- return -ENOMEM;
+-
+- INIT_LIST_HEAD(&rc->node);
+- device_initialize(&rc->dev);
+- rc->dev.release = raid_component_release;
+- rc->dev.parent = get_device(component_dev);
+- rc->num = rd->component_count++;
+-
+- dev_set_name(&rc->dev, "component-%d", rc->num);
+- list_add_tail(&rc->node, &rd->component_list);
+- rc->dev.class = &raid_class.class;
+- err = device_add(&rc->dev);
+- if (err)
+- goto err_out;
+-
+- return 0;
+-
+-err_out:
+- put_device(&rc->dev);
+- list_del(&rc->node);
+- rd->component_count--;
+- put_device(component_dev);
+- kfree(rc);
+- return err;
+-}
+-EXPORT_SYMBOL(raid_component_add);
+-
+ struct raid_template *
+ raid_class_attach(struct raid_function_template *ft)
+ {
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index c445853c623e2..e362453e8d262 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -317,12 +317,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ "Snic Tgt: device_add, with err = %d\n",
+ ret);
+
+- put_device(&tgt->dev);
+ put_device(&snic->shost->shost_gendev);
+ spin_lock_irqsave(snic->shost->host_lock, flags);
+ list_del(&tgt->list);
+ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+- kfree(tgt);
++ put_device(&tgt->dev);
+ tgt = NULL;
+
+ return tgt;
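The snic fix above applies the usual driver-model rule: once device_initialize() has run, the embedded kobject owns the allocation, so error paths must drop it with put_device() and let the release callback free the container; a direct kfree() would double-free when the last reference goes away. A kernel-style sketch of the pattern (demo_tgt and its helpers are invented names, not snic code):

#include <linux/device.h>
#include <linux/slab.h>

struct demo_tgt {
	struct device dev;
};

static void demo_tgt_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_tgt, dev));
}

static struct demo_tgt *demo_tgt_create(struct device *parent)
{
	struct demo_tgt *tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);

	if (!tgt)
		return NULL;

	device_initialize(&tgt->dev);
	tgt->dev.parent = parent;
	tgt->dev.release = demo_tgt_release;
	dev_set_name(&tgt->dev, "demo-tgt");

	if (device_add(&tgt->dev)) {
		/* frees tgt via demo_tgt_release(), never kfree() here */
		put_device(&tgt->dev);
		return NULL;
	}
	return tgt;
}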
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index f49f3b017206c..4770513944d43 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -3135,6 +3135,7 @@ void serial8250_init_port(struct uart_8250_port *up)
+ struct uart_port *port = &up->port;
+
+ spin_lock_init(&port->lock);
++ port->pm = NULL;
+ port->ops = &serial8250_pops;
+
+ up->cur_iotype = 0xFF;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 88c8357969220..9230d96ed3cd8 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1023,8 +1023,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+ unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+
+ if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
+- /* Read DR to clear the error flags */
+- lpuart32_read(&sport->port, UARTDATA);
++ /* Clear the error flags */
++ lpuart32_write(&sport->port, sr, UARTSTAT);
+
+ if (sr & UARTSTAT_PE)
+ sport->port.icount.parity++;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 85561b3194a16..0fe545815c5ce 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
+ CI_HDRC_PMQOS,
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
++ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+ { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
+ { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
++ { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 2d7cfa8825aa8..8c3ab9bfbb9e6 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -193,55 +193,58 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
+ /* Only usable in contexts where the role can not change. */
+ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+ {
+- struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
++ struct dwc3 *dwc;
++
++ /*
++ * FIXME: Fix this layering violation.
++ */
++ dwc = platform_get_drvdata(qcom->dwc3);
++
++ /* Core driver may not have probed yet. */
++ if (!dwc)
++ return false;
+
+ return dwc->xhci;
+ }
+
++static void dwc3_qcom_enable_wakeup_irq(int irq)
++{
++ if (!irq)
++ return;
++
++ enable_irq(irq);
++ enable_irq_wake(irq);
++}
++
++static void dwc3_qcom_disable_wakeup_irq(int irq)
++{
++ if (!irq)
++ return;
++
++ disable_irq_wake(irq);
++ disable_irq_nosync(irq);
++}
++
+ static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
+ {
+- if (qcom->hs_phy_irq) {
+- disable_irq_wake(qcom->hs_phy_irq);
+- disable_irq_nosync(qcom->hs_phy_irq);
+- }
++ dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
+
+- if (qcom->dp_hs_phy_irq) {
+- disable_irq_wake(qcom->dp_hs_phy_irq);
+- disable_irq_nosync(qcom->dp_hs_phy_irq);
+- }
++ dwc3_qcom_disable_wakeup_irq(qcom->dp_hs_phy_irq);
+
+- if (qcom->dm_hs_phy_irq) {
+- disable_irq_wake(qcom->dm_hs_phy_irq);
+- disable_irq_nosync(qcom->dm_hs_phy_irq);
+- }
++ dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
+
+- if (qcom->ss_phy_irq) {
+- disable_irq_wake(qcom->ss_phy_irq);
+- disable_irq_nosync(qcom->ss_phy_irq);
+- }
++ dwc3_qcom_disable_wakeup_irq(qcom->ss_phy_irq);
+ }
+
+ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
+ {
+- if (qcom->hs_phy_irq) {
+- enable_irq(qcom->hs_phy_irq);
+- enable_irq_wake(qcom->hs_phy_irq);
+- }
++ dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq);
+
+- if (qcom->dp_hs_phy_irq) {
+- enable_irq(qcom->dp_hs_phy_irq);
+- enable_irq_wake(qcom->dp_hs_phy_irq);
+- }
++ dwc3_qcom_enable_wakeup_irq(qcom->dp_hs_phy_irq);
+
+- if (qcom->dm_hs_phy_irq) {
+- enable_irq(qcom->dm_hs_phy_irq);
+- enable_irq_wake(qcom->dm_hs_phy_irq);
+- }
++ dwc3_qcom_enable_wakeup_irq(qcom->dm_hs_phy_irq);
+
+- if (qcom->ss_phy_irq) {
+- enable_irq(qcom->ss_phy_irq);
+- enable_irq_wake(qcom->ss_phy_irq);
+- }
++ dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq);
+ }
+
+ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
+diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
+index a4d05b1b17d7d..665ef7a0a2495 100644
+--- a/drivers/video/fbdev/core/sysimgblt.c
++++ b/drivers/video/fbdev/core/sysimgblt.c
+@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ {
+ u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
+ u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
+- u32 bit_mask, end_mask, eorx, shift;
+- const char *s = image->data, *src;
++ u32 bit_mask, eorx, shift;
++ const u8 *s = image->data, *src;
+ u32 *dst;
+- const u32 *tab = NULL;
++ const u32 *tab;
++ size_t tablen;
++ u32 colortab[16];
+ int i, j, k;
+
+ switch (bpp) {
+ case 8:
+ tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
++ tablen = 16;
+ break;
+ case 16:
+ tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
++ tablen = 4;
+ break;
+ case 32:
+- default:
+ tab = cfb_tab32;
++ tablen = 2;
+ break;
++ default:
++ return;
+ }
+
+ for (i = ppw-1; i--; ) {
+@@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
+ eorx = fgx ^ bgx;
+ k = image->width/ppw;
+
++ for (i = 0; i < tablen; ++i)
++ colortab[i] = (tab[i] & eorx) ^ bgx;
++
+ for (i = image->height; i--; ) {
+ dst = dst1;
+ shift = 8;
+ src = s;
+
+- for (j = k; j--; ) {
++ /*
++ * Manually unroll the per-line copying loop for better
++ * performance. This works until we processed the last
++ * completely filled source byte (inclusive).
++ */
++ switch (ppw) {
++ case 4: /* 8 bpp */
++ for (j = k; j >= 2; j -= 2, ++src) {
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ case 2: /* 16 bpp */
++ for (j = k; j >= 4; j -= 4, ++src) {
++ *dst++ = colortab[(*src >> 6) & bit_mask];
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 2) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ case 1: /* 32 bpp */
++ for (j = k; j >= 8; j -= 8, ++src) {
++ *dst++ = colortab[(*src >> 7) & bit_mask];
++ *dst++ = colortab[(*src >> 6) & bit_mask];
++ *dst++ = colortab[(*src >> 5) & bit_mask];
++ *dst++ = colortab[(*src >> 4) & bit_mask];
++ *dst++ = colortab[(*src >> 3) & bit_mask];
++ *dst++ = colortab[(*src >> 2) & bit_mask];
++ *dst++ = colortab[(*src >> 1) & bit_mask];
++ *dst++ = colortab[(*src >> 0) & bit_mask];
++ }
++ break;
++ }
++
++ /*
++ * For image widths that are not a multiple of 8, there
++ * are trailing pixels left on the current line. Print
++ * them as well.
++ */
++ for (; j--; ) {
+ shift -= ppw;
+- end_mask = tab[(*src >> shift) & bit_mask];
+- *dst++ = (end_mask & eorx) ^ bgx;
++ *dst++ = colortab[(*src >> shift) & bit_mask];
+ if (!shift) {
+ shift = 8;
+- src++;
++ ++src;
+ }
+ }
++
+ dst1 += p->fix.line_length;
+ s += spitch;
+ }
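The colortab precomputation above relies on the branch-free select identity (m & (fg ^ bg)) ^ bg, which yields fg wherever a mask bit is 1 and bg wherever it is 0; hoisting it out of the per-pixel loop turns every unrolled store into a single table lookup. A standalone demonstration with arbitrary example colors:

#include <stdio.h>

int main(void)
{
	unsigned int fg = 0x00ff00ffu, bg = 0x11223344u;
	unsigned int eorx = fg ^ bg;	/* as in fast_imageblit() */

	/* an all-ones mask selects fg, an all-zeroes mask selects bg,
	 * exactly like colortab[i] = (tab[i] & eorx) ^ bgx above
	 */
	printf("%08x\n", (0xffffffffu & eorx) ^ bg);	/* 00ff00ff */
	printf("%08x\n", (0x00000000u & eorx) ^ bg);	/* 11223344 */
	return 0;
}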
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index 17174cd7a5bba..b02b0bc106132 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -510,7 +510,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ ret = -ENOENT;
+ goto failed;
+ }
+- clk_prepare_enable(ctrl->clk);
++ ret = clk_prepare_enable(ctrl->clk);
++ if (ret)
++ goto failed;
+
+ /* init global regs */
+ ctrl_set_default(ctrl);
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index e781e5e9215f0..aee8b5ce8b63c 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -542,11 +542,9 @@ static void virtio_mmio_release_dev(struct device *_d)
+ {
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+- struct virtio_mmio_device *vm_dev =
+- container_of(vdev, struct virtio_mmio_device, vdev);
+- struct platform_device *pdev = vm_dev->pdev;
++ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+- devm_kfree(&pdev->dev, vm_dev);
++ kfree(vm_dev);
+ }
+
+ /* Platform device */
+@@ -554,19 +552,10 @@ static void virtio_mmio_release_dev(struct device *_d)
+ static int virtio_mmio_probe(struct platform_device *pdev)
+ {
+ struct virtio_mmio_device *vm_dev;
+- struct resource *mem;
+ unsigned long magic;
+ int rc;
+
+- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- if (!mem)
+- return -EINVAL;
+-
+- if (!devm_request_mem_region(&pdev->dev, mem->start,
+- resource_size(mem), pdev->name))
+- return -EBUSY;
+-
+- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
++ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+@@ -577,9 +566,9 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ INIT_LIST_HEAD(&vm_dev->virtqueues);
+ spin_lock_init(&vm_dev->lock);
+
+- vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+- if (vm_dev->base == NULL)
+- return -EFAULT;
++ vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(vm_dev->base))
++ return PTR_ERR(vm_dev->base);
+
+ /* Check magic value */
+ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index c5944c61317f3..0d4afeacb237b 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4558,8 +4558,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
+ }
+ }
+
+- BUG_ON(fs_info->balance_ctl ||
+- test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
++ ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
+ atomic_dec(&fs_info->balance_cancel_req);
+ mutex_unlock(&fs_info->balance_mutex);
+ return 0;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 86924831fd4ba..a0b99c5e07217 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -4510,9 +4510,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
+
+ io_error:
+ kunmap(page);
+- unlock_page(page);
+
+ read_complete:
++ unlock_page(page);
+ return rc;
+ }
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index b6242071583e0..86d645d02d55c 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
+ void dlm_scan_timeout(struct dlm_ls *ls)
+ {
+ struct dlm_rsb *r;
+- struct dlm_lkb *lkb;
++ struct dlm_lkb *lkb = NULL, *iter;
+ int do_cancel, do_warn;
+ s64 wait_us;
+
+@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
+ do_cancel = 0;
+ do_warn = 0;
+ mutex_lock(&ls->ls_timeout_mutex);
+- list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
++ list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
+
+ wait_us = ktime_to_us(ktime_sub(ktime_get(),
+- lkb->lkb_timestamp));
++ iter->lkb_timestamp));
+
+- if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
+- wait_us >= (lkb->lkb_timeout_cs * 10000))
++ if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
++ wait_us >= (iter->lkb_timeout_cs * 10000))
+ do_cancel = 1;
+
+- if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
++ if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+ wait_us >= dlm_config.ci_timewarn_cs * 10000)
+ do_warn = 1;
+
+ if (!do_cancel && !do_warn)
+ continue;
+- hold_lkb(lkb);
++ hold_lkb(iter);
++ lkb = iter;
+ break;
+ }
+ mutex_unlock(&ls->ls_timeout_mutex);
+
+- if (!do_cancel && !do_warn)
++ if (!lkb)
+ break;
+
+ r = lkb->lkb_resource;
+@@ -5241,21 +5242,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+
+ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
+ {
+- struct dlm_lkb *lkb;
+- int found = 0;
++ struct dlm_lkb *lkb = NULL, *iter;
+
+ mutex_lock(&ls->ls_waiters_mutex);
+- list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
+- if (lkb->lkb_flags & DLM_IFL_RESEND) {
+- hold_lkb(lkb);
+- found = 1;
++ list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
++ if (iter->lkb_flags & DLM_IFL_RESEND) {
++ hold_lkb(iter);
++ lkb = iter;
+ break;
+ }
+ }
+ mutex_unlock(&ls->ls_waiters_mutex);
+
+- if (!found)
+- lkb = NULL;
+ return lkb;
+ }
+
+@@ -5914,37 +5912,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+ int mode, uint32_t flags, void *name, unsigned int namelen,
+ unsigned long timeout_cs, uint32_t *lkid)
+ {
+- struct dlm_lkb *lkb;
++ struct dlm_lkb *lkb = NULL, *iter;
+ struct dlm_user_args *ua;
+ int found_other_mode = 0;
+- int found = 0;
+ int rv = 0;
+
+ mutex_lock(&ls->ls_orphans_mutex);
+- list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
+- if (lkb->lkb_resource->res_length != namelen)
++ list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
++ if (iter->lkb_resource->res_length != namelen)
+ continue;
+- if (memcmp(lkb->lkb_resource->res_name, name, namelen))
++ if (memcmp(iter->lkb_resource->res_name, name, namelen))
+ continue;
+- if (lkb->lkb_grmode != mode) {
++ if (iter->lkb_grmode != mode) {
+ found_other_mode = 1;
+ continue;
+ }
+
+- found = 1;
+- list_del_init(&lkb->lkb_ownqueue);
+- lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
+- *lkid = lkb->lkb_id;
++ lkb = iter;
++ list_del_init(&iter->lkb_ownqueue);
++ iter->lkb_flags &= ~DLM_IFL_ORPHAN;
++ *lkid = iter->lkb_id;
+ break;
+ }
+ mutex_unlock(&ls->ls_orphans_mutex);
+
+- if (!found && found_other_mode) {
++ if (!lkb && found_other_mode) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+- if (!found) {
++ if (!lkb) {
+ rv = -ENOENT;
+ goto out;
+ }
+diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
+index f3482e936cc25..28735e8c5e206 100644
+--- a/fs/dlm/plock.c
++++ b/fs/dlm/plock.c
+@@ -80,8 +80,7 @@ static void send_op(struct plock_op *op)
+ abandoned waiter. So, we have to insert the unlock-close when the
+ lock call is interrupted. */
+
+-static void do_unlock_close(struct dlm_ls *ls, u64 number,
+- struct file *file, struct file_lock *fl)
++static void do_unlock_close(const struct dlm_plock_info *info)
+ {
+ struct plock_op *op;
+
+@@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
+ return;
+
+ op->info.optype = DLM_PLOCK_OP_UNLOCK;
+- op->info.pid = fl->fl_pid;
+- op->info.fsid = ls->ls_global_id;
+- op->info.number = number;
++ op->info.pid = info->pid;
++ op->info.fsid = info->fsid;
++ op->info.number = info->number;
+ op->info.start = 0;
+ op->info.end = OFFSET_MAX;
+- if (fl->fl_lmops && fl->fl_lmops->lm_grant)
+- op->info.owner = (__u64) fl->fl_pid;
+- else
+- op->info.owner = (__u64)(long) fl->fl_owner;
++ op->info.owner = info->owner;
+
+ op->info.flags |= DLM_PLOCK_FL_CLOSE;
+ send_op(op);
+@@ -161,13 +157,14 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+
+ rv = wait_event_killable(recv_wq, (op->done != 0));
+ if (rv == -ERESTARTSYS) {
+- log_debug(ls, "%s: wait killed %llx", __func__,
+- (unsigned long long)number);
+ spin_lock(&ops_lock);
+ list_del(&op->list);
+ spin_unlock(&ops_lock);
++ log_debug(ls, "%s: wait interrupted %x %llx pid %d",
++ __func__, ls->ls_global_id,
++ (unsigned long long)number, op->info.pid);
+ dlm_release_plock_op(op);
+- do_unlock_close(ls, number, file, fl);
++ do_unlock_close(&op->info);
+ goto out;
+ }
+
+@@ -408,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ if (op->info.flags & DLM_PLOCK_FL_CLOSE)
+ list_del(&op->list);
+ else
+- list_move(&op->list, &recv_list);
++ list_move_tail(&op->list, &recv_list);
+ memcpy(&info, &op->info, sizeof(info));
+ }
+ spin_unlock(&ops_lock);
+@@ -433,9 +430,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
+ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ loff_t *ppos)
+ {
++ struct plock_op *op = NULL, *iter;
+ struct dlm_plock_info info;
+- struct plock_op *op;
+- int found = 0, do_callback = 0;
++ int do_callback = 0;
+
+ if (count != sizeof(info))
+ return -EINVAL;
+@@ -446,31 +443,63 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
+ if (check_version(&info))
+ return -EINVAL;
+
++ /*
++ * The results for waiting ops (SETLKW) can be returned in any
++ * order, so match all fields to find the op. The results for
++ * non-waiting ops are returned in the order that they were sent
++ * to userspace, so match the result with the first non-waiting op.
++ */
+ spin_lock(&ops_lock);
+- list_for_each_entry(op, &recv_list, list) {
+- if (op->info.fsid == info.fsid &&
+- op->info.number == info.number &&
+- op->info.owner == info.owner) {
+- list_del_init(&op->list);
+- memcpy(&op->info, &info, sizeof(info));
+- if (op->data)
+- do_callback = 1;
+- else
+- op->done = 1;
+- found = 1;
+- break;
++ if (info.wait) {
++ list_for_each_entry(iter, &recv_list, list) {
++ if (iter->info.fsid == info.fsid &&
++ iter->info.number == info.number &&
++ iter->info.owner == info.owner &&
++ iter->info.pid == info.pid &&
++ iter->info.start == info.start &&
++ iter->info.end == info.end &&
++ iter->info.ex == info.ex &&
++ iter->info.wait) {
++ op = iter;
++ break;
++ }
++ }
++ } else {
++ list_for_each_entry(iter, &recv_list, list) {
++ if (!iter->info.wait) {
++ op = iter;
++ break;
++ }
+ }
+ }
++
++ if (op) {
++ /* Sanity check that op and info match. */
++ if (info.wait)
++ WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
++ else
++ WARN_ON(op->info.fsid != info.fsid ||
++ op->info.number != info.number ||
++ op->info.owner != info.owner ||
++ op->info.optype != info.optype);
++
++ list_del_init(&op->list);
++ memcpy(&op->info, &info, sizeof(info));
++ if (op->data)
++ do_callback = 1;
++ else
++ op->done = 1;
++ }
+ spin_unlock(&ops_lock);
+
+- if (found) {
++ if (op) {
+ if (do_callback)
+ dlm_plock_callback(op);
+ else
+ wake_up(&recv_wq);
+ } else
+- log_print("dev_write no op %x %llx", info.fsid,
+- (unsigned long long)info.number);
++ log_print("%s: no op %x %llx", __func__,
++ info.fsid, (unsigned long long)info.number);
+ return count;
+ }
+
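The dev_write() rework above encodes two matching rules: replies for waiting (SETLKW) requests may arrive in any order, so they are matched on the full request tuple, while replies for non-waiting requests arrive in submission order and take the first queued non-waiting op. A standalone model of that lookup (struct op below is a simplified stand-in for struct plock_op):

#include <stdbool.h>
#include <stdio.h>

struct op {
	int fsid, number, owner, pid;
	bool wait;
	struct op *next;
};

static struct op *match(struct op *head, const struct op *info)
{
	for (struct op *it = head; it; it = it->next) {
		if (info->wait
		    ? (it->wait && it->fsid == info->fsid &&
		       it->number == info->number &&
		       it->owner == info->owner && it->pid == info->pid)
		    : !it->wait)
			return it;
	}
	return NULL;
}

int main(void)
{
	struct op b = { 1, 7, 3, 42, true,  NULL };	/* waiting op */
	struct op a = { 1, 7, 3, 41, false, &b };	/* non-waiting op */
	struct op info = { 1, 7, 3, 42, true, NULL };

	printf("waiting reply -> pid %d\n", match(&a, &info)->pid);	/* 42 */
	info.wait = false;
	printf("fifo reply    -> pid %d\n", match(&a, &info)->pid);	/* 41 */
	return 0;
}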
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index 8928e99dfd47d..df18f38a02734 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
+
+ static void recover_lvb(struct dlm_rsb *r)
+ {
+- struct dlm_lkb *lkb, *high_lkb = NULL;
++ struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
+ uint32_t high_seq = 0;
+ int lock_lvb_exists = 0;
+- int big_lock_exists = 0;
+ int lvblen = r->res_ls->ls_lvblen;
+
+ if (!rsb_flag(r, RSB_NEW_MASTER2) &&
+@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
+ /* we are the new master, so figure out if VALNOTVALID should
+ be set, and set the rsb lvb from the best lkb available. */
+
+- list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
+- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++ list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
++ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+ continue;
+
+ lock_lvb_exists = 1;
+
+- if (lkb->lkb_grmode > DLM_LOCK_CR) {
+- big_lock_exists = 1;
++ if (iter->lkb_grmode > DLM_LOCK_CR) {
++ big_lkb = iter;
+ goto setflag;
+ }
+
+- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+- high_lkb = lkb;
+- high_seq = lkb->lkb_lvbseq;
++ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++ high_lkb = iter;
++ high_seq = iter->lkb_lvbseq;
+ }
+ }
+
+- list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
+- if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
++ list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
++ if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
+ continue;
+
+ lock_lvb_exists = 1;
+
+- if (lkb->lkb_grmode > DLM_LOCK_CR) {
+- big_lock_exists = 1;
++ if (iter->lkb_grmode > DLM_LOCK_CR) {
++ big_lkb = iter;
+ goto setflag;
+ }
+
+- if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
+- high_lkb = lkb;
+- high_seq = lkb->lkb_lvbseq;
++ if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
++ high_lkb = iter;
++ high_seq = iter->lkb_lvbseq;
+ }
+ }
+
+@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
+ goto out;
+
+ /* lvb is invalidated if only NL/CR locks remain */
+- if (!big_lock_exists)
++ if (!big_lkb)
+ rsb_set_flag(r, RSB_VALNOTVALID);
+
+ if (!r->res_lvbptr) {
+@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
+ goto out;
+ }
+
+- if (big_lock_exists) {
+- r->res_lvbseq = lkb->lkb_lvbseq;
+- memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
++ if (big_lkb) {
++ r->res_lvbseq = big_lkb->lkb_lvbseq;
++ memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
+ } else if (high_lkb) {
+ r->res_lvbseq = high_lkb->lkb_lvbseq;
+ memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index baf0a70460c03..15e757f763806 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1046,7 +1046,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ {
+ struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
+ struct gfs2_args *args = &sdp->sd_args;
+- int val;
++ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
++
++ spin_lock(&sdp->sd_tune.gt_spin);
++ logd_secs = sdp->sd_tune.gt_logd_secs;
++ quota_quantum = sdp->sd_tune.gt_quota_quantum;
++ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
++ statfs_slow = sdp->sd_tune.gt_statfs_slow;
++ spin_unlock(&sdp->sd_tune.gt_spin);
+
+ if (is_ancestor(root, sdp->sd_master_dir))
+ seq_puts(s, ",meta");
+@@ -1101,17 +1108,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ }
+ if (args->ar_discard)
+ seq_puts(s, ",discard");
+- val = sdp->sd_tune.gt_logd_secs;
+- if (val != 30)
+- seq_printf(s, ",commit=%d", val);
+- val = sdp->sd_tune.gt_statfs_quantum;
+- if (val != 30)
+- seq_printf(s, ",statfs_quantum=%d", val);
+- else if (sdp->sd_tune.gt_statfs_slow)
++ if (logd_secs != 30)
++ seq_printf(s, ",commit=%d", logd_secs);
++ if (statfs_quantum != 30)
++ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
++ else if (statfs_slow)
+ seq_puts(s, ",statfs_quantum=0");
+- val = sdp->sd_tune.gt_quota_quantum;
+- if (val != 60)
+- seq_printf(s, ",quota_quantum=%d", val);
++ if (quota_quantum != 60)
++ seq_printf(s, ",quota_quantum=%d", quota_quantum);
+ if (args->ar_statfs_percent)
+ seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
+ if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
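The gfs2_show_options() hunk above replaces one-at-a-time reads of the sd_tune fields with a single snapshot taken under gt_spin, so the printed tunables are mutually consistent. A minimal sketch of that snapshot-under-lock pattern, with illustrative field and lock names:

#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct tunables {
	spinlock_t lock;
	unsigned int commit_secs;
};

static void show_tunables(struct seq_file *s, struct tunables *t)
{
	unsigned int commit_secs;

	/* Copy under the lock; format after dropping it. */
	spin_lock(&t->lock);
	commit_secs = t->commit_secs;
	spin_unlock(&t->lock);

	if (commit_secs != 30)		/* print only non-default values */
		seq_printf(s, ",commit=%u", commit_secs);
}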
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index dac67ee1879be..8e8d53241386f 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -2027,6 +2027,9 @@ dbAllocDmapLev(struct bmap * bmp,
+ if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
+ return -ENOSPC;
+
++ if (leafidx < 0)
++ return -EIO;
++
+ /* determine the block number within the file system corresponding
+ * to the leaf at which free space was found.
+ */
+diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
+index c8ce7f1bc5942..6f6a5b9203d3f 100644
+--- a/fs/jfs/jfs_txnmgr.c
++++ b/fs/jfs/jfs_txnmgr.c
+@@ -354,6 +354,11 @@ tid_t txBegin(struct super_block *sb, int flag)
+ jfs_info("txBegin: flag = 0x%x", flag);
+ log = JFS_SBI(sb)->log;
+
++ if (!log) {
++ jfs_error(sb, "read-only filesystem\n");
++ return 0;
++ }
++
+ TXN_LOCK();
+
+ INCREMENT(TxStat.txBegin);
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index 7a55d14cc1af0..f155ad6650bd4 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -798,6 +798,11 @@ static int jfs_link(struct dentry *old_dentry,
+ if (rc)
+ goto out;
+
++ if (isReadOnly(ip)) {
++ jfs_error(ip->i_sb, "read-only filesystem\n");
++ return -EROFS;
++ }
++
+ tid = txBegin(ip->i_sb, 0);
+
+ mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 231da9fadf098..c41d149626047 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6887,8 +6887,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+ goto out_restart;
+ break;
+- case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
++ if (data->arg.new_lock_owner != 0 &&
++ nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
++ lsp->ls_state))
++ goto out_restart;
++ if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
++ goto out_restart;
++ fallthrough;
++ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (data->arg.new_lock_owner != 0) {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 5922eceb01762..477819700156a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1096,9 +1096,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
+ WARN_ON(!list_empty(&dp->dl_recall_lru));
+
+ if (clp->cl_minorversion) {
++ spin_lock(&clp->cl_lock);
+ dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
+ refcount_inc(&dp->dl_stid.sc_count);
+- spin_lock(&clp->cl_lock);
+ list_add(&dp->dl_recall_lru, &clp->cl_revoked);
+ spin_unlock(&clp->cl_lock);
+ }
+@@ -5513,15 +5513,6 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ CLOSE_STATEID(stateid))
+ return status;
+- /* Client debugging aid. */
+- if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
+- char addr_str[INET6_ADDRSTRLEN];
+- rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
+- sizeof(addr_str));
+- pr_warn_ratelimited("NFSD: client %s testing state ID "
+- "with incorrect client ID\n", addr_str);
+- return status;
+- }
+ spin_lock(&cl->cl_lock);
+ s = find_stateid_locked(cl, stateid);
+ if (!s)
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index 28348c44ea5b2..8d81e88f1d1ef 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -27,6 +27,7 @@ struct ovl_sb {
+ };
+
+ struct ovl_layer {
++ /* ovl_free_fs() relies on @mnt being the first member! */
+ struct vfsmount *mnt;
+ /* Trap in ovl inode cache */
+ struct inode *trap;
+@@ -37,6 +38,14 @@ struct ovl_layer {
+ int fsid;
+ };
+
++/*
++ * ovl_free_fs() relies on @mnt being the first member when unmounting
++ * the private mounts created for each layer. Let's check both the
++ * offset and type.
++ */
++static_assert(offsetof(struct ovl_layer, mnt) == 0);
++static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
++
+ struct ovl_path {
+ struct ovl_layer *layer;
+ struct dentry *dentry;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 1d652af48f0b1..3d1a71d2909bb 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -546,7 +546,7 @@ restart:
+ continue;
+ /* Wait for dquot users */
+ if (atomic_read(&dquot->dq_count)) {
+- dqgrab(dquot);
++ atomic_inc(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ /*
+ * Once dqput() wakes us up, we know it's time to free
+@@ -2415,7 +2415,8 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+
+ error = add_dquot_ref(sb, type);
+ if (error)
+- dquot_disable(sb, type, flags);
++ dquot_disable(sb, type,
++ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+
+ return error;
+ out_fmt:
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index 622569007b530..2142cbd1dde24 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -247,7 +247,7 @@ static int udf_name_from_CS0(struct super_block *sb,
+ }
+
+ if (translate) {
+- if (str_o_len <= 2 && str_o[0] == '.' &&
++ if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' &&
+ (str_o_len == 1 || str_o[1] == '.'))
+ needsCRC = 1;
+ if (needsCRC) {
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+index 8364502f92cfe..8eaf640d46809 100644
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -1052,7 +1052,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
+
+ #define DP_BRANCH_OUI_HEADER_SIZE 0xc
+ #define DP_RECEIVER_CAP_SIZE 0xf
+-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
++#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
+ #define EDP_PSR_RECEIVER_CAP_SIZE 2
+ #define EDP_DISPLAY_CTL_CAP_SIZE 3
+
+diff --git a/include/linux/clk.h b/include/linux/clk.h
+index 87730337e28f8..562859ee24f43 100644
+--- a/include/linux/clk.h
++++ b/include/linux/clk.h
+@@ -172,6 +172,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+ */
+ bool clk_is_match(const struct clk *p, const struct clk *q);
+
++/**
++ * clk_rate_exclusive_get - get exclusivity over the rate control of a
++ * producer
++ * @clk: clock source
++ *
++ * This function allows drivers to get exclusive control over the rate of a
++ * provider. It prevents any other consumer from executing, even indirectly,
++ * an operation which could alter the rate of the provider or cause glitches.
++ *
++ * If exclusivity is claimed more than once on a clock, even by the same driver,
++ * the rate effectively gets locked as exclusivity can't be preempted.
++ *
++ * Must not be called from within atomic context.
++ *
++ * Returns success (0) or negative errno.
++ */
++int clk_rate_exclusive_get(struct clk *clk);
++
++/**
++ * clk_rate_exclusive_put - release exclusivity over the rate control of a
++ * producer
++ * @clk: clock source
++ *
++ * This function allows drivers to release the exclusivity they previously got
++ * from clk_rate_exclusive_get().
++ *
++ * The caller must balance the number of clk_rate_exclusive_get() and
++ * clk_rate_exclusive_put() calls.
++ *
++ * Must not be called from within atomic context.
++ */
++void clk_rate_exclusive_put(struct clk *clk);
++
+ #else
+
+ static inline int clk_notifier_register(struct clk *clk,
+@@ -218,6 +251,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+ return p == q;
+ }
+
++static inline int clk_rate_exclusive_get(struct clk *clk)
++{
++ return 0;
++}
++
++static inline void clk_rate_exclusive_put(struct clk *clk) {}
++
+ #endif
+
+ /**
+@@ -530,38 +570,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
+ */
+ struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id);
+-/**
+- * clk_rate_exclusive_get - get exclusivity over the rate control of a
+- * producer
+- * @clk: clock source
+- *
+- * This function allows drivers to get exclusive control over the rate of a
+- * provider. It prevents any other consumer to execute, even indirectly,
+- * opereation which could alter the rate of the provider or cause glitches
+- *
+- * If exlusivity is claimed more than once on clock, even by the same driver,
+- * the rate effectively gets locked as exclusivity can't be preempted.
+- *
+- * Must not be called from within atomic context.
+- *
+- * Returns success (0) or negative errno.
+- */
+-int clk_rate_exclusive_get(struct clk *clk);
+-
+-/**
+- * clk_rate_exclusive_put - release exclusivity over the rate control of a
+- * producer
+- * @clk: clock source
+- *
+- * This function allows drivers to release the exclusivity it previously got
+- * from clk_rate_exclusive_get()
+- *
+- * The caller must balance the number of clk_rate_exclusive_get() and
+- * clk_rate_exclusive_put() calls.
+- *
+- * Must not be called from within atomic context.
+- */
+-void clk_rate_exclusive_put(struct clk *clk);
+
+ /**
+ * clk_enable - inform the system when the clock source should be running.
+@@ -918,14 +926,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+
+ static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
+
+-
+-static inline int clk_rate_exclusive_get(struct clk *clk)
+-{
+- return 0;
+-}
+-
+-static inline void clk_rate_exclusive_put(struct clk *clk) {}
+-
+ static inline int clk_enable(struct clk *clk)
+ {
+ return 0;
+diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
+index d70a914cba118..1e0dd0541b1ed 100644
+--- a/include/linux/interconnect.h
++++ b/include/linux/interconnect.h
+@@ -29,6 +29,8 @@ struct icc_path *icc_get(struct device *dev, const int src_id,
+ const int dst_id);
+ struct icc_path *of_icc_get(struct device *dev, const char *name);
+ void icc_put(struct icc_path *path);
++int icc_enable(struct icc_path *path);
++int icc_disable(struct icc_path *path);
+ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
+ void icc_set_tag(struct icc_path *path, u32 tag);
+
+@@ -50,6 +52,16 @@ static inline void icc_put(struct icc_path *path)
+ {
+ }
+
++static inline int icc_enable(struct icc_path *path)
++{
++ return 0;
++}
++
++static inline int icc_disable(struct icc_path *path)
++{
++ return 0;
++}
++
+ static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+ {
+ return 0;
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index d35c29d322d83..d14aba548ff4e 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -37,6 +37,8 @@ struct user_struct;
+ struct writeback_control;
+ struct bdi_writeback;
+
++extern int sysctl_page_lock_unfairness;
++
+ void init_mm_internals(void);
+
+ #ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
+diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
+index 7145795b4b9da..f615e217e575a 100644
+--- a/include/linux/pm_runtime.h
++++ b/include/linux/pm_runtime.h
+@@ -38,7 +38,7 @@ extern int pm_runtime_force_resume(struct device *dev);
+ extern int __pm_runtime_idle(struct device *dev, int rpmflags);
+ extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
+ extern int __pm_runtime_resume(struct device *dev, int rpmflags);
+-extern int pm_runtime_get_if_in_use(struct device *dev);
++extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
+ extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
+ extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
+ extern int pm_runtime_barrier(struct device *dev);
+@@ -59,6 +59,11 @@ extern void pm_runtime_put_suppliers(struct device *dev);
+ extern void pm_runtime_new_link(struct device *dev);
+ extern void pm_runtime_drop_link(struct device_link *link);
+
++static inline int pm_runtime_get_if_in_use(struct device *dev)
++{
++ return pm_runtime_get_if_active(dev, false);
++}
++
+ static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+ {
+ dev->power.ignore_children = enable;
+@@ -142,6 +147,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
+ {
+ return -EINVAL;
+ }
++static inline int pm_runtime_get_if_active(struct device *dev,
++ bool ign_usage_count)
++{
++ return -EINVAL;
++}
+ static inline int __pm_runtime_set_status(struct device *dev,
+ unsigned int status) { return 0; }
+ static inline int pm_runtime_barrier(struct device *dev) { return 0; }
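The pm_runtime.h hunk above turns pm_runtime_get_if_in_use() into a wrapper around the new pm_runtime_get_if_active(). A short usage sketch for the new helper; the hardware access is a placeholder:

#include <linux/pm_runtime.h>

static void maybe_touch_hw(struct device *dev)
{
	/* Take a usage reference only if the device is already RPM_ACTIVE,
	 * regardless of the current usage count (second argument true). */
	int ret = pm_runtime_get_if_active(dev, true);

	if (ret <= 0)
		return;	/* 0: not active; -EINVAL: runtime PM disabled */

	/* ... safe to access device registers here ... */

	pm_runtime_put(dev);	/* balance the reference taken above */
}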
+diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
+index 5cdfcb873a8f0..772d45b2a60a0 100644
+--- a/include/linux/raid_class.h
++++ b/include/linux/raid_class.h
+@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
+
+ struct raid_template *raid_class_attach(struct raid_function_template *);
+ void raid_class_release(struct raid_template *);
+-
+-int __must_check raid_component_add(struct raid_template *, struct device *,
+- struct device *);
+-
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index a960de68ac69e..6047058d67037 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -148,6 +148,10 @@ retry:
+ if (gso_type & SKB_GSO_UDP)
+ nh_off -= thlen;
+
++	/* The kernel has special handling for GSO_BY_FRAGS. */
++ if (gso_size == GSO_BY_FRAGS)
++ return -EINVAL;
++
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - nh_off > gso_size) {
+ shinfo->gso_size = gso_size;
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 7d04c1b588c73..03bff85e365f4 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -20,6 +20,8 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
+ #define WQ_FLAG_EXCLUSIVE 0x01
+ #define WQ_FLAG_WOKEN 0x02
+ #define WQ_FLAG_BOOKMARK 0x04
++#define WQ_FLAG_CUSTOM 0x08
++#define WQ_FLAG_DONE 0x10
+
+ /*
+ * A single wait-queue entry structure:
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index 0b9c3a287061e..57b48c33f56cf 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -401,7 +401,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ static inline
+ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->out_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+@@ -413,7 +420,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ static inline
+ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->cap_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index a3698f0fb2a6d..9e9ccbade3b54 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -686,37 +686,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
+ }
+
+ /* Caller must hold rcu_read_lock() for read */
+-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+- const u8 *mac)
++static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
+ {
+ struct list_head *iter;
+ struct slave *tmp;
+
+- bond_for_each_slave_rcu(bond, tmp, iter)
+- if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+- return tmp;
+-
+- return NULL;
+-}
+-
+-/* Caller must hold rcu_read_lock() for read */
+-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+-{
+- struct list_head *iter;
+- struct slave *tmp;
+- struct netdev_hw_addr *ha;
+-
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+-
+- if (netdev_uc_empty(bond->dev))
+- return false;
+-
+- netdev_for_each_uc_addr(ha, bond->dev)
+- if (ether_addr_equal_64bits(mac, ha->addr))
+- return true;
+-
+ return false;
+ }
+
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 4da61c950e931..5c2a73bbfabee 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -166,8 +166,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ int rtnl_delete_link(struct net_device *dev);
+ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
+
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+- struct netlink_ext_ack *exterr);
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++ struct netlink_ext_ack *exterr);
+ struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
+
+ #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ee8630d6abc16..f73ef7087a187 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1161,6 +1161,7 @@ struct proto {
+ /*
+ * Pressure flag: try to collapse.
+ * Technical note: it is used by multiple contexts non atomically.
++ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
+ * All the __sk_mem_schedule() is of this nature: accounting
+ * is strict, actions are advisory and have some latency.
+ */
+@@ -1274,6 +1275,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ return sk->sk_prot->memory_pressure != NULL;
+ }
+
++static inline bool sk_under_global_memory_pressure(const struct sock *sk)
++{
++ return sk->sk_prot->memory_pressure &&
++ !!READ_ONCE(*sk->sk_prot->memory_pressure);
++}
++
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ {
+ if (!sk->sk_prot->memory_pressure)
+@@ -1283,7 +1290,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+- return !!*sk->sk_prot->memory_pressure;
++ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
+ }
+
+ static inline long
+@@ -1337,7 +1344,7 @@ proto_memory_pressure(struct proto *prot)
+ {
+ if (!prot->memory_pressure)
+ return false;
+- return !!*prot->memory_pressure;
++ return !!READ_ONCE(*prot->memory_pressure);
+ }
+
+
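The sock.h comment above asks for READ_ONCE()/WRITE_ONCE() on every access to the pressure flag, and the converted readers plus the new sk_under_global_memory_pressure() follow that rule. A minimal illustration with a stand-in flag:

#include <linux/compiler.h>
#include <linux/types.h>

static int pressure;	/* stand-in for *sk->sk_prot->memory_pressure */

/* Lockless writer: WRITE_ONCE() prevents the store from being torn. */
static void enter_pressure(void)
{
	WRITE_ONCE(pressure, 1);
}

/* Lockless reader: READ_ONCE() prevents the load from being fused or torn. */
static bool under_pressure(void)
{
	return !!READ_ONCE(pressure);
}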
+diff --git a/include/sound/core.h b/include/sound/core.h
+index 8a80121811d94..e4b24dcb4b190 100644
+--- a/include/sound/core.h
++++ b/include/sound/core.h
+@@ -119,6 +119,9 @@ struct snd_card {
+ bool registered; /* card_dev is registered? */
+ wait_queue_head_t remove_sleep;
+
++ size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */
++ struct mutex memory_mutex; /* protection for the above */
++
+ #ifdef CONFIG_PM
+ unsigned int power_state; /* power state */
+ wait_queue_head_t power_sleep;
+diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
+index 26927a560eabc..3c716214dab1a 100644
+--- a/include/trace/events/rpm.h
++++ b/include/trace/events/rpm.h
+@@ -74,6 +74,12 @@ DEFINE_EVENT(rpm_internal, rpm_idle,
+
+ TP_ARGS(dev, flags)
+ );
++DEFINE_EVENT(rpm_internal, rpm_usage,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++);
+
+ TRACE_EVENT(rpm_return_int,
+ TP_PROTO(struct device *dev, unsigned long ip, int ret),
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index decabf5714c0f..4f85f7ed42fc5 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1563,6 +1563,14 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = percpu_pagelist_fraction_sysctl_handler,
+ .extra1 = SYSCTL_ZERO,
+ },
++ {
++ .procname = "page_lock_unfairness",
++ .data = &sysctl_page_lock_unfairness,
++ .maxlen = sizeof(sysctl_page_lock_unfairness),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = SYSCTL_ZERO,
++ },
+ #ifdef CONFIG_MMU
+ {
+ .procname = "max_map_count",
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 8006592803e1c..ad0ee4de92485 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3499,8 +3499,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
+ * will point to the same string as current_trace->name.
+ */
+ mutex_lock(&trace_types_lock);
+- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
++ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
++ /* Close iter->trace before switching to the new current tracer */
++ if (iter->trace->close)
++ iter->trace->close(iter);
+ *iter->trace = *tr->current_trace;
++ /* Reopen the new current tracer */
++ if (iter->trace->open)
++ iter->trace->open(iter);
++ }
+ mutex_unlock(&trace_types_lock);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index a745b0cee5d32..07557904dab8a 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -228,7 +228,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
+-
++ else
++ iter->private = NULL;
+ }
+
+ static void irqsoff_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index a422cf6a0358b..0b95277396fcd 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -1134,9 +1134,10 @@ probe_mem_read_user(void *dest, void *src, size_t size)
+
+ /* Note that we don't verify it, since the code does not come from user space */
+ static int
+-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
++process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+ void *base)
+ {
++ struct pt_regs *regs = rec;
+ unsigned long val;
+
+ retry:
+diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
+index 29348874ebde7..cf14a37dff8c8 100644
+--- a/kernel/trace/trace_probe_tmpl.h
++++ b/kernel/trace/trace_probe_tmpl.h
+@@ -54,7 +54,7 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf)
+ * If dest is NULL, don't store result and return required dynamic data size.
+ */
+ static int
+-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
++process_fetch_insn(struct fetch_insn *code, void *rec,
+ void *dest, void *base);
+ static nokprobe_inline int fetch_store_strlen(unsigned long addr);
+ static nokprobe_inline int
+@@ -190,7 +190,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
+
+ /* Store the value of each argument */
+ static nokprobe_inline void
+-store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
++store_trace_args(void *data, struct trace_probe *tp, void *rec,
+ int header_size, int maxlen)
+ {
+ struct probe_arg *arg;
+@@ -205,12 +205,14 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
+ /* Point the dynamic data area if needed */
+ if (unlikely(arg->dynamic))
+ *dl = make_data_loc(maxlen, dyndata - base);
+- ret = process_fetch_insn(arg->code, regs, dl, base);
+- if (unlikely(ret < 0 && arg->dynamic)) {
+- *dl = make_data_loc(0, dyndata - base);
+- } else {
+- dyndata += ret;
+- maxlen -= ret;
++ ret = process_fetch_insn(arg->code, rec, dl, base);
++ if (arg->dynamic) {
++ if (unlikely(ret < 0)) {
++ *dl = make_data_loc(0, dyndata - base);
++ } else {
++ dyndata += ret;
++ maxlen -= ret;
++ }
+ }
+ }
+ }
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index 617e297f46dcc..7b2d8f776ae25 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -171,6 +171,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
+ {
+ if (is_graph(iter->tr))
+ graph_trace_open(iter);
++ else
++ iter->private = NULL;
+ }
+
+ static void wakeup_trace_close(struct trace_iterator *iter)
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index efb51a23a14f2..1a566bc675485 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -217,9 +217,10 @@ static unsigned long translate_user_vaddr(unsigned long file_offset)
+
+ /* Note that we don't verify it, since the code does not come from user space */
+ static int
+-process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
++process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+ void *base)
+ {
++ struct pt_regs *regs = rec;
+ unsigned long val;
+
+ /* 1st stage: get value from context */
+diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
+index 0d3a686b5ba29..fb8c0c5c2bd27 100644
+--- a/lib/clz_ctz.c
++++ b/lib/clz_ctz.c
+@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
+ }
+ EXPORT_SYMBOL(__clzsi2);
+
+-int __weak __clzdi2(long val);
+-int __weak __ctzdi2(long val);
+-#if BITS_PER_LONG == 32
+-
+-int __weak __clzdi2(long val)
++int __weak __clzdi2(u64 val);
++int __weak __clzdi2(u64 val)
+ {
+- return 32 - fls((int)val);
++ return 64 - fls64(val);
+ }
+ EXPORT_SYMBOL(__clzdi2);
+
+-int __weak __ctzdi2(long val)
++int __weak __ctzdi2(u64 val);
++int __weak __ctzdi2(u64 val)
+ {
+- return __ffs((u32)val);
++ return __ffs64(val);
+ }
+ EXPORT_SYMBOL(__ctzdi2);
+-
+-#elif BITS_PER_LONG == 64
+-
+-int __weak __clzdi2(long val)
+-{
+- return 64 - fls64((u64)val);
+-}
+-EXPORT_SYMBOL(__clzdi2);
+-
+-int __weak __ctzdi2(long val)
+-{
+- return __ffs64((u64)val);
+-}
+-EXPORT_SYMBOL(__ctzdi2);
+-
+-#else
+-#error BITS_PER_LONG not 32 or 64
+-#endif
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 18349781847ca..4121aab98b06e 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1144,7 +1144,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
+ void __rcu **radix_tree_iter_resume(void __rcu **slot,
+ struct radix_tree_iter *iter)
+ {
+- slot++;
+ iter->index = __radix_tree_iter_add(iter, 1);
+ iter->next_index = iter->index;
+ iter->tags = 0;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index adc27af737c64..f1ed0400c37c3 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1044,9 +1044,43 @@ struct wait_page_queue {
+ wait_queue_entry_t wait;
+ };
+
++/*
++ * The page wait code treats the "wait->flags" somewhat unusually, because
++ * we have multiple different kinds of waits, not just the usual "exclusive"
++ * one.
++ *
++ * We have:
++ *
++ * (a) no special bits set:
++ *
++ * We're just waiting for the bit to be released, and when a waker
++ * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
++ * and remove it from the wait queue.
++ *
++ * Simple and straightforward.
++ *
++ * (b) WQ_FLAG_EXCLUSIVE:
++ *
++ * The waiter is waiting to get the lock, and only one waiter should
++ * be woken up to avoid any thundering herd behavior. We'll set the
++ * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
++ *
++ * This is the traditional exclusive wait.
++ *
++ * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
++ *
++ * The waiter is waiting to get the bit, and additionally wants the
++ * lock to be transferred to it for fair lock behavior. If the lock
++ * cannot be taken, we stop walking the wait queue without waking
++ * the waiter.
++ *
++ * This is the "fair lock handoff" case, and in addition to setting
++ * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
++ * that it now has the lock.
++ */
+ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
+ {
+- int ret;
++ unsigned int flags;
+ struct wait_page_key *key = arg;
+ struct wait_page_queue *wait_page
+ = container_of(wait, struct wait_page_queue, wait);
+@@ -1059,35 +1093,44 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
+ return 0;
+
+ /*
+- * If it's an exclusive wait, we get the bit for it, and
+- * stop walking if we can't.
+- *
+- * If it's a non-exclusive wait, then the fact that this
+- * wake function was called means that the bit already
+- * was cleared, and we don't care if somebody then
+- * re-took it.
++ * If it's a lock handoff wait, we get the bit for it, and
++ * stop walking (and do not wake it up) if we can't.
+ */
+- ret = 0;
+- if (wait->flags & WQ_FLAG_EXCLUSIVE) {
+- if (test_and_set_bit(key->bit_nr, &key->page->flags))
++ flags = wait->flags;
++ if (flags & WQ_FLAG_EXCLUSIVE) {
++ if (test_bit(key->bit_nr, &key->page->flags))
+ return -1;
+- ret = 1;
++ if (flags & WQ_FLAG_CUSTOM) {
++ if (test_and_set_bit(key->bit_nr, &key->page->flags))
++ return -1;
++ flags |= WQ_FLAG_DONE;
++ }
+ }
+- wait->flags |= WQ_FLAG_WOKEN;
+
++ /*
++ * We are holding the wait-queue lock, but the waiter that
++ * is waiting for this will be checking the flags without
++ * any locking.
++ *
++ * So update the flags atomically, and wake up the waiter
++ * afterwards to avoid any races. This store-release pairs
++ * with the load-acquire in wait_on_page_bit_common().
++ */
++ smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
+ wake_up_state(wait->private, mode);
+
+ /*
+ * Ok, we have successfully done what we're waiting for,
+ * and we can unconditionally remove the wait entry.
+ *
+- * Note that this has to be the absolute last thing we do,
+- * since after list_del_init(&wait->entry) the wait entry
++ * Note that this pairs with the "finish_wait()" in the
++ * waiter, and has to be the absolute last thing we do.
++ * After this list_del_init(&wait->entry) the wait entry
+ * might be de-allocated and the process might even have
+ * exited.
+ */
+ list_del_init_careful(&wait->entry);
+- return ret;
++ return (flags & WQ_FLAG_EXCLUSIVE) != 0;
+ }
+
+ static void wake_up_page_bit(struct page *page, int bit_nr)
+@@ -1167,8 +1210,8 @@ enum behavior {
+ };
+
+ /*
+- * Attempt to check (or get) the page bit, and mark the
+- * waiter woken if successful.
++ * Attempt to check (or get) the page bit, and mark us done
++ * if successful.
+ */
+ static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
+ struct wait_queue_entry *wait)
+@@ -1179,13 +1222,17 @@ static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
+ } else if (test_bit(bit_nr, &page->flags))
+ return false;
+
+- wait->flags |= WQ_FLAG_WOKEN;
++ wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
+ return true;
+ }
+
++/* How many times do we accept lock stealing from under a waiter? */
++int sysctl_page_lock_unfairness = 5;
++
+ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+ struct page *page, int bit_nr, int state, enum behavior behavior)
+ {
++ int unfairness = sysctl_page_lock_unfairness;
+ struct wait_page_queue wait_page;
+ wait_queue_entry_t *wait = &wait_page.wait;
+ bool thrashing = false;
+@@ -1203,11 +1250,18 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+ }
+
+ init_wait(wait);
+- wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
+ wait->func = wake_page_function;
+ wait_page.page = page;
+ wait_page.bit_nr = bit_nr;
+
++repeat:
++ wait->flags = 0;
++ if (behavior == EXCLUSIVE) {
++ wait->flags = WQ_FLAG_EXCLUSIVE;
++ if (--unfairness < 0)
++ wait->flags |= WQ_FLAG_CUSTOM;
++ }
++
+ /*
+ * Do one last check whether we can get the
+ * page bit synchronously.
+@@ -1230,27 +1284,63 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+
+ /*
+ * From now on, all the logic will be based on
+- * the WQ_FLAG_WOKEN flag, and the and the page
+- * bit testing (and setting) will be - or has
+- * already been - done by the wake function.
++	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flags, to
++ * see whether the page bit testing has already
++ * been done by the wake function.
+ *
+ * We can drop our reference to the page.
+ */
+ if (behavior == DROP)
+ put_page(page);
+
++ /*
++ * Note that until the "finish_wait()", or until
++ * we see the WQ_FLAG_WOKEN flag, we need to
++ * be very careful with the 'wait->flags', because
++ * we may race with a waker that sets them.
++ */
+ for (;;) {
++ unsigned int flags;
++
+ set_current_state(state);
+
+- if (signal_pending_state(state, current))
++ /* Loop until we've been woken or interrupted */
++ flags = smp_load_acquire(&wait->flags);
++ if (!(flags & WQ_FLAG_WOKEN)) {
++ if (signal_pending_state(state, current))
++ break;
++
++ io_schedule();
++ continue;
++ }
++
++ /* If we were non-exclusive, we're done */
++ if (behavior != EXCLUSIVE)
+ break;
+
+- if (wait->flags & WQ_FLAG_WOKEN)
++ /* If the waker got the lock for us, we're done */
++ if (flags & WQ_FLAG_DONE)
+ break;
+
+- io_schedule();
++ /*
++ * Otherwise, if we're getting the lock, we need to
++ * try to get it ourselves.
++ *
++ * And if that fails, we'll have to retry this all.
++ */
++ if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
++ goto repeat;
++
++ wait->flags |= WQ_FLAG_DONE;
++ break;
+ }
+
++ /*
++ * If a signal happened, this 'finish_wait()' may remove the last
++ * waiter from the wait-queues, but the PageWaiters bit will remain
++ * set. That's ok. The next wakeup will take care of it, and trying
++ * to do it here would be difficult and prone to races.
++ */
+ finish_wait(q, wait);
+
+ if (thrashing) {
+@@ -1260,12 +1350,20 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+ }
+
+ /*
+- * A signal could leave PageWaiters set. Clearing it here if
+- * !waitqueue_active would be possible (by open-coding finish_wait),
+- * but still fail to catch it in the case of wait hash collision. We
+- * already can fail to clear wait hash collision cases, so don't
+- * bother with signals either.
++ * NOTE! The wait->flags weren't stable until we've done the
++ * 'finish_wait()', and we could have exited the loop above due
++ * to a signal, and had a wakeup event happen after the signal
++ * test but before the 'finish_wait()'.
++ *
++ * So only after the finish_wait() can we reliably determine
++ * if we got woken up or not, so we can now figure out the final
++ * return value based on that state without races.
++ *
++ * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
++ * waiter, but an exclusive one requires WQ_FLAG_DONE.
+ */
++ if (behavior == EXCLUSIVE)
++ return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
+
+ return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
+ }
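The filemap.c rework above bounds how often the page lock may be stolen out from under a waiter before the waker must hand the lock over directly (WQ_FLAG_CUSTOM, reported back via WQ_FLAG_DONE). A single-waiter userspace analogue of that policy, written with C11 atomics; every name here is made up, and this is a sketch of the idea rather than of the kernel code:

#include <stdatomic.h>
#include <sched.h>

#define UNFAIRNESS 5			/* mirrors the sysctl default above */

struct ulock {
	atomic_int locked;		/* 0 = free, 1 = held */
	atomic_int handoff;		/* the one waiter demands a handoff */
};

static void ulock_acquire(struct ulock *l)
{
	int expected = 0;
	int steals = UNFAIRNESS;

	/* Unfair phase: grab the lock whenever it happens to be free. */
	while (!atomic_compare_exchange_strong(&l->locked, &expected, 1)) {
		expected = 0;
		if (--steals < 0)
			goto fair;
		sched_yield();
	}
	return;

fair:
	/* Fair phase: demand a handoff, but keep trying in case the lock
	 * was released before the demand became visible. */
	atomic_store(&l->handoff, 1);
	for (;;) {
		if (!atomic_load(&l->handoff))
			return;		/* owner handed the lock to us */
		expected = 0;
		if (atomic_compare_exchange_strong(&l->locked, &expected, 1)) {
			atomic_store(&l->handoff, 0);
			return;		/* took it ourselves after all */
		}
		sched_yield();
	}
}

static void ulock_release(struct ulock *l)
{
	/* If a handoff was demanded, keep 'locked' set and clear 'handoff'
	 * to transfer ownership directly; otherwise release normally. */
	if (atomic_exchange(&l->handoff, 0))
		return;
	atomic_store(&l->locked, 0);
}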
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index a39af0eefad31..aae73f94b2c8e 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -501,7 +501,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_elp_packet *elp_packet;
+ struct batadv_hard_iface *primary_if;
+- struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
++ struct ethhdr *ethhdr;
+ bool res;
+ int ret = NET_RX_DROP;
+
+@@ -509,6 +509,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
+ if (!res)
+ goto free_skb;
+
++ ethhdr = eth_hdr(skb);
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
+index 3165f6ff8ee71..f13a779b86563 100644
+--- a/net/batman-adv/bat_v_ogm.c
++++ b/net/batman-adv/bat_v_ogm.c
+@@ -122,8 +122,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
+ {
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+
+- if (hard_iface->if_status != BATADV_IF_ACTIVE)
++ if (hard_iface->if_status != BATADV_IF_ACTIVE) {
++ kfree_skb(skb);
+ return;
++ }
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+@@ -994,7 +996,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ {
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm2_packet *ogm_packet;
+- struct ethhdr *ethhdr = eth_hdr(skb);
++ struct ethhdr *ethhdr;
+ int ogm_offset;
+ u8 *packet_pos;
+ int ret = NET_RX_DROP;
+@@ -1008,6 +1010,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
+ goto free_skb;
+
++ ethhdr = eth_hdr(skb);
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
+ goto free_skb;
+
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 5f44c94ad707b..073019f2e4519 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -632,7 +632,19 @@ out:
+ */
+ void batadv_update_min_mtu(struct net_device *soft_iface)
+ {
+- soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
++ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
++ int limit_mtu;
++ int mtu;
++
++ mtu = batadv_hardif_min_mtu(soft_iface);
++
++ if (bat_priv->mtu_set_by_user)
++ limit_mtu = bat_priv->mtu_set_by_user;
++ else
++ limit_mtu = ETH_DATA_LEN;
++
++ mtu = min(mtu, limit_mtu);
++ dev_set_mtu(soft_iface, mtu);
+
+ /* Check if the local translate table should be cleaned up to match a
+ * new (and smaller) MTU.
+diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
+index e59c5aa27ee0b..f3e102a1201bc 100644
+--- a/net/batman-adv/netlink.c
++++ b/net/batman-adv/netlink.c
+@@ -496,7 +496,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
+ attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
+
+ atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
++
++ rtnl_lock();
+ batadv_update_min_mtu(bat_priv->soft_iface);
++ rtnl_unlock();
+ }
+
+ if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 504e3cb67bed4..bd06d5b5314e6 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -156,11 +156,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
+
+ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
+ {
++ struct batadv_priv *bat_priv = netdev_priv(dev);
++
+ /* check ranges */
+ if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
++ bat_priv->mtu_set_by_user = new_mtu;
+
+ return 0;
+ }
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 515205d7b650f..a01b0277bdb17 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -780,7 +780,6 @@ check_roaming:
+ if (roamed_back) {
+ batadv_tt_global_free(bat_priv, tt_global,
+ "Roaming canceled");
+- tt_global = NULL;
+ } else {
+ /* The global entry has to be marked as ROAMING and
+ * has to be kept for consistency purpose
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 4d7f1baee7b7d..9fdf1be9b99b6 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -1563,6 +1563,12 @@ struct batadv_priv {
+ /** @soft_iface: net device which holds this struct as private data */
+ struct net_device *soft_iface;
+
++ /**
++ * @mtu_set_by_user: MTU was set once by user
++ * protected by rtnl_lock
++ */
++ int mtu_set_by_user;
++
+ /**
+ * @bat_counters: mesh internal traffic statistic counters (see
+ * batadv_counters)
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index e56863587ea2e..61bf489265505 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5723,9 +5723,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ if (!chan)
+ goto done;
+
++ chan = l2cap_chan_hold_unless_zero(chan);
++ if (!chan)
++ goto done;
++
+ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ done:
+ mutex_unlock(&conn->chan_lock);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3eaf7c706b0ec..bb1a273840775 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2034,13 +2034,27 @@ out_err:
+ return err;
+ }
+
+-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+- struct netlink_ext_ack *exterr)
++int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
++ struct netlink_ext_ack *exterr)
+ {
+- return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
++ const struct ifinfomsg *ifmp;
++ const struct nlattr *attrs;
++ size_t len;
++
++ ifmp = nla_data(nla_peer);
++ attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
++ len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
++
++ if (ifmp->ifi_index < 0) {
++ NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
++ "ifindex can't be negative");
++ return -EINVAL;
++ }
++
++ return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
+ exterr);
+ }
+-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
++EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
+
+ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+ {
+@@ -3062,9 +3076,12 @@ replay:
+ ifname[0] = '\0';
+
+ ifm = nlmsg_data(nlh);
+- if (ifm->ifi_index > 0)
++ if (ifm->ifi_index > 0) {
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+- else {
++ } else if (ifm->ifi_index < 0) {
++ NL_SET_ERR_MSG(extack, "ifindex can't be negative");
++ return -EINVAL;
++ } else {
+ if (ifname[0])
+ dev = __dev_get_by_name(net, ifname);
+ else
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 636427d400d7f..69b4158a29f74 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2631,7 +2631,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+ mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+
+- if (sk_under_memory_pressure(sk) &&
++ if (sk_under_global_memory_pressure(sk) &&
+ (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+ sk_leave_memory_pressure(sk);
+ }
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index cd868556452ec..491b148afa8f0 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
+ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+ {
+- __poll_t mask;
+ struct sock *sk = sock->sk;
++ __poll_t mask;
++ u8 shutdown;
++ int state;
+
+ sock_poll_wait(file, sock, wait);
+- if (sk->sk_state == DCCP_LISTEN)
++
++ state = inet_sk_state_load(sk);
++ if (state == DCCP_LISTEN)
+ return inet_csk_listen_poll(sk);
+
+ /* Socket is not locked. We are protected from async events
+@@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ */
+
+ mask = 0;
+- if (sk->sk_err)
++ if (READ_ONCE(sk->sk_err))
+ mask = EPOLLERR;
++ shutdown = READ_ONCE(sk->sk_shutdown);
+
+- if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
++ if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
+ mask |= EPOLLHUP;
+- if (sk->sk_shutdown & RCV_SHUTDOWN)
++ if (shutdown & RCV_SHUTDOWN)
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+
+ /* Connected? */
+- if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
++ if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+ if (atomic_read(&sk->sk_rmem_alloc) > 0)
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+- if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
++ if (!(shutdown & SEND_SHUTDOWN)) {
+ if (sk_stream_is_writeable(sk)) {
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ } else { /* send SIGIO later */
+@@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
+ }
+ return mask;
+ }
+-
+ EXPORT_SYMBOL_GPL(dccp_poll);
+
+ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index bd41354ed8c11..275f2ecf0ba60 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -314,12 +314,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index a0107eb02ae4c..551c4a78f68d4 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -573,7 +573,9 @@ out_reset_timer:
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
++ icsk->icsk_rto = clamp(__tcp_set_rto(tp),
++ tcp_rto_min(sk),
++ TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 8b44d3b53844e..e4cd6909e9bbc 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -558,12 +558,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ vti6_addr_conflict(t, ipv6_hdr(skb)))
+ goto tx_err;
+
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 1a33c46d9c894..ce844919b2eb3 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1852,9 +1852,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+
+- if ((xfilter->sadb_x_filter_splen >=
++ if ((xfilter->sadb_x_filter_splen >
+ (sizeof(xfrm_address_t) << 3)) ||
+- (xfilter->sadb_x_filter_dplen >=
++ (xfilter->sadb_x_filter_dplen >
+ (sizeof(xfrm_address_t) << 3))) {
+ mutex_unlock(&pfk->dump_lock);
+ return -EINVAL;
+diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
+index 9bd12f7517ed5..6710f6b8764be 100644
+--- a/net/ncsi/ncsi-manage.c
++++ b/net/ncsi/ncsi-manage.c
+@@ -770,9 +770,6 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
+ return -1;
+ }
+
+- /* Set the flag for GMA command which should only be called once */
+- nca->ndp->gma_flag = 1;
+-
+ /* Get Mac address from NCSI device */
+ return nch->handler(nca);
+ }
+diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
+index 7c893c3799202..e1c6bb4ab98fd 100644
+--- a/net/ncsi/ncsi-rsp.c
++++ b/net/ncsi/ncsi-rsp.c
+@@ -627,6 +627,9 @@ static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
+ saddr.sa_family = ndev->type;
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN);
++ /* Set the flag for GMA command which should only be called once */
++ ndp->gma_flag = 1;
++
+ ret = ops->ndo_set_mac_address(ndev, &saddr);
+ if (ret < 0)
+ netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
+@@ -671,6 +674,9 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
+ if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+ return -ENXIO;
+
++ /* Set the flag for GMA command which should only be called once */
++ ndp->gma_flag = 1;
++
+ ret = ops->ndo_set_mac_address(ndev, &saddr);
+ if (ret < 0)
+ netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 07242503d74d3..2bc82dabfe3b8 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1759,6 +1759,7 @@ static int
+ proc_do_sync_threshold(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
++ struct netns_ipvs *ipvs = table->extra2;
+ int *valp = table->data;
+ int val[2];
+ int rc;
+@@ -1768,6 +1769,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ .mode = table->mode,
+ };
+
++ mutex_lock(&ipvs->sync_mutex);
+ memcpy(val, valp, sizeof(val));
+ rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write) {
+@@ -1777,6 +1779,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ else
+ memcpy(valp, val, sizeof(val));
+ }
++ mutex_unlock(&ipvs->sync_mutex);
+ return rc;
+ }
+
+@@ -4034,6 +4037,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
+ ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+ ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
++ tbl[idx].extra2 = ipvs;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+ tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index cec4b16170a0b..21cbaf6dac331 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
+ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
+- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
+- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ };
+@@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
+ /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+ /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+ /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 8aca2fdc0664c..e0c17217817d6 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -161,6 +161,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
++ if (set->flags & NFT_SET_OBJECT)
++ return -EOPNOTSUPP;
++
+ if (set->ops->update == NULL)
+ return -EOPNOTSUPP;
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 6ca0cba8aad16..d07146a2d0bba 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1503,10 +1503,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ return 0;
+ }
+
++static bool req_create_or_replace(struct nlmsghdr *n)
++{
++ return (n->nlmsg_flags & NLM_F_CREATE &&
++ n->nlmsg_flags & NLM_F_REPLACE);
++}
++
++static bool req_create_exclusive(struct nlmsghdr *n)
++{
++ return (n->nlmsg_flags & NLM_F_CREATE &&
++ n->nlmsg_flags & NLM_F_EXCL);
++}
++
++static bool req_change(struct nlmsghdr *n)
++{
++ return (!(n->nlmsg_flags & NLM_F_CREATE) &&
++ !(n->nlmsg_flags & NLM_F_REPLACE) &&
++ !(n->nlmsg_flags & NLM_F_EXCL));
++}
++
+ /*
+ * Create/change qdisc.
+ */
+-
+ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1603,27 +1621,35 @@ replay:
+ *
+ * We know, that some child q is already
+ * attached to this parent and have choice:
+- * either to change it or to create/graft new one.
++ * 1) change it or 2) create/graft new one.
++ * If the requested qdisc kind is different
++ * than the existing one, then we choose graft.
++ * If they are the same then this is "change"
++	 * operation - just let it fall through.
+ *
+ * 1. We are allowed to create/graft only
+- * if CREATE and REPLACE flags are set.
++ * if the request is explicitly stating
++ * "please create if it doesn't exist".
+ *
+- * 2. If EXCL is set, requestor wanted to say,
+- * that qdisc tcm_handle is not expected
++	 * 2. If the request is an exclusive create,
++ * then the qdisc tcm_handle is not expected
+ * to exist, so that we choose create/graft too.
+ *
+ * 3. The last case is when no flags are set.
++ * This will happen when for example tc
++ * utility issues a "change" command.
+ * Alas, it is sort of hole in API, we
+ * cannot decide what to do unambiguously.
+- * For now we select create/graft, if
+- * user gave KIND, which does not match existing.
++ * For now we select create/graft.
+ */
+- if ((n->nlmsg_flags & NLM_F_CREATE) &&
+- (n->nlmsg_flags & NLM_F_REPLACE) &&
+- ((n->nlmsg_flags & NLM_F_EXCL) ||
+- (tca[TCA_KIND] &&
+- nla_strcmp(tca[TCA_KIND], q->ops->id))))
+- goto create_n_graft;
++ if (tca[TCA_KIND] &&
++ nla_strcmp(tca[TCA_KIND], q->ops->id)) {
++ if (req_create_or_replace(n) ||
++ req_create_exclusive(n))
++ goto create_n_graft;
++ else if (req_change(n))
++ goto create_n_graft2;
++ }
+ }
+ }
+ } else {
+@@ -1657,6 +1683,7 @@ create_n_graft:
+ NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
+ return -ENOENT;
+ }
++create_n_graft2:
+ if (clid == TC_H_INGRESS) {
+ if (dev_ingress_queue(dev)) {
+ q = qdisc_create(dev, dev_ingress_queue(dev), p,
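The three req_*() predicates introduced above make the qdisc dispatch explicit. A condensed sketch of how they partition a request whose TCA_KIND differs from the existing qdisc, assuming the predicates are in scope; the enum is illustrative:

enum qdisc_action { DO_CHANGE, DO_CREATE_N_GRAFT, DO_CREATE_N_GRAFT2 };

static enum qdisc_action dispatch(struct nlmsghdr *n, bool kind_differs)
{
	if (!kind_differs)
		return DO_CHANGE;	/* same kind: fall through to change */

	/* 'tc qdisc add' sets CREATE|EXCL, 'replace' sets CREATE|REPLACE */
	if (req_create_or_replace(n) || req_create_exclusive(n))
		return DO_CREATE_N_GRAFT;

	/* 'tc qdisc change' sets no flags: graft, skipping the CREATE check */
	if (req_change(n))
		return DO_CREATE_N_GRAFT2;

	return DO_CHANGE;
}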
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 7cff1a031f761..431b9399a781f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated;
+
+ static void sctp_enter_memory_pressure(struct sock *sk)
+ {
+- sctp_memory_pressure = 1;
++ WRITE_ONCE(sctp_memory_pressure, 1);
+ }
+
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f966b64d2939a..baf0af49c5bd4 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1979,6 +1979,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+
+ if (false) {
+ alloc_skb:
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+@@ -2018,6 +2019,7 @@ alloc_skb:
+ init_scm = false;
+ }
+
++ spin_lock(&other->sk_receive_queue.lock);
+ skb = skb_peek_tail(&other->sk_receive_queue);
+ if (tail && tail == skb) {
+ skb = newskb;
+@@ -2048,14 +2050,11 @@ alloc_skb:
+ refcount_add(size, &sk->sk_wmem_alloc);
+
+ if (newskb) {
+- err = unix_scm_to_skb(&scm, skb, false);
+- if (err)
+- goto err_state_unlock;
+- spin_lock(&other->sk_receive_queue.lock);
++ unix_scm_to_skb(&scm, skb, false);
+ __skb_queue_tail(&other->sk_receive_queue, newskb);
+- spin_unlock(&other->sk_receive_queue.lock);
+ }
+
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+
+diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
+index fbc4552d17b85..6e5e307f985e4 100644
+--- a/net/xfrm/Makefile
++++ b/net/xfrm/Makefile
+@@ -3,6 +3,8 @@
+ # Makefile for the XFRM subsystem.
+ #
+
++xfrm_interface-$(CONFIG_XFRM_INTERFACE) += xfrm_interface_core.o
++
+ obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
+ xfrm_input.o xfrm_output.o \
+ xfrm_sysctl.o xfrm_replay.o xfrm_device.o
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+deleted file mode 100644
+index 4cfa79e04e3d1..0000000000000
+--- a/net/xfrm/xfrm_interface.c
++++ /dev/null
+@@ -1,987 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * XFRM virtual interface
+- *
+- * Copyright (C) 2018 secunet Security Networks AG
+- *
+- * Author:
+- * Steffen Klassert <steffen.klassert@secunet.com>
+- */
+-
+-#include <linux/module.h>
+-#include <linux/capability.h>
+-#include <linux/errno.h>
+-#include <linux/types.h>
+-#include <linux/sockios.h>
+-#include <linux/icmp.h>
+-#include <linux/if.h>
+-#include <linux/in.h>
+-#include <linux/ip.h>
+-#include <linux/net.h>
+-#include <linux/in6.h>
+-#include <linux/netdevice.h>
+-#include <linux/if_link.h>
+-#include <linux/if_arp.h>
+-#include <linux/icmpv6.h>
+-#include <linux/init.h>
+-#include <linux/route.h>
+-#include <linux/rtnetlink.h>
+-#include <linux/netfilter_ipv6.h>
+-#include <linux/slab.h>
+-#include <linux/hash.h>
+-
+-#include <linux/uaccess.h>
+-#include <linux/atomic.h>
+-
+-#include <net/icmp.h>
+-#include <net/ip.h>
+-#include <net/ipv6.h>
+-#include <net/ip6_route.h>
+-#include <net/addrconf.h>
+-#include <net/xfrm.h>
+-#include <net/net_namespace.h>
+-#include <net/netns/generic.h>
+-#include <linux/etherdevice.h>
+-
+-static int xfrmi_dev_init(struct net_device *dev);
+-static void xfrmi_dev_setup(struct net_device *dev);
+-static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
+-static unsigned int xfrmi_net_id __read_mostly;
+-
+-struct xfrmi_net {
+- /* lists for storing interfaces in use */
+- struct xfrm_if __rcu *xfrmi[1];
+-};
+-
+-#define for_each_xfrmi_rcu(start, xi) \
+- for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
+-
+-static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
+-{
+- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+- struct xfrm_if *xi;
+-
+- for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
+- if (x->if_id == xi->p.if_id &&
+- (xi->dev->flags & IFF_UP))
+- return xi;
+- }
+-
+- return NULL;
+-}
+-
+-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
+- unsigned short family)
+-{
+- struct xfrmi_net *xfrmn;
+- struct xfrm_if *xi;
+- int ifindex = 0;
+-
+- if (!secpath_exists(skb) || !skb->dev)
+- return NULL;
+-
+- switch (family) {
+- case AF_INET6:
+- ifindex = inet6_sdif(skb);
+- break;
+- case AF_INET:
+- ifindex = inet_sdif(skb);
+- break;
+- }
+- if (!ifindex)
+- ifindex = skb->dev->ifindex;
+-
+- xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
+-
+- for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
+- if (ifindex == xi->dev->ifindex &&
+- (xi->dev->flags & IFF_UP))
+- return xi;
+- }
+-
+- return NULL;
+-}
+-
+-static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
+-{
+- struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];
+-
+- rcu_assign_pointer(xi->next , rtnl_dereference(*xip));
+- rcu_assign_pointer(*xip, xi);
+-}
+-
+-static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
+-{
+- struct xfrm_if __rcu **xip;
+- struct xfrm_if *iter;
+-
+- for (xip = &xfrmn->xfrmi[0];
+- (iter = rtnl_dereference(*xip)) != NULL;
+- xip = &iter->next) {
+- if (xi == iter) {
+- rcu_assign_pointer(*xip, xi->next);
+- break;
+- }
+- }
+-}
+-
+-static void xfrmi_dev_free(struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+-
+- gro_cells_destroy(&xi->gro_cells);
+- free_percpu(dev->tstats);
+-}
+-
+-static int xfrmi_create(struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct net *net = dev_net(dev);
+- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+- int err;
+-
+- dev->rtnl_link_ops = &xfrmi_link_ops;
+- err = register_netdevice(dev);
+- if (err < 0)
+- goto out;
+-
+- dev_hold(dev);
+- xfrmi_link(xfrmn, xi);
+-
+- return 0;
+-
+-out:
+- return err;
+-}
+-
+-static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
+-{
+- struct xfrm_if __rcu **xip;
+- struct xfrm_if *xi;
+- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-
+- for (xip = &xfrmn->xfrmi[0];
+- (xi = rtnl_dereference(*xip)) != NULL;
+- xip = &xi->next)
+- if (xi->p.if_id == p->if_id)
+- return xi;
+-
+- return NULL;
+-}
+-
+-static void xfrmi_dev_uninit(struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
+-
+- xfrmi_unlink(xfrmn, xi);
+- dev_put(dev);
+-}
+-
+-static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
+-{
+- skb->tstamp = 0;
+- skb->pkt_type = PACKET_HOST;
+- skb->skb_iif = 0;
+- skb->ignore_df = 0;
+- skb_dst_drop(skb);
+- nf_reset_ct(skb);
+- nf_reset_trace(skb);
+-
+- if (!xnet)
+- return;
+-
+- ipvs_reset(skb);
+- secpath_reset(skb);
+- skb_orphan(skb);
+- skb->mark = 0;
+-}
+-
+-static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
+-{
+- const struct xfrm_mode *inner_mode;
+- struct pcpu_sw_netstats *tstats;
+- struct net_device *dev;
+- struct xfrm_state *x;
+- struct xfrm_if *xi;
+- bool xnet;
+-
+- if (err && !secpath_exists(skb))
+- return 0;
+-
+- x = xfrm_input_state(skb);
+-
+- xi = xfrmi_lookup(xs_net(x), x);
+- if (!xi)
+- return 1;
+-
+- dev = xi->dev;
+- skb->dev = dev;
+-
+- if (err) {
+- dev->stats.rx_errors++;
+- dev->stats.rx_dropped++;
+-
+- return 0;
+- }
+-
+- xnet = !net_eq(xi->net, dev_net(skb->dev));
+-
+- if (xnet) {
+- inner_mode = &x->inner_mode;
+-
+- if (x->sel.family == AF_UNSPEC) {
+- inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+- if (inner_mode == NULL) {
+- XFRM_INC_STATS(dev_net(skb->dev),
+- LINUX_MIB_XFRMINSTATEMODEERROR);
+- return -EINVAL;
+- }
+- }
+-
+- if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
+- inner_mode->family))
+- return -EPERM;
+- }
+-
+- xfrmi_scrub_packet(skb, xnet);
+-
+- tstats = this_cpu_ptr(dev->tstats);
+-
+- u64_stats_update_begin(&tstats->syncp);
+- tstats->rx_packets++;
+- tstats->rx_bytes += skb->len;
+- u64_stats_update_end(&tstats->syncp);
+-
+- return 0;
+-}
+-
+-static int
+-xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct net_device_stats *stats = &xi->dev->stats;
+- struct dst_entry *dst = skb_dst(skb);
+- unsigned int length = skb->len;
+- struct net_device *tdev;
+- struct xfrm_state *x;
+- int err = -1;
+- int mtu;
+-
+- dst_hold(dst);
+- dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
+- if (IS_ERR(dst)) {
+- err = PTR_ERR(dst);
+- dst = NULL;
+- goto tx_err_link_failure;
+- }
+-
+- x = dst->xfrm;
+- if (!x)
+- goto tx_err_link_failure;
+-
+- if (x->if_id != xi->p.if_id)
+- goto tx_err_link_failure;
+-
+- tdev = dst->dev;
+-
+- if (tdev == dev) {
+- stats->collisions++;
+- net_warn_ratelimited("%s: Local routing loop detected!\n",
+- dev->name);
+- goto tx_err_dst_release;
+- }
+-
+- mtu = dst_mtu(dst);
+- if (skb->len > mtu) {
+- skb_dst_update_pmtu_no_confirm(skb, mtu);
+-
+- if (skb->protocol == htons(ETH_P_IPV6)) {
+- if (mtu < IPV6_MIN_MTU)
+- mtu = IPV6_MIN_MTU;
+-
+- if (skb->len > 1280)
+- icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+- else
+- goto xmit;
+- } else {
+- if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+- goto xmit;
+- icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+- htonl(mtu));
+- }
+-
+- dst_release(dst);
+- return -EMSGSIZE;
+- }
+-
+-xmit:
+- xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
+- skb_dst_set(skb, dst);
+- skb->dev = tdev;
+-
+- err = dst_output(xi->net, skb->sk, skb);
+- if (net_xmit_eval(err) == 0) {
+- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+-
+- u64_stats_update_begin(&tstats->syncp);
+- tstats->tx_bytes += length;
+- tstats->tx_packets++;
+- u64_stats_update_end(&tstats->syncp);
+- } else {
+- stats->tx_errors++;
+- stats->tx_aborted_errors++;
+- }
+-
+- return 0;
+-tx_err_link_failure:
+- stats->tx_carrier_errors++;
+- dst_link_failure(skb);
+-tx_err_dst_release:
+- dst_release(dst);
+- return err;
+-}
+-
+-static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct net_device_stats *stats = &xi->dev->stats;
+- struct dst_entry *dst = skb_dst(skb);
+- struct flowi fl;
+- int ret;
+-
+- memset(&fl, 0, sizeof(fl));
+-
+- switch (skb->protocol) {
+- case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+- memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+- if (!dst) {
+- fl.u.ip6.flowi6_oif = dev->ifindex;
+- fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+- dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+- if (dst->error) {
+- dst_release(dst);
+- stats->tx_carrier_errors++;
+- goto tx_err;
+- }
+- skb_dst_set(skb, dst);
+- }
+- break;
+- case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+- if (!dst) {
+- struct rtable *rt;
+-
+- fl.u.ip4.flowi4_oif = dev->ifindex;
+- fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+- rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+- if (IS_ERR(rt)) {
+- stats->tx_carrier_errors++;
+- goto tx_err;
+- }
+- skb_dst_set(skb, &rt->dst);
+- }
+- break;
+- default:
+- goto tx_err;
+- }
+-
+- fl.flowi_oif = xi->p.link;
+-
+- ret = xfrmi_xmit2(skb, dev, &fl);
+- if (ret < 0)
+- goto tx_err;
+-
+- return NETDEV_TX_OK;
+-
+-tx_err:
+- stats->tx_errors++;
+- stats->tx_dropped++;
+- kfree_skb(skb);
+- return NETDEV_TX_OK;
+-}
+-
+-static int xfrmi4_err(struct sk_buff *skb, u32 info)
+-{
+- const struct iphdr *iph = (const struct iphdr *)skb->data;
+- struct net *net = dev_net(skb->dev);
+- int protocol = iph->protocol;
+- struct ip_comp_hdr *ipch;
+- struct ip_esp_hdr *esph;
+- struct ip_auth_hdr *ah ;
+- struct xfrm_state *x;
+- struct xfrm_if *xi;
+- __be32 spi;
+-
+- switch (protocol) {
+- case IPPROTO_ESP:
+- esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
+- spi = esph->spi;
+- break;
+- case IPPROTO_AH:
+- ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
+- spi = ah->spi;
+- break;
+- case IPPROTO_COMP:
+- ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
+- spi = htonl(ntohs(ipch->cpi));
+- break;
+- default:
+- return 0;
+- }
+-
+- switch (icmp_hdr(skb)->type) {
+- case ICMP_DEST_UNREACH:
+- if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+- return 0;
+- case ICMP_REDIRECT:
+- break;
+- default:
+- return 0;
+- }
+-
+- x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+- spi, protocol, AF_INET);
+- if (!x)
+- return 0;
+-
+- xi = xfrmi_lookup(net, x);
+- if (!xi) {
+- xfrm_state_put(x);
+- return -1;
+- }
+-
+- if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+- ipv4_update_pmtu(skb, net, info, 0, protocol);
+- else
+- ipv4_redirect(skb, net, 0, protocol);
+- xfrm_state_put(x);
+-
+- return 0;
+-}
+-
+-static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+- u8 type, u8 code, int offset, __be32 info)
+-{
+- const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
+- struct net *net = dev_net(skb->dev);
+- int protocol = iph->nexthdr;
+- struct ip_comp_hdr *ipch;
+- struct ip_esp_hdr *esph;
+- struct ip_auth_hdr *ah;
+- struct xfrm_state *x;
+- struct xfrm_if *xi;
+- __be32 spi;
+-
+- switch (protocol) {
+- case IPPROTO_ESP:
+- esph = (struct ip_esp_hdr *)(skb->data + offset);
+- spi = esph->spi;
+- break;
+- case IPPROTO_AH:
+- ah = (struct ip_auth_hdr *)(skb->data + offset);
+- spi = ah->spi;
+- break;
+- case IPPROTO_COMP:
+- ipch = (struct ip_comp_hdr *)(skb->data + offset);
+- spi = htonl(ntohs(ipch->cpi));
+- break;
+- default:
+- return 0;
+- }
+-
+- if (type != ICMPV6_PKT_TOOBIG &&
+- type != NDISC_REDIRECT)
+- return 0;
+-
+- x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+- spi, protocol, AF_INET6);
+- if (!x)
+- return 0;
+-
+- xi = xfrmi_lookup(net, x);
+- if (!xi) {
+- xfrm_state_put(x);
+- return -1;
+- }
+-
+- if (type == NDISC_REDIRECT)
+- ip6_redirect(skb, net, skb->dev->ifindex, 0,
+- sock_net_uid(net, NULL));
+- else
+- ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
+- xfrm_state_put(x);
+-
+- return 0;
+-}
+-
+-static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
+-{
+- if (xi->p.link != p->link)
+- return -EINVAL;
+-
+- xi->p.if_id = p->if_id;
+-
+- return 0;
+-}
+-
+-static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
+-{
+- struct net *net = xi->net;
+- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+- int err;
+-
+- xfrmi_unlink(xfrmn, xi);
+- synchronize_net();
+- err = xfrmi_change(xi, p);
+- xfrmi_link(xfrmn, xi);
+- netdev_state_change(xi->dev);
+- return err;
+-}
+-
+-static void xfrmi_get_stats64(struct net_device *dev,
+- struct rtnl_link_stats64 *s)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+- struct pcpu_sw_netstats *stats;
+- struct pcpu_sw_netstats tmp;
+- int start;
+-
+- stats = per_cpu_ptr(dev->tstats, cpu);
+- do {
+- start = u64_stats_fetch_begin_irq(&stats->syncp);
+- tmp.rx_packets = stats->rx_packets;
+- tmp.rx_bytes = stats->rx_bytes;
+- tmp.tx_packets = stats->tx_packets;
+- tmp.tx_bytes = stats->tx_bytes;
+- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+-
+- s->rx_packets += tmp.rx_packets;
+- s->rx_bytes += tmp.rx_bytes;
+- s->tx_packets += tmp.tx_packets;
+- s->tx_bytes += tmp.tx_bytes;
+- }
+-
+- s->rx_dropped = dev->stats.rx_dropped;
+- s->tx_dropped = dev->stats.tx_dropped;
+-}
+-
+-static int xfrmi_get_iflink(const struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+-
+- return xi->p.link;
+-}
+-
+-
+-static const struct net_device_ops xfrmi_netdev_ops = {
+- .ndo_init = xfrmi_dev_init,
+- .ndo_uninit = xfrmi_dev_uninit,
+- .ndo_start_xmit = xfrmi_xmit,
+- .ndo_get_stats64 = xfrmi_get_stats64,
+- .ndo_get_iflink = xfrmi_get_iflink,
+-};
+-
+-static void xfrmi_dev_setup(struct net_device *dev)
+-{
+- dev->netdev_ops = &xfrmi_netdev_ops;
+- dev->type = ARPHRD_NONE;
+- dev->mtu = ETH_DATA_LEN;
+- dev->min_mtu = ETH_MIN_MTU;
+- dev->max_mtu = IP_MAX_MTU;
+- dev->flags = IFF_NOARP;
+- dev->needs_free_netdev = true;
+- dev->priv_destructor = xfrmi_dev_free;
+- netif_keep_dst(dev);
+-
+- eth_broadcast_addr(dev->broadcast);
+-}
+-
+-static int xfrmi_dev_init(struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
+- int err;
+-
+- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+- if (!dev->tstats)
+- return -ENOMEM;
+-
+- err = gro_cells_init(&xi->gro_cells, dev);
+- if (err) {
+- free_percpu(dev->tstats);
+- return err;
+- }
+-
+- dev->features |= NETIF_F_LLTX;
+-
+- if (phydev) {
+- dev->needed_headroom = phydev->needed_headroom;
+- dev->needed_tailroom = phydev->needed_tailroom;
+-
+- if (is_zero_ether_addr(dev->dev_addr))
+- eth_hw_addr_inherit(dev, phydev);
+- if (is_zero_ether_addr(dev->broadcast))
+- memcpy(dev->broadcast, phydev->broadcast,
+- dev->addr_len);
+- } else {
+- eth_hw_addr_random(dev);
+- eth_broadcast_addr(dev->broadcast);
+- }
+-
+- return 0;
+-}
+-
+-static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
+- struct netlink_ext_ack *extack)
+-{
+- return 0;
+-}
+-
+-static void xfrmi_netlink_parms(struct nlattr *data[],
+- struct xfrm_if_parms *parms)
+-{
+- memset(parms, 0, sizeof(*parms));
+-
+- if (!data)
+- return;
+-
+- if (data[IFLA_XFRM_LINK])
+- parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
+-
+- if (data[IFLA_XFRM_IF_ID])
+- parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
+-}
+-
+-static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
+- struct nlattr *tb[], struct nlattr *data[],
+- struct netlink_ext_ack *extack)
+-{
+- struct net *net = dev_net(dev);
+- struct xfrm_if_parms p = {};
+- struct xfrm_if *xi;
+- int err;
+-
+- xfrmi_netlink_parms(data, &p);
+- if (!p.if_id) {
+- NL_SET_ERR_MSG(extack, "if_id must be non zero");
+- return -EINVAL;
+- }
+-
+- xi = xfrmi_locate(net, &p);
+- if (xi)
+- return -EEXIST;
+-
+- xi = netdev_priv(dev);
+- xi->p = p;
+- xi->net = net;
+- xi->dev = dev;
+-
+- err = xfrmi_create(dev);
+- return err;
+-}
+-
+-static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
+-{
+- unregister_netdevice_queue(dev, head);
+-}
+-
+-static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
+- struct nlattr *data[],
+- struct netlink_ext_ack *extack)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct net *net = xi->net;
+- struct xfrm_if_parms p = {};
+-
+- xfrmi_netlink_parms(data, &p);
+- if (!p.if_id) {
+- NL_SET_ERR_MSG(extack, "if_id must be non zero");
+- return -EINVAL;
+- }
+-
+- xi = xfrmi_locate(net, &p);
+- if (!xi) {
+- xi = netdev_priv(dev);
+- } else {
+- if (xi->dev != dev)
+- return -EEXIST;
+- }
+-
+- return xfrmi_update(xi, &p);
+-}
+-
+-static size_t xfrmi_get_size(const struct net_device *dev)
+-{
+- return
+- /* IFLA_XFRM_LINK */
+- nla_total_size(4) +
+- /* IFLA_XFRM_IF_ID */
+- nla_total_size(4) +
+- 0;
+-}
+-
+-static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+- struct xfrm_if_parms *parm = &xi->p;
+-
+- if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
+- nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
+- goto nla_put_failure;
+- return 0;
+-
+-nla_put_failure:
+- return -EMSGSIZE;
+-}
+-
+-static struct net *xfrmi_get_link_net(const struct net_device *dev)
+-{
+- struct xfrm_if *xi = netdev_priv(dev);
+-
+- return xi->net;
+-}
+-
+-static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
+- [IFLA_XFRM_LINK] = { .type = NLA_U32 },
+- [IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
+-};
+-
+-static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
+- .kind = "xfrm",
+- .maxtype = IFLA_XFRM_MAX,
+- .policy = xfrmi_policy,
+- .priv_size = sizeof(struct xfrm_if),
+- .setup = xfrmi_dev_setup,
+- .validate = xfrmi_validate,
+- .newlink = xfrmi_newlink,
+- .dellink = xfrmi_dellink,
+- .changelink = xfrmi_changelink,
+- .get_size = xfrmi_get_size,
+- .fill_info = xfrmi_fill_info,
+- .get_link_net = xfrmi_get_link_net,
+-};
+-
+-static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
+-{
+- struct xfrm_if *xi;
+- LIST_HEAD(list);
+-
+- xi = rtnl_dereference(xfrmn->xfrmi[0]);
+- if (!xi)
+- return;
+-
+- unregister_netdevice_queue(xi->dev, &list);
+- unregister_netdevice_many(&list);
+-}
+-
+-static void __net_exit xfrmi_exit_net(struct net *net)
+-{
+- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+-
+- rtnl_lock();
+- xfrmi_destroy_interfaces(xfrmn);
+- rtnl_unlock();
+-}
+-
+-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+-{
+- struct net *net;
+- LIST_HEAD(list);
+-
+- rtnl_lock();
+- list_for_each_entry(net, net_exit_list, exit_list) {
+- struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+- struct xfrm_if __rcu **xip;
+- struct xfrm_if *xi;
+-
+- for (xip = &xfrmn->xfrmi[0];
+- (xi = rtnl_dereference(*xip)) != NULL;
+- xip = &xi->next)
+- unregister_netdevice_queue(xi->dev, &list);
+- }
+- unregister_netdevice_many(&list);
+- rtnl_unlock();
+-}
+-
+-static struct pernet_operations xfrmi_net_ops = {
+- .exit_batch = xfrmi_exit_batch_net,
+- .exit = xfrmi_exit_net,
+- .id = &xfrmi_net_id,
+- .size = sizeof(struct xfrmi_net),
+-};
+-
+-static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
+- .handler = xfrm6_rcv,
+- .cb_handler = xfrmi_rcv_cb,
+- .err_handler = xfrmi6_err,
+- .priority = 10,
+-};
+-
+-static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
+- .handler = xfrm6_rcv,
+- .cb_handler = xfrmi_rcv_cb,
+- .err_handler = xfrmi6_err,
+- .priority = 10,
+-};
+-
+-static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
+- .handler = xfrm6_rcv,
+- .cb_handler = xfrmi_rcv_cb,
+- .err_handler = xfrmi6_err,
+- .priority = 10,
+-};
+-
+-static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
+- .handler = xfrm4_rcv,
+- .input_handler = xfrm_input,
+- .cb_handler = xfrmi_rcv_cb,
+- .err_handler = xfrmi4_err,
+- .priority = 10,
+-};
+-
+-static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
+- .handler = xfrm4_rcv,
+- .input_handler = xfrm_input,
+- .cb_handler = xfrmi_rcv_cb,
+- .err_handler = xfrmi4_err,
+- .priority = 10,
+-};
+-
+-static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
+- .handler = xfrm4_rcv,
+- .input_handler = xfrm_input,
+- .cb_handler = xfrmi_rcv_cb,
+- .err_handler = xfrmi4_err,
+- .priority = 10,
+-};
+-
+-static int __init xfrmi4_init(void)
+-{
+- int err;
+-
+- err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
+- if (err < 0)
+- goto xfrm_proto_esp_failed;
+- err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
+- if (err < 0)
+- goto xfrm_proto_ah_failed;
+- err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+- if (err < 0)
+- goto xfrm_proto_comp_failed;
+-
+- return 0;
+-
+-xfrm_proto_comp_failed:
+- xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
+-xfrm_proto_ah_failed:
+- xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
+-xfrm_proto_esp_failed:
+- return err;
+-}
+-
+-static void xfrmi4_fini(void)
+-{
+- xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
+- xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
+- xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
+-}
+-
+-static int __init xfrmi6_init(void)
+-{
+- int err;
+-
+- err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
+- if (err < 0)
+- goto xfrm_proto_esp_failed;
+- err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
+- if (err < 0)
+- goto xfrm_proto_ah_failed;
+- err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+- if (err < 0)
+- goto xfrm_proto_comp_failed;
+-
+- return 0;
+-
+-xfrm_proto_comp_failed:
+- xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
+-xfrm_proto_ah_failed:
+- xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
+-xfrm_proto_esp_failed:
+- return err;
+-}
+-
+-static void xfrmi6_fini(void)
+-{
+- xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
+- xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
+- xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
+-}
+-
+-static const struct xfrm_if_cb xfrm_if_cb = {
+- .decode_session = xfrmi_decode_session,
+-};
+-
+-static int __init xfrmi_init(void)
+-{
+- const char *msg;
+- int err;
+-
+- pr_info("IPsec XFRM device driver\n");
+-
+- msg = "tunnel device";
+- err = register_pernet_device(&xfrmi_net_ops);
+- if (err < 0)
+- goto pernet_dev_failed;
+-
+- msg = "xfrm4 protocols";
+- err = xfrmi4_init();
+- if (err < 0)
+- goto xfrmi4_failed;
+-
+- msg = "xfrm6 protocols";
+- err = xfrmi6_init();
+- if (err < 0)
+- goto xfrmi6_failed;
+-
+-
+- msg = "netlink interface";
+- err = rtnl_link_register(&xfrmi_link_ops);
+- if (err < 0)
+- goto rtnl_link_failed;
+-
+- xfrm_if_register_cb(&xfrm_if_cb);
+-
+- return err;
+-
+-rtnl_link_failed:
+- xfrmi6_fini();
+-xfrmi6_failed:
+- xfrmi4_fini();
+-xfrmi4_failed:
+- unregister_pernet_device(&xfrmi_net_ops);
+-pernet_dev_failed:
+- pr_err("xfrmi init: failed to register %s\n", msg);
+- return err;
+-}
+-
+-static void __exit xfrmi_fini(void)
+-{
+- xfrm_if_unregister_cb();
+- rtnl_link_unregister(&xfrmi_link_ops);
+- xfrmi4_fini();
+- xfrmi6_fini();
+- unregister_pernet_device(&xfrmi_net_ops);
+-}
+-
+-module_init(xfrmi_init);
+-module_exit(xfrmi_fini);
+-MODULE_LICENSE("GPL");
+-MODULE_ALIAS_RTNL_LINK("xfrm");
+-MODULE_ALIAS_NETDEV("xfrm0");
+-MODULE_AUTHOR("Steffen Klassert");
+-MODULE_DESCRIPTION("XFRM virtual interface");
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+new file mode 100644
+index 0000000000000..3dc63810c5f5a
+--- /dev/null
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -0,0 +1,987 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * XFRM virtual interface
++ *
++ * Copyright (C) 2018 secunet Security Networks AG
++ *
++ * Author:
++ * Steffen Klassert <steffen.klassert@secunet.com>
++ */
++
++#include <linux/module.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/sockios.h>
++#include <linux/icmp.h>
++#include <linux/if.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/net.h>
++#include <linux/in6.h>
++#include <linux/netdevice.h>
++#include <linux/if_link.h>
++#include <linux/if_arp.h>
++#include <linux/icmpv6.h>
++#include <linux/init.h>
++#include <linux/route.h>
++#include <linux/rtnetlink.h>
++#include <linux/netfilter_ipv6.h>
++#include <linux/slab.h>
++#include <linux/hash.h>
++
++#include <linux/uaccess.h>
++#include <linux/atomic.h>
++
++#include <net/icmp.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++#include <net/ip6_route.h>
++#include <net/addrconf.h>
++#include <net/xfrm.h>
++#include <net/net_namespace.h>
++#include <net/netns/generic.h>
++#include <linux/etherdevice.h>
++
++static int xfrmi_dev_init(struct net_device *dev);
++static void xfrmi_dev_setup(struct net_device *dev);
++static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
++static unsigned int xfrmi_net_id __read_mostly;
++
++struct xfrmi_net {
++ /* lists for storing interfaces in use */
++ struct xfrm_if __rcu *xfrmi[1];
++};
++
++#define for_each_xfrmi_rcu(start, xi) \
++ for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
++
++static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
++{
++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++ struct xfrm_if *xi;
++
++ for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
++ if (x->if_id == xi->p.if_id &&
++ (xi->dev->flags & IFF_UP))
++ return xi;
++ }
++
++ return NULL;
++}
++
++static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
++ unsigned short family)
++{
++ struct xfrmi_net *xfrmn;
++ struct xfrm_if *xi;
++ int ifindex = 0;
++
++ if (!secpath_exists(skb) || !skb->dev)
++ return NULL;
++
++ switch (family) {
++ case AF_INET6:
++ ifindex = inet6_sdif(skb);
++ break;
++ case AF_INET:
++ ifindex = inet_sdif(skb);
++ break;
++ }
++ if (!ifindex)
++ ifindex = skb->dev->ifindex;
++
++ xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
++
++ for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
++ if (ifindex == xi->dev->ifindex &&
++ (xi->dev->flags & IFF_UP))
++ return xi;
++ }
++
++ return NULL;
++}
++
++static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
++{
++ struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];
++
++	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
++ rcu_assign_pointer(*xip, xi);
++}
++
++static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
++{
++ struct xfrm_if __rcu **xip;
++ struct xfrm_if *iter;
++
++ for (xip = &xfrmn->xfrmi[0];
++ (iter = rtnl_dereference(*xip)) != NULL;
++ xip = &iter->next) {
++ if (xi == iter) {
++ rcu_assign_pointer(*xip, xi->next);
++ break;
++ }
++ }
++}
++
++static void xfrmi_dev_free(struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++
++ gro_cells_destroy(&xi->gro_cells);
++ free_percpu(dev->tstats);
++}
++
++static int xfrmi_create(struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct net *net = dev_net(dev);
++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++ int err;
++
++ dev->rtnl_link_ops = &xfrmi_link_ops;
++ err = register_netdevice(dev);
++ if (err < 0)
++ goto out;
++
++ dev_hold(dev);
++ xfrmi_link(xfrmn, xi);
++
++ return 0;
++
++out:
++ return err;
++}
++
++static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
++{
++ struct xfrm_if __rcu **xip;
++ struct xfrm_if *xi;
++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++
++ for (xip = &xfrmn->xfrmi[0];
++ (xi = rtnl_dereference(*xip)) != NULL;
++ xip = &xi->next)
++ if (xi->p.if_id == p->if_id)
++ return xi;
++
++ return NULL;
++}
++
++static void xfrmi_dev_uninit(struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
++
++ xfrmi_unlink(xfrmn, xi);
++ dev_put(dev);
++}
++
++static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
++{
++ skb->tstamp = 0;
++ skb->pkt_type = PACKET_HOST;
++ skb->skb_iif = 0;
++ skb->ignore_df = 0;
++ skb_dst_drop(skb);
++ nf_reset_ct(skb);
++ nf_reset_trace(skb);
++
++ if (!xnet)
++ return;
++
++ ipvs_reset(skb);
++ secpath_reset(skb);
++ skb_orphan(skb);
++ skb->mark = 0;
++}
++
++static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
++{
++ const struct xfrm_mode *inner_mode;
++ struct pcpu_sw_netstats *tstats;
++ struct net_device *dev;
++ struct xfrm_state *x;
++ struct xfrm_if *xi;
++ bool xnet;
++
++ if (err && !secpath_exists(skb))
++ return 0;
++
++ x = xfrm_input_state(skb);
++
++ xi = xfrmi_lookup(xs_net(x), x);
++ if (!xi)
++ return 1;
++
++ dev = xi->dev;
++ skb->dev = dev;
++
++ if (err) {
++ dev->stats.rx_errors++;
++ dev->stats.rx_dropped++;
++
++ return 0;
++ }
++
++ xnet = !net_eq(xi->net, dev_net(skb->dev));
++
++ if (xnet) {
++ inner_mode = &x->inner_mode;
++
++ if (x->sel.family == AF_UNSPEC) {
++ inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
++ if (inner_mode == NULL) {
++ XFRM_INC_STATS(dev_net(skb->dev),
++ LINUX_MIB_XFRMINSTATEMODEERROR);
++ return -EINVAL;
++ }
++ }
++
++ if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
++ inner_mode->family))
++ return -EPERM;
++ }
++
++ xfrmi_scrub_packet(skb, xnet);
++
++ tstats = this_cpu_ptr(dev->tstats);
++
++ u64_stats_update_begin(&tstats->syncp);
++ tstats->rx_packets++;
++ tstats->rx_bytes += skb->len;
++ u64_stats_update_end(&tstats->syncp);
++
++ return 0;
++}
++
++static int
++xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct net_device_stats *stats = &xi->dev->stats;
++ struct dst_entry *dst = skb_dst(skb);
++ unsigned int length = skb->len;
++ struct net_device *tdev;
++ struct xfrm_state *x;
++ int err = -1;
++ int mtu;
++
++ dst_hold(dst);
++ dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
++ if (IS_ERR(dst)) {
++ err = PTR_ERR(dst);
++ dst = NULL;
++ goto tx_err_link_failure;
++ }
++
++ x = dst->xfrm;
++ if (!x)
++ goto tx_err_link_failure;
++
++ if (x->if_id != xi->p.if_id)
++ goto tx_err_link_failure;
++
++ tdev = dst->dev;
++
++ if (tdev == dev) {
++ stats->collisions++;
++ net_warn_ratelimited("%s: Local routing loop detected!\n",
++ dev->name);
++ goto tx_err_dst_release;
++ }
++
++ mtu = dst_mtu(dst);
++ if (skb->len > mtu) {
++ skb_dst_update_pmtu_no_confirm(skb, mtu);
++
++ if (skb->protocol == htons(ETH_P_IPV6)) {
++ if (mtu < IPV6_MIN_MTU)
++ mtu = IPV6_MIN_MTU;
++
++ if (skb->len > 1280)
++ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++ else
++ goto xmit;
++ } else {
++ if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
++ goto xmit;
++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++ htonl(mtu));
++ }
++
++ dst_release(dst);
++ return -EMSGSIZE;
++ }
++
++xmit:
++ xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
++ skb_dst_set(skb, dst);
++ skb->dev = tdev;
++
++ err = dst_output(xi->net, skb->sk, skb);
++ if (net_xmit_eval(err) == 0) {
++ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
++
++ u64_stats_update_begin(&tstats->syncp);
++ tstats->tx_bytes += length;
++ tstats->tx_packets++;
++ u64_stats_update_end(&tstats->syncp);
++ } else {
++ stats->tx_errors++;
++ stats->tx_aborted_errors++;
++ }
++
++ return 0;
++tx_err_link_failure:
++ stats->tx_carrier_errors++;
++ dst_link_failure(skb);
++tx_err_dst_release:
++ dst_release(dst);
++ return err;
++}
++
++static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct net_device_stats *stats = &xi->dev->stats;
++ struct dst_entry *dst = skb_dst(skb);
++ struct flowi fl;
++ int ret;
++
++ memset(&fl, 0, sizeof(fl));
++
++ switch (skb->protocol) {
++ case htons(ETH_P_IPV6):
++ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
++ if (!dst) {
++ fl.u.ip6.flowi6_oif = dev->ifindex;
++ fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
++ dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
++ if (dst->error) {
++ dst_release(dst);
++ stats->tx_carrier_errors++;
++ goto tx_err;
++ }
++ skb_dst_set(skb, dst);
++ }
++ break;
++ case htons(ETH_P_IP):
++ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
++ if (!dst) {
++ struct rtable *rt;
++
++ fl.u.ip4.flowi4_oif = dev->ifindex;
++ fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
++ rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
++ if (IS_ERR(rt)) {
++ stats->tx_carrier_errors++;
++ goto tx_err;
++ }
++ skb_dst_set(skb, &rt->dst);
++ }
++ break;
++ default:
++ goto tx_err;
++ }
++
++ fl.flowi_oif = xi->p.link;
++
++ ret = xfrmi_xmit2(skb, dev, &fl);
++ if (ret < 0)
++ goto tx_err;
++
++ return NETDEV_TX_OK;
++
++tx_err:
++ stats->tx_errors++;
++ stats->tx_dropped++;
++ kfree_skb(skb);
++ return NETDEV_TX_OK;
++}
++
++static int xfrmi4_err(struct sk_buff *skb, u32 info)
++{
++ const struct iphdr *iph = (const struct iphdr *)skb->data;
++ struct net *net = dev_net(skb->dev);
++ int protocol = iph->protocol;
++ struct ip_comp_hdr *ipch;
++ struct ip_esp_hdr *esph;
++	struct ip_auth_hdr *ah;
++ struct xfrm_state *x;
++ struct xfrm_if *xi;
++ __be32 spi;
++
++ switch (protocol) {
++ case IPPROTO_ESP:
++ esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
++ spi = esph->spi;
++ break;
++ case IPPROTO_AH:
++ ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
++ spi = ah->spi;
++ break;
++ case IPPROTO_COMP:
++ ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
++ spi = htonl(ntohs(ipch->cpi));
++ break;
++ default:
++ return 0;
++ }
++
++ switch (icmp_hdr(skb)->type) {
++ case ICMP_DEST_UNREACH:
++ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
++ return 0;
++ case ICMP_REDIRECT:
++ break;
++ default:
++ return 0;
++ }
++
++ x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
++ spi, protocol, AF_INET);
++ if (!x)
++ return 0;
++
++ xi = xfrmi_lookup(net, x);
++ if (!xi) {
++ xfrm_state_put(x);
++ return -1;
++ }
++
++ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
++ ipv4_update_pmtu(skb, net, info, 0, protocol);
++ else
++ ipv4_redirect(skb, net, 0, protocol);
++ xfrm_state_put(x);
++
++ return 0;
++}
++
++static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
++ u8 type, u8 code, int offset, __be32 info)
++{
++ const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
++ struct net *net = dev_net(skb->dev);
++ int protocol = iph->nexthdr;
++ struct ip_comp_hdr *ipch;
++ struct ip_esp_hdr *esph;
++ struct ip_auth_hdr *ah;
++ struct xfrm_state *x;
++ struct xfrm_if *xi;
++ __be32 spi;
++
++ switch (protocol) {
++ case IPPROTO_ESP:
++ esph = (struct ip_esp_hdr *)(skb->data + offset);
++ spi = esph->spi;
++ break;
++ case IPPROTO_AH:
++ ah = (struct ip_auth_hdr *)(skb->data + offset);
++ spi = ah->spi;
++ break;
++ case IPPROTO_COMP:
++ ipch = (struct ip_comp_hdr *)(skb->data + offset);
++ spi = htonl(ntohs(ipch->cpi));
++ break;
++ default:
++ return 0;
++ }
++
++ if (type != ICMPV6_PKT_TOOBIG &&
++ type != NDISC_REDIRECT)
++ return 0;
++
++ x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
++ spi, protocol, AF_INET6);
++ if (!x)
++ return 0;
++
++ xi = xfrmi_lookup(net, x);
++ if (!xi) {
++ xfrm_state_put(x);
++ return -1;
++ }
++
++ if (type == NDISC_REDIRECT)
++ ip6_redirect(skb, net, skb->dev->ifindex, 0,
++ sock_net_uid(net, NULL));
++ else
++ ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
++ xfrm_state_put(x);
++
++ return 0;
++}
++
++static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
++{
++ if (xi->p.link != p->link)
++ return -EINVAL;
++
++ xi->p.if_id = p->if_id;
++
++ return 0;
++}
++
++static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
++{
++ struct net *net = xi->net;
++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++ int err;
++
++ xfrmi_unlink(xfrmn, xi);
++ synchronize_net();
++ err = xfrmi_change(xi, p);
++ xfrmi_link(xfrmn, xi);
++ netdev_state_change(xi->dev);
++ return err;
++}
++
++static void xfrmi_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *s)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct pcpu_sw_netstats *stats;
++ struct pcpu_sw_netstats tmp;
++ int start;
++
++ stats = per_cpu_ptr(dev->tstats, cpu);
++ do {
++ start = u64_stats_fetch_begin_irq(&stats->syncp);
++ tmp.rx_packets = stats->rx_packets;
++ tmp.rx_bytes = stats->rx_bytes;
++ tmp.tx_packets = stats->tx_packets;
++ tmp.tx_bytes = stats->tx_bytes;
++ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
++
++ s->rx_packets += tmp.rx_packets;
++ s->rx_bytes += tmp.rx_bytes;
++ s->tx_packets += tmp.tx_packets;
++ s->tx_bytes += tmp.tx_bytes;
++ }
++
++ s->rx_dropped = dev->stats.rx_dropped;
++ s->tx_dropped = dev->stats.tx_dropped;
++}
++
++static int xfrmi_get_iflink(const struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++
++ return xi->p.link;
++}
++
++
++static const struct net_device_ops xfrmi_netdev_ops = {
++ .ndo_init = xfrmi_dev_init,
++ .ndo_uninit = xfrmi_dev_uninit,
++ .ndo_start_xmit = xfrmi_xmit,
++ .ndo_get_stats64 = xfrmi_get_stats64,
++ .ndo_get_iflink = xfrmi_get_iflink,
++};
++
++static void xfrmi_dev_setup(struct net_device *dev)
++{
++ dev->netdev_ops = &xfrmi_netdev_ops;
++ dev->type = ARPHRD_NONE;
++ dev->mtu = ETH_DATA_LEN;
++ dev->min_mtu = ETH_MIN_MTU;
++ dev->max_mtu = IP_MAX_MTU;
++ dev->flags = IFF_NOARP;
++ dev->needs_free_netdev = true;
++ dev->priv_destructor = xfrmi_dev_free;
++ netif_keep_dst(dev);
++
++ eth_broadcast_addr(dev->broadcast);
++}
++
++static int xfrmi_dev_init(struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
++ int err;
++
++ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
++ if (!dev->tstats)
++ return -ENOMEM;
++
++ err = gro_cells_init(&xi->gro_cells, dev);
++ if (err) {
++ free_percpu(dev->tstats);
++ return err;
++ }
++
++ dev->features |= NETIF_F_LLTX;
++
++ if (phydev) {
++ dev->needed_headroom = phydev->needed_headroom;
++ dev->needed_tailroom = phydev->needed_tailroom;
++
++ if (is_zero_ether_addr(dev->dev_addr))
++ eth_hw_addr_inherit(dev, phydev);
++ if (is_zero_ether_addr(dev->broadcast))
++ memcpy(dev->broadcast, phydev->broadcast,
++ dev->addr_len);
++ } else {
++ eth_hw_addr_random(dev);
++ eth_broadcast_addr(dev->broadcast);
++ }
++
++ return 0;
++}
++
++static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ return 0;
++}
++
++static void xfrmi_netlink_parms(struct nlattr *data[],
++ struct xfrm_if_parms *parms)
++{
++ memset(parms, 0, sizeof(*parms));
++
++ if (!data)
++ return;
++
++ if (data[IFLA_XFRM_LINK])
++ parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);
++
++ if (data[IFLA_XFRM_IF_ID])
++ parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
++}
++
++static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct net *net = dev_net(dev);
++ struct xfrm_if_parms p = {};
++ struct xfrm_if *xi;
++ int err;
++
++ xfrmi_netlink_parms(data, &p);
++ if (!p.if_id) {
++ NL_SET_ERR_MSG(extack, "if_id must be non zero");
++ return -EINVAL;
++ }
++
++ xi = xfrmi_locate(net, &p);
++ if (xi)
++ return -EEXIST;
++
++ xi = netdev_priv(dev);
++ xi->p = p;
++ xi->net = net;
++ xi->dev = dev;
++
++ err = xfrmi_create(dev);
++ return err;
++}
++
++static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
++{
++ unregister_netdevice_queue(dev, head);
++}
++
++static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
++ struct nlattr *data[],
++ struct netlink_ext_ack *extack)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct net *net = xi->net;
++ struct xfrm_if_parms p = {};
++
++ xfrmi_netlink_parms(data, &p);
++ if (!p.if_id) {
++ NL_SET_ERR_MSG(extack, "if_id must be non zero");
++ return -EINVAL;
++ }
++
++ xi = xfrmi_locate(net, &p);
++ if (!xi) {
++ xi = netdev_priv(dev);
++ } else {
++ if (xi->dev != dev)
++ return -EEXIST;
++ }
++
++ return xfrmi_update(xi, &p);
++}
++
++static size_t xfrmi_get_size(const struct net_device *dev)
++{
++ return
++ /* IFLA_XFRM_LINK */
++ nla_total_size(4) +
++ /* IFLA_XFRM_IF_ID */
++ nla_total_size(4) +
++ 0;
++}
++
++static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++ struct xfrm_if_parms *parm = &xi->p;
++
++ if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
++ nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
++ goto nla_put_failure;
++ return 0;
++
++nla_put_failure:
++ return -EMSGSIZE;
++}
++
++static struct net *xfrmi_get_link_net(const struct net_device *dev)
++{
++ struct xfrm_if *xi = netdev_priv(dev);
++
++ return xi->net;
++}
++
++static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
++ [IFLA_XFRM_LINK] = { .type = NLA_U32 },
++ [IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
++};
++
++static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
++ .kind = "xfrm",
++ .maxtype = IFLA_XFRM_MAX,
++ .policy = xfrmi_policy,
++ .priv_size = sizeof(struct xfrm_if),
++ .setup = xfrmi_dev_setup,
++ .validate = xfrmi_validate,
++ .newlink = xfrmi_newlink,
++ .dellink = xfrmi_dellink,
++ .changelink = xfrmi_changelink,
++ .get_size = xfrmi_get_size,
++ .fill_info = xfrmi_fill_info,
++ .get_link_net = xfrmi_get_link_net,
++};
++
++static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
++{
++ struct xfrm_if *xi;
++ LIST_HEAD(list);
++
++ xi = rtnl_dereference(xfrmn->xfrmi[0]);
++ if (!xi)
++ return;
++
++ unregister_netdevice_queue(xi->dev, &list);
++ unregister_netdevice_many(&list);
++}
++
++static void __net_exit xfrmi_exit_net(struct net *net)
++{
++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++
++ rtnl_lock();
++ xfrmi_destroy_interfaces(xfrmn);
++ rtnl_unlock();
++}
++
++static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
++{
++ struct net *net;
++ LIST_HEAD(list);
++
++ rtnl_lock();
++ list_for_each_entry(net, net_exit_list, exit_list) {
++ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
++ struct xfrm_if __rcu **xip;
++ struct xfrm_if *xi;
++
++ for (xip = &xfrmn->xfrmi[0];
++ (xi = rtnl_dereference(*xip)) != NULL;
++ xip = &xi->next)
++ unregister_netdevice_queue(xi->dev, &list);
++ }
++ unregister_netdevice_many(&list);
++ rtnl_unlock();
++}
++
++static struct pernet_operations xfrmi_net_ops = {
++ .exit_batch = xfrmi_exit_batch_net,
++ .exit = xfrmi_exit_net,
++ .id = &xfrmi_net_id,
++ .size = sizeof(struct xfrmi_net),
++};
++
++static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
++ .handler = xfrm6_rcv,
++ .cb_handler = xfrmi_rcv_cb,
++ .err_handler = xfrmi6_err,
++ .priority = 10,
++};
++
++static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
++ .handler = xfrm6_rcv,
++ .cb_handler = xfrmi_rcv_cb,
++ .err_handler = xfrmi6_err,
++ .priority = 10,
++};
++
++static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
++ .handler = xfrm6_rcv,
++ .cb_handler = xfrmi_rcv_cb,
++ .err_handler = xfrmi6_err,
++ .priority = 10,
++};
++
++static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
++ .handler = xfrm4_rcv,
++ .input_handler = xfrm_input,
++ .cb_handler = xfrmi_rcv_cb,
++ .err_handler = xfrmi4_err,
++ .priority = 10,
++};
++
++static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
++ .handler = xfrm4_rcv,
++ .input_handler = xfrm_input,
++ .cb_handler = xfrmi_rcv_cb,
++ .err_handler = xfrmi4_err,
++ .priority = 10,
++};
++
++static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
++ .handler = xfrm4_rcv,
++ .input_handler = xfrm_input,
++ .cb_handler = xfrmi_rcv_cb,
++ .err_handler = xfrmi4_err,
++ .priority = 10,
++};
++
++static int __init xfrmi4_init(void)
++{
++ int err;
++
++ err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
++ if (err < 0)
++ goto xfrm_proto_esp_failed;
++ err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
++ if (err < 0)
++ goto xfrm_proto_ah_failed;
++ err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
++ if (err < 0)
++ goto xfrm_proto_comp_failed;
++
++ return 0;
++
++xfrm_proto_comp_failed:
++ xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
++xfrm_proto_ah_failed:
++ xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
++xfrm_proto_esp_failed:
++ return err;
++}
++
++static void xfrmi4_fini(void)
++{
++ xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
++ xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
++ xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
++}
++
++static int __init xfrmi6_init(void)
++{
++ int err;
++
++ err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
++ if (err < 0)
++ goto xfrm_proto_esp_failed;
++ err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
++ if (err < 0)
++ goto xfrm_proto_ah_failed;
++ err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
++ if (err < 0)
++ goto xfrm_proto_comp_failed;
++
++ return 0;
++
++xfrm_proto_comp_failed:
++ xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
++xfrm_proto_ah_failed:
++ xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
++xfrm_proto_esp_failed:
++ return err;
++}
++
++static void xfrmi6_fini(void)
++{
++ xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
++ xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
++ xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
++}
++
++static const struct xfrm_if_cb xfrm_if_cb = {
++ .decode_session = xfrmi_decode_session,
++};
++
++static int __init xfrmi_init(void)
++{
++ const char *msg;
++ int err;
++
++ pr_info("IPsec XFRM device driver\n");
++
++ msg = "tunnel device";
++ err = register_pernet_device(&xfrmi_net_ops);
++ if (err < 0)
++ goto pernet_dev_failed;
++
++ msg = "xfrm4 protocols";
++ err = xfrmi4_init();
++ if (err < 0)
++ goto xfrmi4_failed;
++
++ msg = "xfrm6 protocols";
++ err = xfrmi6_init();
++ if (err < 0)
++ goto xfrmi6_failed;
++
++
++ msg = "netlink interface";
++ err = rtnl_link_register(&xfrmi_link_ops);
++ if (err < 0)
++ goto rtnl_link_failed;
++
++ xfrm_if_register_cb(&xfrm_if_cb);
++
++ return err;
++
++rtnl_link_failed:
++ xfrmi6_fini();
++xfrmi6_failed:
++ xfrmi4_fini();
++xfrmi4_failed:
++ unregister_pernet_device(&xfrmi_net_ops);
++pernet_dev_failed:
++ pr_err("xfrmi init: failed to register %s\n", msg);
++ return err;
++}
++
++static void __exit xfrmi_fini(void)
++{
++ xfrm_if_unregister_cb();
++ rtnl_link_unregister(&xfrmi_link_ops);
++ xfrmi4_fini();
++ xfrmi6_fini();
++ unregister_pernet_device(&xfrmi_net_ops);
++}
++
++module_init(xfrmi_init);
++module_exit(xfrmi_fini);
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("xfrm");
++MODULE_ALIAS_NETDEV("xfrm0");
++MODULE_AUTHOR("Steffen Klassert");
++MODULE_DESCRIPTION("XFRM virtual interface");
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index bd44a800e7db7..3589c2ee3d6fc 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -522,7 +522,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+
+- if (re) {
++ if (re && x->replay_esn && x->preplay_esn) {
+ struct xfrm_replay_state_esn *replay_esn;
+ replay_esn = nla_data(re);
+ memcpy(x->replay_esn, replay_esn,
+@@ -1037,6 +1037,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
+ sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return -ENOMEM;
++
++ /* see addr_match(), (prefix length >> 5) << 2
++ * will be used to compare xfrm_address_t
++ */
++ if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
++ filter->dplen > (sizeof(xfrm_address_t) << 3)) {
++ kfree(filter);
++ return -EINVAL;
++ }
+ }
+
+ if (attrs[XFRMA_PROTO])
+@@ -2574,7 +2583,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
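The bounds check added to xfrm_dump_sa() above deserves a worked number: xfrm_address_t is 16 bytes, so sizeof(xfrm_address_t) << 3 is 128, the largest meaningful prefix length in bits. As the in-diff comment notes, addr_match() turns a prefix length into a word count via (prefix >> 5) and a byte offset via (<< 2), so an unchecked value above 128 walks past the end of the address. A small stand-alone illustration of the arithmetic (plain userspace C; 200 is just an example of an oversized, user-supplied prefix):

	#include <stdio.h>

	int main(void)
	{
		unsigned int prefixlen = 200;		/* > 128: rejected by the new check */
		unsigned int words = prefixlen >> 5;	/* 32-bit words addr_match() compares */
		unsigned int bytes = words << 2;	/* byte offset it reaches */

		/* 6 words = 24 bytes, but the address is only 16 bytes long */
		printf("%u words (%u bytes) of a 16-byte xfrm_address_t\n",
		       words, bytes);
		return 0;
	}

With the check in place, splen/dplen values above 128 now fail the dump request with -EINVAL instead of feeding addr_match() an out-of-range prefix.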
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 44b3315f32352..d90ead61f0def 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -8,7 +8,7 @@ config IMA
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+- select TCG_TPM if HAS_IOMEM && !UML
++ select TCG_TPM if HAS_IOMEM
+ select TCG_TIS if TCG_TPM && X86
+ select TCG_CRB if TCG_TPM && ACPI
+ select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 45bbc4884ef0f..a127763ae5fbd 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -211,6 +211,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
+ INIT_LIST_HEAD(&card->ctl_files);
+ spin_lock_init(&card->files_lock);
+ INIT_LIST_HEAD(&card->files_list);
++ mutex_init(&card->memory_mutex);
+ #ifdef CONFIG_PM
+ init_waitqueue_head(&card->power_sleep);
+ #endif
+diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
+index 9aea1d6fb0547..b961a30c2a221 100644
+--- a/sound/core/pcm_memory.c
++++ b/sound/core/pcm_memory.c
+@@ -26,6 +26,67 @@ MODULE_PARM_DESC(maximum_substreams, "Maximum substreams with preallocated DMA m
+
+ static const size_t snd_minimum_buffer = 16384;
+
++static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
++module_param(max_alloc_per_card, ulong, 0644);
++MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
++
++static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++ card->total_pcm_alloc_bytes += bytes;
++}
++
++static void update_allocated_size(struct snd_card *card, ssize_t bytes)
++{
++ mutex_lock(&card->memory_mutex);
++ __update_allocated_size(card, bytes);
++ mutex_unlock(&card->memory_mutex);
++}
++
++static void decrease_allocated_size(struct snd_card *card, size_t bytes)
++{
++ mutex_lock(&card->memory_mutex);
++ WARN_ON(card->total_pcm_alloc_bytes < bytes);
++ __update_allocated_size(card, -(ssize_t)bytes);
++ mutex_unlock(&card->memory_mutex);
++}
++
++static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
++ size_t size, struct snd_dma_buffer *dmab)
++{
++ int err;
++
++ /* check and reserve the requested size */
++ mutex_lock(&card->memory_mutex);
++ if (max_alloc_per_card &&
++ card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
++ mutex_unlock(&card->memory_mutex);
++ return -ENOMEM;
++ }
++ __update_allocated_size(card, size);
++ mutex_unlock(&card->memory_mutex);
++
++ err = snd_dma_alloc_pages(type, dev, size, dmab);
++ if (!err) {
++ /* the actual allocation size might be bigger than requested,
++ * and we need to correct the account
++	 * and we need to correct the accounting
++ if (dmab->bytes != size)
++ update_allocated_size(card, dmab->bytes - size);
++ } else {
++ /* take back on allocation failure */
++ decrease_allocated_size(card, size);
++ }
++ return err;
++}
++
++static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
++{
++ if (!dmab->area)
++ return;
++ decrease_allocated_size(card, dmab->bytes);
++ snd_dma_free_pages(dmab);
++ dmab->area = NULL;
++}
+
+ /*
+ * try to allocate as the large pages as possible.
+@@ -36,16 +97,15 @@ static const size_t snd_minimum_buffer = 16384;
+ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t size)
+ {
+ struct snd_dma_buffer *dmab = &substream->dma_buffer;
++ struct snd_card *card = substream->pcm->card;
+ size_t orig_size = size;
+ int err;
+
+ do {
+- if ((err = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev,
+- size, dmab)) < 0) {
+- if (err != -ENOMEM)
+- return err; /* fatal error */
+- } else
+- return 0;
++ err = do_alloc_pages(card, dmab->dev.type, dmab->dev.dev,
++ size, dmab);
++ if (err != -ENOMEM)
++ return err;
+ size >>= 1;
+ } while (size >= snd_minimum_buffer);
+ dmab->bytes = 0; /* tell error */
+@@ -61,10 +121,7 @@ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t siz
+ */
+ static void snd_pcm_lib_preallocate_dma_free(struct snd_pcm_substream *substream)
+ {
+- if (substream->dma_buffer.area == NULL)
+- return;
+- snd_dma_free_pages(&substream->dma_buffer);
+- substream->dma_buffer.area = NULL;
++ do_free_pages(substream->pcm->card, &substream->dma_buffer);
+ }
+
+ /**
+@@ -129,6 +186,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+ {
+ struct snd_pcm_substream *substream = entry->private_data;
++ struct snd_card *card = substream->pcm->card;
+ char line[64], str[64];
+ size_t size;
+ struct snd_dma_buffer new_dmab;
+@@ -150,9 +208,10 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
+ memset(&new_dmab, 0, sizeof(new_dmab));
+ new_dmab.dev = substream->dma_buffer.dev;
+ if (size > 0) {
+- if (snd_dma_alloc_pages(substream->dma_buffer.dev.type,
+- substream->dma_buffer.dev.dev,
+- size, &new_dmab) < 0) {
++ if (do_alloc_pages(card,
++ substream->dma_buffer.dev.type,
++ substream->dma_buffer.dev.dev,
++ size, &new_dmab) < 0) {
+ buffer->error = -ENOMEM;
+ goto unlock;
+ }
+@@ -161,7 +220,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
+ substream->buffer_bytes_max = UINT_MAX;
+ }
+ if (substream->dma_buffer.area)
+- snd_dma_free_pages(&substream->dma_buffer);
++ do_free_pages(card, &substream->dma_buffer);
+ substream->dma_buffer = new_dmab;
+ } else {
+ buffer->error = -EINVAL;
+@@ -289,6 +348,7 @@ EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
+ */
+ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
+ {
++ struct snd_card *card = substream->pcm->card;
+ struct snd_pcm_runtime *runtime;
+ struct snd_dma_buffer *dmab = NULL;
+
+@@ -317,9 +377,10 @@ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
+ if (! dmab)
+ return -ENOMEM;
+ dmab->dev = substream->dma_buffer.dev;
+- if (snd_dma_alloc_pages(substream->dma_buffer.dev.type,
+- substream->dma_buffer.dev.dev,
+- size, dmab) < 0) {
++ if (do_alloc_pages(card,
++ substream->dma_buffer.dev.type,
++ substream->dma_buffer.dev.dev,
++ size, dmab) < 0) {
+ kfree(dmab);
+ return -ENOMEM;
+ }
+@@ -348,8 +409,10 @@ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream)
+ if (runtime->dma_area == NULL)
+ return 0;
+ if (runtime->dma_buffer_p != &substream->dma_buffer) {
++ struct snd_card *card = substream->pcm->card;
++
+ /* it's a newly allocated buffer. release it now. */
+- snd_dma_free_pages(runtime->dma_buffer_p);
++ do_free_pages(card, runtime->dma_buffer_p);
+ kfree(runtime->dma_buffer_p);
+ }
+ snd_pcm_set_runtime_buffer(substream, NULL);
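do_alloc_pages() above implements a reserve-first accounting scheme: the requested size is charged against the per-card budget under card->memory_mutex before the allocation, the charge is corrected once the real (possibly rounded-up) size is known, and the reservation is rolled back entirely on failure. The scheme in isolation, with budget standing in for the card and its new total_pcm_alloc_bytes/memory_mutex fields:

	#include <stddef.h>

	struct budget {
		size_t used;	/* card->total_pcm_alloc_bytes */
		size_t limit;	/* max_alloc_per_card (0 = unlimited) */
	};	/* 'used' is mutex-protected in the real code */

	static int reserve(struct budget *b, size_t size)
	{
		if (b->limit && b->used + size > b->limit)
			return -1;	/* refuse before touching the allocator */
		b->used += size;
		return 0;
	}

	static void settle(struct budget *b, size_t reserved, size_t actual)
	{
		/* actual > reserved: allocator rounded up, charge the rest;
		 * actual == 0: allocation failed, refund the reservation. */
		b->used += actual;
		b->used -= reserved;
	}

Reserving before allocating keeps two racing callers from both passing the limit check and jointly overshooting the budget; that is also why the sound/core/init.c hunk above gains the mutex_init() for card->memory_mutex.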
+diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
+index 489f996d86bcb..9df0158e89f44 100644
+--- a/sound/hda/hdac_device.c
++++ b/sound/hda/hdac_device.c
+@@ -607,7 +607,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+ int snd_hdac_keep_power_up(struct hdac_device *codec)
+ {
+ if (!atomic_inc_not_zero(&codec->in_pm)) {
+- int ret = pm_runtime_get_if_in_use(&codec->dev);
++ int ret = pm_runtime_get_if_active(&codec->dev, true);
+ if (!ret)
+ return -1;
+ if (ret < 0)
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index 49780399c2849..a035a7d74ce09 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -596,10 +596,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once);
+ */
+ void snd_hdac_regmap_sync(struct hdac_device *codec)
+ {
+- if (codec->regmap) {
+- mutex_lock(&codec->regmap_lock);
++ mutex_lock(&codec->regmap_lock);
++ if (codec->regmap)
+ regcache_sync(codec->regmap);
+- mutex_unlock(&codec->regmap_lock);
+- }
++ mutex_unlock(&codec->regmap_lock);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
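The snd_hdac_regmap_sync() change above moves the NULL test inside the mutex, converting a check-then-lock sequence into a lock-then-check one. The generic shape of the bug and of the fix (obj, ptr and use() are stand-ins):

	/* Before: ptr can be cleared and freed between the test and the
	 * locked use. */
	if (obj->ptr) {
		mutex_lock(&obj->lock);
		use(obj->ptr);		/* potential use-after-free */
		mutex_unlock(&obj->lock);
	}

	/* After: the test and the use are atomic with respect to any
	 * writer that clears ptr under the same lock. */
	mutex_lock(&obj->lock);
	if (obj->ptr)
		use(obj->ptr);
	mutex_unlock(&obj->lock);

This is only sound on the assumption that the teardown path clears codec->regmap while holding regmap_lock as well; the lock, not the check, is what provides the guarantee.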
+diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
+index e053f0d58bdd0..2f3cfcfcdb9a3 100644
+--- a/sound/pci/emu10k1/emufx.c
++++ b/sound/pci/emu10k1/emufx.c
+@@ -1536,14 +1536,8 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
+ gpr += 2;
+
+ /* Master volume (will be renamed later) */
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS));
+- A_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS));
++ for (z = 0; z < 8; z++)
++ A_OP(icode, &ptr, iMAC0, A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+z+SND_EMU10K1_PLAYBACK_CHANNELS));
+ snd_emu10k1_init_mono_control(&controls[nctl++], "Wave Master Playback Volume", gpr, 0);
+ gpr += 2;
+
+@@ -1627,102 +1621,14 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
+ dev_dbg(emu->card->dev, "emufx.c: gpr=0x%x, tmp=0x%x\n",
+ gpr, tmp);
+ */
+- /* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */
+- /* A_P16VIN(0) is delayed by one sample,
+- * so all other A_P16VIN channels will need to also be delayed
+- */
+- /* Left ADC in. 1 of 2 */
+ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) );
+- /* Right ADC in 1 of 2 */
+- gpr_map[gpr++] = 0x00000000;
+- /* Delaying by one sample: instead of copying the input
+- * value A_P16VIN to output A_FXBUS2 as in the first channel,
+- * we use an auxiliary register, delaying the value by one
+- * sample
+- */
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000);
+- /* For 96kHz mode */
+- /* Left ADC in. 2 of 2 */
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000);
+- /* Right ADC in 2 of 2 */
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) );
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000);
+- /* Pavel Hofman - we still have voices, A_FXBUS2s, and
+- * A_P16VINs available -
+- * let's add 8 more capture channels - total of 16
+- */
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x10));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x12));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x14));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x16));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x18));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x1a));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x1c));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe),
+- A_C_00000000, A_C_00000000);
+- gpr_map[gpr++] = 0x00000000;
+- snd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,
+- bit_shifter16,
+- A_GPR(gpr - 1),
+- A_FXBUS2(0x1e));
+- A_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf),
+- A_C_00000000, A_C_00000000);
++		/* A_P16VIN(0) is delayed by one sample, so all other A_P16VIN channels
++		 * will also need to be delayed; we use an auxiliary register for that. */
++ for (z = 1; z < 0x10; z++) {
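++			/* each channel gets its own GPR as the delay element:
++			 * output the previous sample, then latch the new one */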
++ snd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr), A_FXBUS2(z * 2) );
++ A_OP(icode, &ptr, iACC3, A_GPR(gpr), A_P16VIN(z), A_C_00000000, A_C_00000000);
++ gpr_map[gpr++] = 0x00000000;
++ }
+ }
+
+ #if 0
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6d8d9fc1da0b0..c0bcbab7b6560 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9877,6 +9877,7 @@ enum {
+ ALC897_FIXUP_HP_HSMIC_VERB,
+ ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ ALC897_FIXUP_HEADSET_MIC_PIN2,
++ ALC897_FIXUP_UNIS_H3C_X500S,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -10316,6 +10317,13 @@ static const struct hda_fixup alc662_fixups[] = {
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
+ },
++ [ALC897_FIXUP_UNIS_H3C_X500S] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 },
++ {}
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -10477,6 +10485,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
+ {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
+ {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
+ {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
++ {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
+ {}
+ };
+
+diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
+index 68299ce26d3e4..648e0708007e1 100644
+--- a/sound/soc/codecs/rt5665.c
++++ b/sound/soc/codecs/rt5665.c
+@@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component)
+ struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
++
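++	/* undo the regulator_bulk_enable() done at probe time */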
++ regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index f8445231ad782..fdbfaedda4ce8 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -37,6 +37,24 @@ static const struct snd_pcm_hw_constraint_list fsl_sai_rate_constraints = {
+ .list = fsl_sai_rates,
+ };
+
++/**
++ * fsl_sai_dir_is_synced - Check if stream is synced by the opposite stream
++ *
++ * SAI supports synchronous mode using the bit/frame clocks of either the
++ * transmitter or the receiver for both streams. This function checks
++ * whether a stream's clocks are synced by the opposite stream.
++ *
++ * @sai: SAI context
++ * @dir: stream direction
++ */
++static inline bool fsl_sai_dir_is_synced(struct fsl_sai *sai, int dir)
++{
++ int adir = (dir == TX) ? RX : TX;
++
++ /* current dir in async mode while opposite dir in sync mode */
++ return !sai->synchronous[dir] && sai->synchronous[adir];
++}
++
+ static irqreturn_t fsl_sai_isr(int irq, void *devid)
+ {
+ struct fsl_sai *sai = (struct fsl_sai *)devid;
+@@ -523,6 +541,38 @@ static int fsl_sai_hw_free(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
++static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
++{
++ unsigned int ofs = sai->soc_data->reg_offset;
++ bool tx = dir == TX;
++ u32 xcsr, count = 100;
++
++ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
++ FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
++
++	/* TERE will remain set until the end of the current frame */
++ do {
++ udelay(10);
++ regmap_read(sai->regmap, FSL_SAI_xCSR(tx, ofs), &xcsr);
++ } while (--count && xcsr & FSL_SAI_CSR_TERE);
++
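++	/* reset the FIFO pointers via the FIFO Reset (FR) bit */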
++ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
++ FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
++
++ /*
++	 * In SAI master mode, after several open/close cycles the frame
++	 * clock stops and cannot be recovered. Add a software reset to
++	 * fix this issue. This is a hardware bug that will be fixed in
++	 * the next SAI version.
++ */
++ if (!sai->is_slave_mode) {
++ /* Software Reset */
++ regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
++ /* Clear SR bit to finish the reset */
++ regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
++ }
++}
+
+ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+@@ -531,7 +581,9 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+ unsigned int ofs = sai->soc_data->reg_offset;
+
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+- u32 xcsr, count = 100;
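++	/* dir is this stream's direction, adir the opposite one */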
++ int adir = tx ? RX : TX;
++ int dir = tx ? TX : RX;
++ u32 xcsr;
+
+ /*
+ * Asynchronous mode: Clear SYNC for both Tx and Rx.
+@@ -554,10 +606,22 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+ FSL_SAI_CSR_FRDE, FSL_SAI_CSR_FRDE);
+
+- regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
+- FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
+- regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
++ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+ FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
++ /*
++ * Enable the opposite direction for synchronous mode
++ * 1. Tx sync with Rx: only set RE for Rx; set TE & RE for Tx
++ * 2. Rx sync with Tx: only set TE for Tx; set RE & TE for Rx
++ *
++		 * The RM recommends enabling RE after TE for case 1 and enabling
++		 * TE after RE for case 2, but we cannot always guarantee that
++		 * order: "arecord 1.wav; aplay 2.wav" in case 1 enables TE after
++		 * RE, which goes against the RM recommendation but should be
++		 * safe to do, judging by years of testing results.
++ */
++ if (fsl_sai_dir_is_synced(sai, adir))
++ regmap_update_bits(sai->regmap, FSL_SAI_xCSR((!tx), ofs),
++ FSL_SAI_CSR_TERE, FSL_SAI_CSR_TERE);
+
+ regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
+ FSL_SAI_CSR_xIE_MASK, FSL_SAI_FLAGS);
+@@ -572,43 +636,23 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+
+ /* Check if the opposite FRDE is also disabled */
+ regmap_read(sai->regmap, FSL_SAI_xCSR(!tx, ofs), &xcsr);
+- if (!(xcsr & FSL_SAI_CSR_FRDE)) {
+- /* Disable both directions and reset their FIFOs */
+- regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
+- FSL_SAI_CSR_TERE, 0);
+- regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
+- FSL_SAI_CSR_TERE, 0);
+-
+- /* TERE will remain set till the end of current frame */
+- do {
+- udelay(10);
+- regmap_read(sai->regmap,
+- FSL_SAI_xCSR(tx, ofs), &xcsr);
+- } while (--count && xcsr & FSL_SAI_CSR_TERE);
+-
+- regmap_update_bits(sai->regmap, FSL_SAI_TCSR(ofs),
+- FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
+- regmap_update_bits(sai->regmap, FSL_SAI_RCSR(ofs),
+- FSL_SAI_CSR_FR, FSL_SAI_CSR_FR);
+-
+- /*
+- * For sai master mode, after several open/close sai,
+- * there will be no frame clock, and can't recover
+- * anymore. Add software reset to fix this issue.
+- * This is a hardware bug, and will be fix in the
+- * next sai version.
+- */
+- if (!sai->is_slave_mode) {
+- /* Software Reset for both Tx and Rx */
+- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs),
+- FSL_SAI_CSR_SR);
+- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs),
+- FSL_SAI_CSR_SR);
+- /* Clear SR bit to finish the reset */
+- regmap_write(sai->regmap, FSL_SAI_TCSR(ofs), 0);
+- regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
+- }
+- }
++
++ /*
++		 * If the opposite stream provides clocks for synchronous mode and
++		 * is inactive, disable it before disabling the current one.
++ */
++ if (fsl_sai_dir_is_synced(sai, adir) && !(xcsr & FSL_SAI_CSR_FRDE))
++ fsl_sai_config_disable(sai, adir);
++
++ /*
++		 * Disable the current stream if either:
++		 * 1. the current stream doesn't provide clocks for synchronous mode
++		 * 2. the current stream provides clocks for synchronous mode but no
++		 *    other stream is active.
++ */
++ if (!fsl_sai_dir_is_synced(sai, dir) || !(xcsr & FSL_SAI_CSR_FRDE))
++ fsl_sai_config_disable(sai, dir);
++
+ break;
+ default:
+ return -EINVAL;
+@@ -766,6 +810,8 @@ static struct reg_default fsl_sai_reg_defaults_ofs8[] = {
+ {FSL_SAI_RCR4(8), 0},
+ {FSL_SAI_RCR5(8), 0},
+ {FSL_SAI_RMR, 0},
++ {FSL_SAI_MCTL, 0},
++ {FSL_SAI_MDIV, 0},
+ };
+
+ static bool fsl_sai_readable_reg(struct device *dev, unsigned int reg)
+@@ -806,6 +852,18 @@ static bool fsl_sai_readable_reg(struct device *dev, unsigned int reg)
+ case FSL_SAI_RFR6:
+ case FSL_SAI_RFR7:
+ case FSL_SAI_RMR:
++ case FSL_SAI_MCTL:
++ case FSL_SAI_MDIV:
++ case FSL_SAI_VERID:
++ case FSL_SAI_PARAM:
++ case FSL_SAI_TTCTN:
++ case FSL_SAI_RTCTN:
++ case FSL_SAI_TTCTL:
++ case FSL_SAI_TBCTN:
++ case FSL_SAI_TTCAP:
++ case FSL_SAI_RTCTL:
++ case FSL_SAI_RBCTN:
++ case FSL_SAI_RTCAP:
+ return true;
+ default:
+ return false;
+@@ -820,6 +878,10 @@ static bool fsl_sai_volatile_reg(struct device *dev, unsigned int reg)
+ if (reg == FSL_SAI_TCSR(ofs) || reg == FSL_SAI_RCSR(ofs))
+ return true;
+
++	/* Mark VERID and PARAM as volatile so probe can read their values */
++ if (ofs == 8 && (reg == FSL_SAI_VERID || reg == FSL_SAI_PARAM))
++ return true;
++
+ switch (reg) {
+ case FSL_SAI_TFR0:
+ case FSL_SAI_TFR1:
+@@ -873,6 +935,10 @@ static bool fsl_sai_writeable_reg(struct device *dev, unsigned int reg)
+ case FSL_SAI_TDR7:
+ case FSL_SAI_TMR:
+ case FSL_SAI_RMR:
++ case FSL_SAI_MCTL:
++ case FSL_SAI_MDIV:
++ case FSL_SAI_TTCTL:
++ case FSL_SAI_RTCTL:
+ return true;
+ default:
+ return false;
+@@ -921,6 +987,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
+
+ if (sai->soc_data->reg_offset == 8) {
+ fsl_sai_regmap_config.reg_defaults = fsl_sai_reg_defaults_ofs8;
++ fsl_sai_regmap_config.max_register = FSL_SAI_MDIV;
+ fsl_sai_regmap_config.num_reg_defaults =
+ ARRAY_SIZE(fsl_sai_reg_defaults_ofs8);
+ }
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index afaef20272342..771990396804c 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -14,6 +14,8 @@
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+ /* SAI Register Map Register */
++#define FSL_SAI_VERID 0x00 /* SAI Version ID Register */
++#define FSL_SAI_PARAM 0x04 /* SAI Parameter Register */
+ #define FSL_SAI_TCSR(ofs) (0x00 + ofs) /* SAI Transmit Control */
+ #define FSL_SAI_TCR1(ofs) (0x04 + ofs) /* SAI Transmit Configuration 1 */
+ #define FSL_SAI_TCR2(ofs) (0x08 + ofs) /* SAI Transmit Configuration 2 */
+@@ -37,6 +39,10 @@
+ #define FSL_SAI_TFR6 0x58 /* SAI Transmit FIFO 6 */
+ #define FSL_SAI_TFR7 0x5C /* SAI Transmit FIFO 7 */
+ #define FSL_SAI_TMR 0x60 /* SAI Transmit Mask */
++#define FSL_SAI_TTCTL 0x70 /* SAI Transmit Timestamp Control Register */
++#define FSL_SAI_TTCTN 0x74 /* SAI Transmit Timestamp Counter Register */
++#define FSL_SAI_TBCTN 0x78 /* SAI Transmit Bit Counter Register */
++#define FSL_SAI_TTCAP 0x7C /* SAI Transmit Timestamp Capture */
+ #define FSL_SAI_RCSR(ofs) (0x80 + ofs) /* SAI Receive Control */
+ #define FSL_SAI_RCR1(ofs) (0x84 + ofs)/* SAI Receive Configuration 1 */
+ #define FSL_SAI_RCR2(ofs) (0x88 + ofs) /* SAI Receive Configuration 2 */
+@@ -60,6 +66,13 @@
+ #define FSL_SAI_RFR6 0xd8 /* SAI Receive FIFO 6 */
+ #define FSL_SAI_RFR7 0xdc /* SAI Receive FIFO 7 */
+ #define FSL_SAI_RMR 0xe0 /* SAI Receive Mask */
++#define FSL_SAI_RTCTL 0xf0 /* SAI Receive Timestamp Control Register */
++#define FSL_SAI_RTCTN 0xf4 /* SAI Receive Timestamp Counter Register */
++#define FSL_SAI_RBCTN 0xf8 /* SAI Receive Bit Counter Register */
++#define FSL_SAI_RTCAP 0xfc /* SAI Receive Timestamp Capture */
++
++#define FSL_SAI_MCTL 0x100 /* SAI MCLK Control Register */
++#define FSL_SAI_MDIV 0x104 /* SAI MCLK Divide Register */
+
+ #define FSL_SAI_xCSR(tx, ofs) (tx ? FSL_SAI_TCSR(ofs) : FSL_SAI_RCSR(ofs))
+ #define FSL_SAI_xCR1(tx, ofs) (tx ? FSL_SAI_TCR1(ofs) : FSL_SAI_RCR1(ofs))
+@@ -73,6 +86,8 @@
+
+ /* SAI Transmit/Receive Control Register */
+ #define FSL_SAI_CSR_TERE BIT(31)
++#define FSL_SAI_CSR_SE BIT(30)
++#define FSL_SAI_CSR_BCE BIT(28)
+ #define FSL_SAI_CSR_FR BIT(25)
+ #define FSL_SAI_CSR_SR BIT(24)
+ #define FSL_SAI_CSR_xF_SHIFT 16
+@@ -106,6 +121,7 @@
+ #define FSL_SAI_CR2_MSEL(ID) ((ID) << 26)
+ #define FSL_SAI_CR2_BCP BIT(25)
+ #define FSL_SAI_CR2_BCD_MSTR BIT(24)
++#define FSL_SAI_CR2_BYP BIT(23) /* BCLK bypass */
+ #define FSL_SAI_CR2_DIV_MASK 0xff
+
+ /* SAI Transmit and Receive Configuration 3 Register */
+@@ -115,6 +131,13 @@
+ #define FSL_SAI_CR3_WDFL_MASK 0x1f
+
+ /* SAI Transmit and Receive Configuration 4 Register */
++
++#define FSL_SAI_CR4_FCONT BIT(28)
++#define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
++#define FSL_SAI_CR4_FCOMB_SOFT BIT(27)
++#define FSL_SAI_CR4_FCOMB_MASK (0x3 << 26)
++#define FSL_SAI_CR4_FPACK_8 (0x2 << 24)
++#define FSL_SAI_CR4_FPACK_16 (0x3 << 24)
+ #define FSL_SAI_CR4_FRSZ(x) (((x) - 1) << 16)
+ #define FSL_SAI_CR4_FRSZ_MASK (0x1f << 16)
+ #define FSL_SAI_CR4_SYWD(x) (((x) - 1) << 8)
+@@ -132,6 +155,43 @@
+ #define FSL_SAI_CR5_FBT(x) ((x) << 8)
+ #define FSL_SAI_CR5_FBT_MASK (0x1f << 8)
+
++/* SAI MCLK Control Register */
++#define FSL_SAI_MCTL_MCLK_EN BIT(30) /* MCLK Enable */
++#define FSL_SAI_MCTL_MSEL_MASK (0x3 << 24)
++#define FSL_SAI_MCTL_MSEL(ID) ((ID) << 24)
++#define FSL_SAI_MCTL_MSEL_BUS 0
++#define FSL_SAI_MCTL_MSEL_MCLK1 BIT(24)
++#define FSL_SAI_MCTL_MSEL_MCLK2 BIT(25)
++#define FSL_SAI_MCTL_MSEL_MCLK3 (BIT(24) | BIT(25))
++#define FSL_SAI_MCTL_DIV_EN BIT(23)
++#define FSL_SAI_MCTL_DIV_MASK 0xFF
++
++/* SAI VERID Register */
++#define FSL_SAI_VERID_MAJOR_SHIFT 24
++#define FSL_SAI_VERID_MAJOR_MASK GENMASK(31, 24)
++#define FSL_SAI_VERID_MINOR_SHIFT 16
++#define FSL_SAI_VERID_MINOR_MASK GENMASK(23, 16)
++#define FSL_SAI_VERID_FEATURE_SHIFT 0
++#define FSL_SAI_VERID_FEATURE_MASK GENMASK(15, 0)
++#define FSL_SAI_VERID_EFIFO_EN BIT(0)
++#define FSL_SAI_VERID_TSTMP_EN BIT(1)
++
++/* SAI PARAM Register */
++#define FSL_SAI_PARAM_SPF_SHIFT 16
++#define FSL_SAI_PARAM_SPF_MASK GENMASK(19, 16)
++#define FSL_SAI_PARAM_WPF_SHIFT 8
++#define FSL_SAI_PARAM_WPF_MASK GENMASK(11, 8)
++#define FSL_SAI_PARAM_DLN_MASK GENMASK(3, 0)
++
++/* SAI MCLK Divide Register */
++#define FSL_SAI_MDIV_MASK 0xFFFFF
++
++/* SAI timestamp and bitcounter */
++#define FSL_SAI_xTCTL_TSEN BIT(0)
++#define FSL_SAI_xTCTL_TSINC BIT(1)
++#define FSL_SAI_xTCTL_RTSC BIT(8)
++#define FSL_SAI_xTCTL_RBC BIT(9)
++
+ /* SAI type */
+ #define FSL_SAI_DMA BIT(0)
+ #define FSL_SAI_USE_AC97 BIT(1)
+diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
+index f7e8e9da68a06..981dbaaa6f3b9 100644
+--- a/sound/soc/meson/axg-tdm-formatter.c
++++ b/sound/soc/meson/axg-tdm-formatter.c
+@@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ struct axg_tdm_stream *ts,
+ unsigned int offset)
+ {
+- unsigned int val, ch = ts->channels;
+- unsigned long mask;
+- int i, j;
++ unsigned int ch = ts->channels;
++ u32 val[AXG_TDM_NUM_LANES];
++ int i, j, k;
++
++ /*
++	 * We need to mimic the slot distribution used by the HW to keep the
++	 * channel placement consistent regardless of the number of channels
++	 * in the stream. This is why the odd algorithm below is used.
++ */
++ memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
+
+ /*
+ * Distribute the channels of the stream over the available slots
+- * of each TDM lane
++ * of each TDM lane. We need to go over the 32 slots ...
+ */
+- for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+- val = 0;
+- mask = ts->mask[i];
+-
+- for (j = find_first_bit(&mask, 32);
+- (j < 32) && ch;
+- j = find_next_bit(&mask, 32, j + 1)) {
+- val |= 1 << j;
+- ch -= 1;
++ for (i = 0; (i < 32) && ch; i += 2) {
++ /* ... of all the lanes ... */
++ for (j = 0; j < AXG_TDM_NUM_LANES; j++) {
++ /* ... then distribute the channels in pairs */
++ for (k = 0; k < 2; k++) {
++ if ((BIT(i + k) & ts->mask[j]) && ch) {
++ val[j] |= BIT(i + k);
++ ch -= 1;
++ }
++ }
+ }
+-
+- regmap_write(map, offset, val);
+- offset += regmap_get_reg_stride(map);
+ }
+
+ /*
+@@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ return -EINVAL;
+ }
+
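++	/* write the per-lane masks to consecutive mask registers */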
++ for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
++ regmap_write(map, offset, val[i]);
++ offset += regmap_get_reg_stride(map);
++ }
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 06657412c6d8e..96d32766e93c6 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3910,5 +3910,34 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
+ }
+ }
+ },
++{
++ /* Advanced modes of the Mythware XA001AU.
++	 * In its standard mode, the Mythware XA001AU has ID ffad:a001.
++ */
++ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "Mythware",
++ .product_name = "XA001AU",
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_IGNORE_INTERFACE,
++ },
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
+
+ #undef USB_DEVICE_VENDOR_SPEC
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+index 472bd023e2a5f..b501b366367f7 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+@@ -72,7 +72,8 @@ test_span_gre_ttl()
+
+ RET=0
+
+- mirror_install $swp1 ingress $tundev "matchall $tcflags"
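++	# mirror only ICMP so unrelated traffic does not skew the TTL counters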
++ mirror_install $swp1 ingress $tundev \
++ "prot ip flower $tcflags ip_prot icmp"
+ tc filter add dev $h3 ingress pref 77 prot $prot \
+ flower ip_ttl 50 action pass
+
+diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
+index b11d8e6b5bc14..b7cdf75efb5f9 100755
+--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
++++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
+@@ -49,8 +49,8 @@ match_dst_mac_test()
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched on a wrong filter"
+
+- tc_check_packets "dev $h2 ingress" 102 1
+- check_err $? "Did not match on correct filter"
++ tc_check_packets "dev $h2 ingress" 102 0
++ check_fail $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+@@ -75,8 +75,8 @@ match_src_mac_test()
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched on a wrong filter"
+
+- tc_check_packets "dev $h2 ingress" 102 1
+- check_err $? "Did not match on correct filter"
++ tc_check_packets "dev $h2 ingress" 102 0
++ check_fail $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower