summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Mike Pagano <mpagano@gentoo.org> 2020-10-29 07:19:22 -0400
committer: Mike Pagano <mpagano@gentoo.org> 2020-10-29 07:19:22 -0400
commit041d5eccd7e071fa162d963f85c8ef7cbe2dd595 (patch)
treec833737bfabddf2565d07f6fd8d51ebfaf9f0cac /1072_linux-5.4.73.patch
parentLinux patch 5.4.72 (diff)
downloadlinux-patches-041d5eccd7e071fa162d963f85c8ef7cbe2dd595.tar.gz
linux-patches-041d5eccd7e071fa162d963f85c8ef7cbe2dd595.tar.bz2
linux-patches-041d5eccd7e071fa162d963f85c8ef7cbe2dd595.zip
Linux patch 5.4.73
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1072_linux-5.4.73.patch')
-rw-r--r--  1072_linux-5.4.73.patch  13402
1 files changed, 13402 insertions, 0 deletions
diff --git a/1072_linux-5.4.73.patch b/1072_linux-5.4.73.patch
new file mode 100644
index 00000000..4ca07f20
--- /dev/null
+++ b/1072_linux-5.4.73.patch
@@ -0,0 +1,13402 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 13984b6cc3225..988a0d2535b25 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -567,7 +567,7 @@
+ loops can be debugged more effectively on production
+ systems.
+
+- clearcpuid=BITNUM [X86]
++ clearcpuid=BITNUM[,BITNUM...] [X86]
+ Disable CPUID feature X for the kernel. See
+ arch/x86/include/asm/cpufeatures.h for the valid bit
+ numbers. Note the Linux specific bits are not necessarily
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 8d4ad1d1ae26f..8af3771a3ebf2 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -1000,12 +1000,14 @@ icmp_ratelimit - INTEGER
+ icmp_msgs_per_sec - INTEGER
+ Limit maximal number of ICMP packets sent per second from this host.
+ Only messages whose type matches icmp_ratemask (see below) are
+- controlled by this limit.
++ controlled by this limit. For security reasons, the precise count
++ of messages per second is randomized.
+ Default: 1000
+
+ icmp_msgs_burst - INTEGER
+ icmp_msgs_per_sec controls number of ICMP packets sent per second,
+ while icmp_msgs_burst controls the burst size of these packets.
++ For security reasons, the precise burst size is randomized.
+ Default: 50
+
+ icmp_ratemask - INTEGER
+diff --git a/Makefile b/Makefile
+index 8db75cc76ed16..f9a8d76a693eb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 72
++SUBLEVEL = 73
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
+index ce81018345184..6b5c54576f54d 100644
+--- a/arch/arc/plat-hsdk/Kconfig
++++ b/arch/arc/plat-hsdk/Kconfig
+@@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
+ select ARC_HAS_ACCL_REGS
+ select ARC_IRQ_NO_AUTOSAVE
+ select CLK_HSDK
++ select RESET_CONTROLLER
+ select RESET_HSDK
+ select HAVE_PCI
+diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
+index 3a96b5538a2a1..540880f0413fd 100644
+--- a/arch/arm/boot/dts/imx6sl.dtsi
++++ b/arch/arm/boot/dts/imx6sl.dtsi
+@@ -936,8 +936,10 @@
+ };
+
+ rngb: rngb@21b4000 {
++ compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
+ reg = <0x021b4000 0x4000>;
+ interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6SL_CLK_DUMMY>;
+ };
+
+ weim: weim@21b8000 {
+diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
+index db2033f674c67..3efe9d41c2bb6 100644
+--- a/arch/arm/boot/dts/meson8.dtsi
++++ b/arch/arm/boot/dts/meson8.dtsi
+@@ -230,8 +230,6 @@
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/owl-s500.dtsi b/arch/arm/boot/dts/owl-s500.dtsi
+index 5ceb6cc4451d2..1dbe4e8b38ac7 100644
+--- a/arch/arm/boot/dts/owl-s500.dtsi
++++ b/arch/arm/boot/dts/owl-s500.dtsi
+@@ -84,21 +84,21 @@
+ global_timer: timer@b0020200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0xb0020200 0x100>;
+- interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ status = "disabled";
+ };
+
+ twd_timer: timer@b0020600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xb0020600 0x20>;
+- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ status = "disabled";
+ };
+
+ twd_wdt: wdt@b0020620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0xb0020620 0xe0>;
+- interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
++ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+index 42d62d1ba1dc7..ea15073f0c79c 100644
+--- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
++++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts
+@@ -223,16 +223,16 @@
+ };
+
+ &reg_dc1sw {
+- regulator-min-microvolt = <3000000>;
+- regulator-max-microvolt = <3000000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-gmac-phy";
+ };
+
+ &reg_dcdc1 {
+ regulator-always-on;
+- regulator-min-microvolt = <3000000>;
+- regulator-max-microvolt = <3000000>;
+- regulator-name = "vcc-3v0";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-name = "vcc-3v3";
+ };
+
+ &reg_dcdc2 {
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 6bc3000deb86e..676cc2a318f41 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -777,6 +777,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
+
+ pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
+ soc_pm.data.pmc = of_iomap(pmc_np, 0);
++ of_node_put(pmc_np);
+ if (!soc_pm.data.pmc) {
+ pr_err("AT91: PM not supported, PMC not found\n");
+ return;
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index 6f5f89711f256..a92d277f81a08 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ */
+ if (mpuss_can_lose_context) {
+ error = cpu_cluster_pm_enter();
+- if (error)
++ if (error) {
++ omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+ goto cpu_cluster_pm_out;
++ }
+ }
+ }
+
+diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
+index 58c5ef3cf1d7e..2d370f7f75fa2 100644
+--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
++++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
+@@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
+ .dev_id = "s3c2410-sdi",
+ .table = {
+ /* Card detect S3C2410_GPG(10) */
+- GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
+ { },
+ },
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c
+index 74d6b68e91c74..8d9d8e7c71d4c 100644
+--- a/arch/arm/mach-s3c24xx/mach-h1940.c
++++ b/arch/arm/mach-s3c24xx/mach-h1940.c
+@@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
+ .dev_id = "s3c2410-sdi",
+ .table = {
+ /* Card detect S3C2410_GPF(5) */
+- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
+ /* Write protect S3C2410_GPH(8) */
+- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
+ { },
+ },
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
+index 9035f868fb34e..3a5b1124037b2 100644
+--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
++++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
+@@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
+ .dev_id = "s3c2410-sdi",
+ .table = {
+ /* Card detect S3C2410_GPG(8) */
+- GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
+ /* Write protect S3C2410_GPH(8) */
+- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
+ { },
+ },
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
+index d856f23939aff..ffa20f52aa832 100644
+--- a/arch/arm/mach-s3c24xx/mach-n30.c
++++ b/arch/arm/mach-s3c24xx/mach-n30.c
+@@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
+ .dev_id = "s3c2410-sdi",
+ .table = {
+ /* Card detect S3C2410_GPF(1) */
+- GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
+ /* Write protect S3C2410_GPG(10) */
+- GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
+ { },
+ },
+ };
+diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
+index 29f9b345a5311..534e9c1d8161f 100644
+--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
++++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
+@@ -567,9 +567,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
+ .dev_id = "s3c2410-sdi",
+ .table = {
+ /* Card detect S3C2410_GPF(5) */
+- GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
+ /* Write protect S3C2410_GPH(8) */
+- GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
++ GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
+ { },
+ },
+ };
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index 12c26eb88afbc..43d91bfd23600 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
+
+ ret = of_property_read_u32(np, "prefetch-data", &val);
+ if (ret == 0) {
+- if (val)
++ if (val) {
+ prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
+- else
++ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
++ } else {
+ prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
++ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
++ }
++ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF prefetch-data property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "prefetch-instr", &val);
+ if (ret == 0) {
+- if (val)
++ if (val) {
+ prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
+- else
++ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
++ } else {
+ prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
++ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
++ }
++ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF prefetch-instr property value is missing\n");
+ }
+diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi
+index 2006ad5424fa6..f8eb72bb41254 100644
+--- a/arch/arm64/boot/dts/actions/s700.dtsi
++++ b/arch/arm64/boot/dts/actions/s700.dtsi
+@@ -231,7 +231,7 @@
+
+ pinctrl: pinctrl@e01b0000 {
+ compatible = "actions,s700-pinctrl";
+- reg = <0x0 0xe01b0000 0x0 0x1000>;
++ reg = <0x0 0xe01b0000 0x0 0x100>;
+ clocks = <&cmu CLK_GPIO>;
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 136>;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+index eaf8f83794fd9..8466d44ee0b15 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+@@ -155,8 +155,7 @@
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
++ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp",
+ "gpmmu",
+ "pp",
+@@ -167,8 +166,7 @@
+ "pp2",
+ "ppmmu2",
+ "pp3",
+- "ppmmu3",
+- "pmu";
++ "ppmmu3";
+ clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
+ clock-names = "bus", "core";
+ resets = <&ccu RST_BUS_GPU>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+index 8647da7d6609b..f6694aad84db3 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+@@ -43,13 +43,13 @@
+
+ white {
+ label = "vim3:white:sys";
+- gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
++ gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ red {
+ label = "vim3:red";
+- gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
++ gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index bc8540f879654..f1011bcd5ed5a 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -516,6 +516,7 @@
+ gpc: gpc@303a0000 {
+ compatible = "fsl,imx8mq-gpc";
+ reg = <0x303a0000 0x10000>;
++ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 340da154d4e37..d95273af9f1e4 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -175,14 +175,14 @@
+ };
+
+ thermal-zones {
+- cpu0_1-thermal {
++ cpu0-1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 4>;
+
+ trips {
+- cpu0_1_alert0: trip-point@0 {
++ cpu0_1_alert0: trip-point0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+@@ -205,7 +205,7 @@
+ };
+ };
+
+- cpu2_3-thermal {
++ cpu2-3-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+@@ -934,7 +934,7 @@
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+- interrupts = <0 0>;
++ interrupts = <0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+@@ -966,7 +966,7 @@
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+- interrupts = <4 0>;
++ interrupts = <4>;
+
+ assigned-clocks = <&gcc BYTE0_CLK_SRC>,
+ <&gcc PCLK0_CLK_SRC>;
+diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
+index 9dd2df1cbf47d..40df4d95a47ac 100644
+--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
+@@ -113,7 +113,7 @@
+
+ wcd_codec: codec@f000 {
+ compatible = "qcom,pm8916-wcd-analog-codec";
+- reg = <0xf000 0x200>;
++ reg = <0xf000>;
+ reg-names = "pmic-codec-core";
+ clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "mclk";
+diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+index a1c2de90e4706..73ded80a79ba0 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+@@ -1212,9 +1212,8 @@
+ reg = <0 0xe6ea0000 0 0x0064>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 210>;
+- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+- <&dmac2 0x43>, <&dmac2 0x42>;
+- dma-names = "tx", "rx", "tx", "rx";
++ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
++ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 210>;
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+index 455954c3d98ea..dabee157119f9 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+@@ -1168,9 +1168,8 @@
+ reg = <0 0xe6ea0000 0 0x0064>;
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 210>;
+- dmas = <&dmac1 0x43>, <&dmac1 0x42>,
+- <&dmac2 0x43>, <&dmac2 0x42>;
+- dma-names = "tx", "rx", "tx", "rx";
++ dmas = <&dmac0 0x43>, <&dmac0 0x42>;
++ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 210>;
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+index 9aa67340a4d8c..a2645262f8623 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+@@ -419,7 +419,7 @@
+ };
+
+ i2c0: i2c@ff020000 {
+- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
++ compatible = "cdns,i2c-r1p14";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 17 4>;
+@@ -429,7 +429,7 @@
+ };
+
+ i2c1: i2c@ff030000 {
+- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
++ compatible = "cdns,i2c-r1p14";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 18 4>;
+diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+index 3f9ae3585ab98..80c9534148821 100644
+--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
++++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
+@@ -13,20 +13,19 @@
+ */
+ #define MAX_EA_BITS_PER_CONTEXT 46
+
+-#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
+
+ /*
+- * Our page table limit us to 64TB. Hence for the kernel mapping,
+- * each MAP area is limited to 16 TB.
+- * The four map areas are: linear mapping, vmap, IO and vmemmap
++ * Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
++ * of vmemmap space. To better support sparse memory layout, we use 61TB
++ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
+ */
++#define REGION_SHIFT (40)
+ #define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
+
+ /*
+- * Define the address range of the kernel non-linear virtual area
+- * 16TB
++ * Define the address range of the kernel non-linear virtual area (61TB)
+ */
+-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
++#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
+
+ #ifndef __ASSEMBLY__
+ #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
+diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
+index 28c3d936fdf32..bea7a2405ba5d 100644
+--- a/arch/powerpc/include/asm/drmem.h
++++ b/arch/powerpc/include/asm/drmem.h
+@@ -8,14 +8,13 @@
+ #ifndef _ASM_POWERPC_LMB_H
+ #define _ASM_POWERPC_LMB_H
+
++#include <linux/sched.h>
++
+ struct drmem_lmb {
+ u64 base_addr;
+ u32 drc_index;
+ u32 aa_index;
+ u32 flags;
+-#ifdef CONFIG_MEMORY_HOTPLUG
+- int nid;
+-#endif
+ };
+
+ struct drmem_lmb_info {
+@@ -26,8 +25,22 @@ struct drmem_lmb_info {
+
+ extern struct drmem_lmb_info *drmem_info;
+
++static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
++ const struct drmem_lmb *start)
++{
++ /*
++ * DLPAR code paths can take several milliseconds per element
++ * when interacting with firmware. Ensure that we don't
++ * unfairly monopolize the CPU.
++ */
++ if (((++lmb - start) % 16) == 0)
++ cond_resched();
++
++ return lmb;
++}
++
+ #define for_each_drmem_lmb_in_range(lmb, start, end) \
+- for ((lmb) = (start); (lmb) < (end); (lmb)++)
++ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
+
+ #define for_each_drmem_lmb(lmb) \
+ for_each_drmem_lmb_in_range((lmb), \
+@@ -103,22 +116,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
+ lmb->aa_index = 0xffffffff;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTPLUG
+-static inline void lmb_set_nid(struct drmem_lmb *lmb)
+-{
+- lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
+-}
+-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
+-{
+- lmb->nid = -1;
+-}
+-#else
+-static inline void lmb_set_nid(struct drmem_lmb *lmb)
+-{
+-}
+-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
+-{
+-}
+-#endif
+-
+ #endif /* _ASM_POWERPC_LMB_H */
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index b3cbb1136bce0..34d08ff21b988 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -796,7 +796,7 @@
+ #define THRM1_TIN (1 << 31)
+ #define THRM1_TIV (1 << 30)
+ #define THRM1_THRES(x) ((x&0x7f)<<23)
+-#define THRM3_SITV(x) ((x&0x3fff)<<1)
++#define THRM3_SITV(x) ((x & 0x1fff) << 1)
+ #define THRM1_TID (1<<2)
+ #define THRM1_TIE (1<<1)
+ #define THRM1_V (1<<0)
+diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
+index 7f3a8b9023254..02a1c18cdba3d 100644
+--- a/arch/powerpc/include/asm/tlb.h
++++ b/arch/powerpc/include/asm/tlb.h
+@@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
+ return false;
+ return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
+ }
+-static inline void mm_reset_thread_local(struct mm_struct *mm)
+-{
+- WARN_ON(atomic_read(&mm->context.copros) > 0);
+- /*
+- * It's possible for mm_access to take a reference on mm_users to
+- * access the remote mm from another thread, but it's not allowed
+- * to set mm_cpumask, so mm_users may be > 1 here.
+- */
+- WARN_ON(current->mm != mm);
+- atomic_set(&mm->context.active_cpus, 1);
+- cpumask_clear(mm_cpumask(mm));
+- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+-}
+ #else /* CONFIG_PPC_BOOK3S_64 */
+ static inline int mm_is_thread_local(struct mm_struct *mm)
+ {
+diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
+index e2ab8a111b693..0b4694b8d2482 100644
+--- a/arch/powerpc/kernel/tau_6xx.c
++++ b/arch/powerpc/kernel/tau_6xx.c
+@@ -13,13 +13,14 @@
+ */
+
+ #include <linux/errno.h>
+-#include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/param.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/workqueue.h>
+
+ #include <asm/io.h>
+ #include <asm/reg.h>
+@@ -39,9 +40,7 @@ static struct tau_temp
+ unsigned char grew;
+ } tau[NR_CPUS];
+
+-struct timer_list tau_timer;
+-
+-#undef DEBUG
++static bool tau_int_enable;
+
+ /* TODO: put these in a /proc interface, with some sanity checks, and maybe
+ * dynamic adjustment to minimize # of interrupts */
+@@ -50,72 +49,49 @@ struct timer_list tau_timer;
+ #define step_size 2 /* step size when temp goes out of range */
+ #define window_expand 1 /* expand the window by this much */
+ /* configurable values for shrinking the window */
+-#define shrink_timer 2*HZ /* period between shrinking the window */
++#define shrink_timer 2000 /* period between shrinking the window */
+ #define min_window 2 /* minimum window size, degrees C */
+
+ static void set_thresholds(unsigned long cpu)
+ {
+-#ifdef CONFIG_TAU_INT
+- /*
+- * setup THRM1,
+- * threshold, valid bit, enable interrupts, interrupt when below threshold
+- */
+- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
++ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
+
+- /* setup THRM2,
+- * threshold, valid bit, enable interrupts, interrupt when above threshold
+- */
+- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
+-#else
+- /* same thing but don't enable interrupts */
+- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
+- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
+-#endif
++ /* setup THRM1, threshold, valid bit, interrupt when below threshold */
++ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
++
++ /* setup THRM2, threshold, valid bit, interrupt when above threshold */
++ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
+ }
+
+ static void TAUupdate(int cpu)
+ {
+- unsigned thrm;
+-
+-#ifdef DEBUG
+- printk("TAUupdate ");
+-#endif
++ u32 thrm;
++ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
+
+ /* if both thresholds are crossed, the step_sizes cancel out
+ * and the window winds up getting expanded twice. */
+- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
+- if(thrm & THRM1_TIN){ /* crossed low threshold */
+- if (tau[cpu].low >= step_size){
+- tau[cpu].low -= step_size;
+- tau[cpu].high -= (step_size - window_expand);
+- }
+- tau[cpu].grew = 1;
+-#ifdef DEBUG
+- printk("low threshold crossed ");
+-#endif
++ thrm = mfspr(SPRN_THRM1);
++ if ((thrm & bits) == bits) {
++ mtspr(SPRN_THRM1, 0);
++
++ if (tau[cpu].low >= step_size) {
++ tau[cpu].low -= step_size;
++ tau[cpu].high -= (step_size - window_expand);
+ }
++ tau[cpu].grew = 1;
++ pr_debug("%s: low threshold crossed\n", __func__);
+ }
+- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
+- if(thrm & THRM1_TIN){ /* crossed high threshold */
+- if (tau[cpu].high <= 127-step_size){
+- tau[cpu].low += (step_size - window_expand);
+- tau[cpu].high += step_size;
+- }
+- tau[cpu].grew = 1;
+-#ifdef DEBUG
+- printk("high threshold crossed ");
+-#endif
++ thrm = mfspr(SPRN_THRM2);
++ if ((thrm & bits) == bits) {
++ mtspr(SPRN_THRM2, 0);
++
++ if (tau[cpu].high <= 127 - step_size) {
++ tau[cpu].low += (step_size - window_expand);
++ tau[cpu].high += step_size;
+ }
++ tau[cpu].grew = 1;
++ pr_debug("%s: high threshold crossed\n", __func__);
+ }
+-
+-#ifdef DEBUG
+- printk("grew = %d\n", tau[cpu].grew);
+-#endif
+-
+-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
+- set_thresholds(cpu);
+-#endif
+-
+ }
+
+ #ifdef CONFIG_TAU_INT
+@@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
+ static void tau_timeout(void * info)
+ {
+ int cpu;
+- unsigned long flags;
+ int size;
+ int shrink;
+
+- /* disabling interrupts *should* be okay */
+- local_irq_save(flags);
+ cpu = smp_processor_id();
+
+-#ifndef CONFIG_TAU_INT
+- TAUupdate(cpu);
+-#endif
++ if (!tau_int_enable)
++ TAUupdate(cpu);
++
++ /* Stop thermal sensor comparisons and interrupts */
++ mtspr(SPRN_THRM3, 0);
+
+ size = tau[cpu].high - tau[cpu].low;
+ if (size > min_window && ! tau[cpu].grew) {
+@@ -173,32 +148,26 @@ static void tau_timeout(void * info)
+
+ set_thresholds(cpu);
+
+- /*
+- * Do the enable every time, since otherwise a bunch of (relatively)
+- * complex sleep code needs to be added. One mtspr every time
+- * tau_timeout is called is probably not a big deal.
+- *
+- * Enable thermal sensor and set up sample interval timer
+- * need 20 us to do the compare.. until a nice 'cpu_speed' function
+- * call is implemented, just assume a 500 mhz clock. It doesn't really
+- * matter if we take too long for a compare since it's all interrupt
+- * driven anyway.
+- *
+- * use a extra long time.. (60 us @ 500 mhz)
++ /* Restart thermal sensor comparisons and interrupts.
++ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
++ * recommends that "the maximum value be set in THRM3 under all
++ * conditions."
+ */
+- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
+-
+- local_irq_restore(flags);
++ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
+ }
+
+-static void tau_timeout_smp(struct timer_list *unused)
+-{
++static struct workqueue_struct *tau_workq;
+
+- /* schedule ourselves to be run again */
+- mod_timer(&tau_timer, jiffies + shrink_timer) ;
++static void tau_work_func(struct work_struct *work)
++{
++ msleep(shrink_timer);
+ on_each_cpu(tau_timeout, NULL, 0);
++ /* schedule ourselves to be run again */
++ queue_work(tau_workq, work);
+ }
+
++DECLARE_WORK(tau_work, tau_work_func);
++
+ /*
+ * setup the TAU
+ *
+@@ -231,21 +200,19 @@ static int __init TAU_init(void)
+ return 1;
+ }
+
++ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
++ !strcmp(cur_cpu_spec->platform, "ppc750");
+
+- /* first, set up the window shrinking timer */
+- timer_setup(&tau_timer, tau_timeout_smp, 0);
+- tau_timer.expires = jiffies + shrink_timer;
+- add_timer(&tau_timer);
++ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
++ if (!tau_workq)
++ return -ENOMEM;
+
+ on_each_cpu(TAU_init_smp, NULL, 0);
+
+- printk("Thermal assist unit ");
+-#ifdef CONFIG_TAU_INT
+- printk("using interrupts, ");
+-#else
+- printk("using timers, ");
+-#endif
+- printk("shrink_timer: %d jiffies\n", shrink_timer);
++ queue_work(tau_workq, &tau_work);
++
++ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
++ tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
+ tau_initialized = 1;
+
+ return 0;
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 67af871190c6d..b0f240afffa22 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -639,19 +639,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
+ struct mm_struct *mm = arg;
+ unsigned long pid = mm->context.id;
+
++ /*
++ * A kthread could have done a mmget_not_zero() after the flushing CPU
++ * checked mm_is_singlethreaded, and be in the process of
++ * kthread_use_mm when interrupted here. In that case, current->mm will
++ * be set to mm, because kthread_use_mm() setting ->mm and switching to
++ * the mm is done with interrupts off.
++ */
+ if (current->mm == mm)
+- return; /* Local CPU */
++ goto out_flush;
+
+ if (current->active_mm == mm) {
+- /*
+- * Must be a kernel thread because sender is single-threaded.
+- */
+- BUG_ON(current->mm);
++ WARN_ON_ONCE(current->mm != NULL);
++ /* Is a kernel thread and is using mm as the lazy tlb */
+ mmgrab(&init_mm);
+- switch_mm(mm, &init_mm, current);
+ current->active_mm = &init_mm;
++ switch_mm_irqs_off(mm, &init_mm, current);
+ mmdrop(mm);
+ }
++
++ atomic_dec(&mm->context.active_cpus);
++ cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
++
++out_flush:
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ }
+
+@@ -666,7 +676,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
+ */
+ smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
+ (void *)mm, 1);
+- mm_reset_thread_local(mm);
+ }
+
+ void radix__flush_tlb_mm(struct mm_struct *mm)
+diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
+index 59327cefbc6a6..873fcfc7b8756 100644
+--- a/arch/powerpc/mm/drmem.c
++++ b/arch/powerpc/mm/drmem.c
+@@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
+ if (!drmem_info->lmbs)
+ return;
+
+- for_each_drmem_lmb(lmb) {
++ for_each_drmem_lmb(lmb)
+ read_drconf_v1_cell(lmb, &prop);
+- lmb_set_nid(lmb);
+- }
+ }
+
+ static void __init init_drmem_v2_lmbs(const __be32 *prop)
+@@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
+
+ lmb->aa_index = dr_cell.aa_index;
+ lmb->flags = dr_cell.flags;
+-
+- lmb_set_nid(lmb);
+ }
+ }
+ }
+diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
+index e608f9db12ddc..8965b4463d433 100644
+--- a/arch/powerpc/perf/hv-gpci-requests.h
++++ b/arch/powerpc/perf/hv-gpci-requests.h
+@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
+
+ #define REQUEST_NAME system_performance_capabilities
+ #define REQUEST_NUM 0x40
+-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
+ #include I(REQUEST_BEGIN)
+ REQUEST(__field(0, 1, perf_collect_privileged)
+ __field(0x1, 1, capability_mask)
+@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
+
+ #define REQUEST_NAME system_hypervisor_times
+ #define REQUEST_NUM 0xF0
+-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
+ #include I(REQUEST_BEGIN)
+ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
+ __count(0x8, 8, time_spent_processing_virtual_processor_timers)
+@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
+
+ #define REQUEST_NAME system_tlbie_count_and_time
+ #define REQUEST_NUM 0xF4
+-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
++#define REQUEST_IDX_KIND "starting_index=0xffffffff"
+ #include I(REQUEST_BEGIN)
+ REQUEST(__count(0, 8, tlbie_instructions_issued)
+ /*
+diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
+index 4c86da5eb28ab..0b5c8f4fbdbfd 100644
+--- a/arch/powerpc/perf/isa207-common.c
++++ b/arch/powerpc/perf/isa207-common.c
+@@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+
+ mask |= CNST_PMC_MASK(pmc);
+ value |= CNST_PMC_VAL(pmc);
++
++ /*
++ * PMC5 and PMC6 are used to count cycles and instructions and
++ * they do not support most of the constraint bits. Add a check
++ * to exclude PMC5/6 from most of the constraints except for
++ * EBB/BHRB.
++ */
++ if (pmc >= 5)
++ goto ebb_bhrb;
+ }
+
+ if (pmc <= 4) {
+@@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+ }
+ }
+
++ebb_bhrb:
+ if (!pmc && ebb)
+ /* EBB events must specify the PMC */
+ return -1;
+diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
+index d82e3664ffdf8..18792a5b003a0 100644
+--- a/arch/powerpc/platforms/Kconfig
++++ b/arch/powerpc/platforms/Kconfig
+@@ -219,12 +219,11 @@ config TAU
+ temperature within 2-4 degrees Celsius. This option shows the current
+ on-die temperature in /proc/cpuinfo if the cpu supports it.
+
+- Unfortunately, on some chip revisions, this sensor is very inaccurate
+- and in many cases, does not work at all, so don't assume the cpu
+- temp is actually what /proc/cpuinfo says it is.
++ Unfortunately, this sensor is very inaccurate when uncalibrated, so
++ don't assume the cpu temp is actually what /proc/cpuinfo says it is.
+
+ config TAU_INT
+- bool "Interrupt driven TAU driver (DANGEROUS)"
++ bool "Interrupt driven TAU driver (EXPERIMENTAL)"
+ depends on TAU
+ ---help---
+ The TAU supports an interrupt driven mode which causes an interrupt
+@@ -232,12 +231,7 @@ config TAU_INT
+ to get notified the temp has exceeded a range. With this option off,
+ a timer is used to re-check the temperature periodically.
+
+- However, on some cpus it appears that the TAU interrupt hardware
+- is buggy and can cause a situation which would lead unexplained hard
+- lockups.
+-
+- Unless you are extending the TAU driver, or enjoy kernel/hardware
+- debugging, leave this option off.
++ If in doubt, say N here.
+
+ config TAU_AVERAGE
+ bool "Average high and low temp"
+diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
+index 543c816fa99ef..0e6693bacb7e7 100644
+--- a/arch/powerpc/platforms/powernv/opal-dump.c
++++ b/arch/powerpc/platforms/powernv/opal-dump.c
+@@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
+ return count;
+ }
+
+-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
+- uint32_t type)
++static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
+ {
+ struct dump_obj *dump;
+ int rc;
+
+ dump = kzalloc(sizeof(*dump), GFP_KERNEL);
+ if (!dump)
+- return NULL;
++ return;
+
+ dump->kobj.kset = dump_kset;
+
+@@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
+ rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
+ if (rc) {
+ kobject_put(&dump->kobj);
+- return NULL;
++ return;
+ }
+
++ /*
++ * As soon as the sysfs file for this dump is created/activated there is
++ * a chance the opal_errd daemon (or any userspace) might read and
++ * acknowledge the dump before kobject_uevent() is called. If that
++ * happens then there is a potential race between
++ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
++ * use-after-free of a kernfs object resulting in a kernel crash.
++ *
++ * To avoid that, we need to take a reference on behalf of the bin file,
++ * so that our reference remains valid while we call kobject_uevent().
++ * We then drop our reference before exiting the function, leaving the
++ * bin file to drop the last reference (if it hasn't already).
++ */
++
++ /* Take a reference for the bin file */
++ kobject_get(&dump->kobj);
+ rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
+- if (rc) {
++ if (rc == 0) {
++ kobject_uevent(&dump->kobj, KOBJ_ADD);
++
++ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
++ __func__, dump->id, dump->size);
++ } else {
++ /* Drop reference count taken for bin file */
+ kobject_put(&dump->kobj);
+- return NULL;
+ }
+
+- pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+- __func__, dump->id, dump->size);
+-
+- kobject_uevent(&dump->kobj, KOBJ_ADD);
+-
+- return dump;
++ /* Drop our reference */
++ kobject_put(&dump->kobj);
++ return;
+ }
+
+ static irqreturn_t process_dump(int irq, void *data)
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index e7d23a933a0d3..66b32f46702de 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -376,25 +376,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
+
+ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
+ {
++ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int rc;
+
+ if (!lmb_is_removable(lmb))
+ return -EINVAL;
+
++ mem_block = lmb_to_memblock(lmb);
++ if (mem_block == NULL)
++ return -EINVAL;
++
+ rc = dlpar_offline_lmb(lmb);
+- if (rc)
++ if (rc) {
++ put_device(&mem_block->dev);
+ return rc;
++ }
+
+ block_sz = pseries_memory_block_size();
+
+- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
++ __remove_memory(mem_block->nid, lmb->base_addr, block_sz);
++ put_device(&mem_block->dev);
+
+ /* Update memory regions for memory remove */
+ memblock_remove(lmb->base_addr, block_sz);
+
+ invalidate_lmb_associativity_index(lmb);
+- lmb_clear_nid(lmb);
+ lmb->flags &= ~DRCONF_MEM_ASSIGNED;
+
+ return 0;
+@@ -651,7 +658,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
+ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+ {
+ unsigned long block_sz;
+- int rc;
++ int nid, rc;
+
+ if (lmb->flags & DRCONF_MEM_ASSIGNED)
+ return -EINVAL;
+@@ -662,11 +669,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+ return rc;
+ }
+
+- lmb_set_nid(lmb);
+ block_sz = memory_block_size_bytes();
+
++ /* Find the node id for this address. */
++ nid = memory_add_physaddr_to_nid(lmb->base_addr);
++
+ /* Add the memory */
+- rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
++ rc = __add_memory(nid, lmb->base_addr, block_sz);
+ if (rc) {
+ invalidate_lmb_associativity_index(lmb);
+ return rc;
+@@ -674,9 +683,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+
+ rc = dlpar_online_lmb(lmb);
+ if (rc) {
+- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
++ __remove_memory(nid, lmb->base_addr, block_sz);
+ invalidate_lmb_associativity_index(lmb);
+- lmb_clear_nid(lmb);
+ } else {
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
+ }
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index b3c4848869e52..b658fa627a34b 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -494,18 +494,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
+ return 0; /* need to perform reset */
+ }
+
++static int mce_handle_err_realmode(int disposition, u8 error_type)
++{
++#ifdef CONFIG_PPC_BOOK3S_64
++ if (disposition == RTAS_DISP_NOT_RECOVERED) {
++ switch (error_type) {
++ case MC_ERROR_TYPE_SLB:
++ case MC_ERROR_TYPE_ERAT:
++ /*
++ * Store the old slb content in paca before flushing.
++ * Print this when we go to virtual mode.
++ * There are chances that we may hit MCE again if there
++ * is a parity error on the SLB entry we trying to read
++ * for saving. Hence limit the slb saving to single
++ * level of recursion.
++ */
++ if (local_paca->in_mce == 1)
++ slb_save_contents(local_paca->mce_faulty_slbs);
++ flush_and_reload_slb();
++ disposition = RTAS_DISP_FULLY_RECOVERED;
++ break;
++ default:
++ break;
++ }
++ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
++ /* Platform corrected itself but could be degraded */
++ pr_err("MCE: limited recovery, system may be degraded\n");
++ disposition = RTAS_DISP_FULLY_RECOVERED;
++ }
++#endif
++ return disposition;
++}
+
+-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
++static int mce_handle_err_virtmode(struct pt_regs *regs,
++ struct rtas_error_log *errp,
++ struct pseries_mc_errorlog *mce_log,
++ int disposition)
+ {
+ struct mce_error_info mce_err = { 0 };
+- unsigned long eaddr = 0, paddr = 0;
+- struct pseries_errorlog *pseries_log;
+- struct pseries_mc_errorlog *mce_log;
+- int disposition = rtas_error_disposition(errp);
+ int initiator = rtas_error_initiator(errp);
+ int severity = rtas_error_severity(errp);
++ unsigned long eaddr = 0, paddr = 0;
+ u8 error_type, err_sub_type;
+
++ if (!mce_log)
++ goto out;
++
++ error_type = mce_log->error_type;
++ err_sub_type = rtas_mc_error_sub_type(mce_log);
++
+ if (initiator == RTAS_INITIATOR_UNKNOWN)
+ mce_err.initiator = MCE_INITIATOR_UNKNOWN;
+ else if (initiator == RTAS_INITIATOR_CPU)
+@@ -544,18 +581,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+ mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
+ mce_err.error_class = MCE_ECLASS_UNKNOWN;
+
+- if (!rtas_error_extended(errp))
+- goto out;
+-
+- pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+- if (pseries_log == NULL)
+- goto out;
+-
+- mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+- error_type = mce_log->error_type;
+- err_sub_type = rtas_mc_error_sub_type(mce_log);
+-
+- switch (mce_log->error_type) {
++ switch (error_type) {
+ case MC_ERROR_TYPE_UE:
+ mce_err.error_type = MCE_ERROR_TYPE_UE;
+ switch (err_sub_type) {
+@@ -652,37 +678,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+ mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
+ break;
+ }
++out:
++ save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
++ &mce_err, regs->nip, eaddr, paddr);
++ return disposition;
++}
+
+-#ifdef CONFIG_PPC_BOOK3S_64
+- if (disposition == RTAS_DISP_NOT_RECOVERED) {
+- switch (error_type) {
+- case MC_ERROR_TYPE_SLB:
+- case MC_ERROR_TYPE_ERAT:
+- /*
+- * Store the old slb content in paca before flushing.
+- * Print this when we go to virtual mode.
+- * There are chances that we may hit MCE again if there
+- * is a parity error on the SLB entry we trying to read
+- * for saving. Hence limit the slb saving to single
+- * level of recursion.
+- */
+- if (local_paca->in_mce == 1)
+- slb_save_contents(local_paca->mce_faulty_slbs);
+- flush_and_reload_slb();
+- disposition = RTAS_DISP_FULLY_RECOVERED;
+- break;
+- default:
+- break;
+- }
+- } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
+- /* Platform corrected itself but could be degraded */
+- printk(KERN_ERR "MCE: limited recovery, system may "
+- "be degraded\n");
+- disposition = RTAS_DISP_FULLY_RECOVERED;
+- }
+-#endif
++static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
++{
++ struct pseries_errorlog *pseries_log;
++ struct pseries_mc_errorlog *mce_log = NULL;
++ int disposition = rtas_error_disposition(errp);
++ u8 error_type;
++
++ if (!rtas_error_extended(errp))
++ goto out;
++
++ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
++ if (!pseries_log)
++ goto out;
++
++ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
++ error_type = mce_log->error_type;
++
++ disposition = mce_handle_err_realmode(disposition, error_type);
+
+-out:
+ /*
+ * Enable translation as we will be accessing per-cpu variables
+ * in save_mce_event() which may fall outside RMO region, also
+@@ -693,10 +713,10 @@ out:
+ * Note: All the realmode handling like flushing SLB entries for
+ * SLB multihit is done by now.
+ */
++out:
+ mtmsr(mfmsr() | MSR_IR | MSR_DR);
+- save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
+- &mce_err, regs->nip, eaddr, paddr);
+-
++ disposition = mce_handle_err_virtmode(regs, errp, mce_log,
++ disposition);
+ return disposition;
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
+index bbb97169bf63e..6268545947b83 100644
+--- a/arch/powerpc/platforms/pseries/rng.c
++++ b/arch/powerpc/platforms/pseries/rng.c
+@@ -36,6 +36,7 @@ static __init int rng_init(void)
+
+ ppc_md.get_random_seed = pseries_get_random_long;
+
++ of_node_put(dn);
+ return 0;
+ }
+ machine_subsys_initcall(pseries, rng_init);
+diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
+index ad8117148ea3b..21b9d1bf39ff6 100644
+--- a/arch/powerpc/sysdev/xics/icp-hv.c
++++ b/arch/powerpc/sysdev/xics/icp-hv.c
+@@ -174,6 +174,7 @@ int icp_hv_init(void)
+
+ icp_ops = &icp_hv_ops;
+
++ of_node_put(np);
+ return 0;
+ }
+
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index c8862696a47b9..7d0394f4ebf97 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -5,15 +5,6 @@
+ #include "pgtable.h"
+ #include "../string.h"
+
+-/*
+- * __force_order is used by special_insns.h asm code to force instruction
+- * serialization.
+- *
+- * It is not referenced from the code, but GCC < 5 with -fPIE would fail
+- * due to an undefined symbol. Define it to make these ancient GCCs work.
+- */
+-unsigned long __force_order;
+-
+ #define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
+ #define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
+
+diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
+index fb616203ce427..be50ef8572cce 100644
+--- a/arch/x86/events/amd/iommu.c
++++ b/arch/x86/events/amd/iommu.c
+@@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
+ while (amd_iommu_v2_event_descs[i].attr.attr.name)
+ i++;
+
+- attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
++ attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index e5ad97a823426..1aaba2c8a9ba6 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -669,9 +669,7 @@ unlock:
+
+ static inline void intel_pmu_drain_pebs_buffer(void)
+ {
+- struct pt_regs regs;
+-
+- x86_pmu.drain_pebs(&regs);
++ x86_pmu.drain_pebs(NULL);
+ }
+
+ /*
+@@ -1736,6 +1734,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
+ struct x86_perf_regs perf_regs;
+ struct pt_regs *regs = &perf_regs.regs;
+ void *at = get_next_pebs_record_by_bit(base, top, bit);
++ struct pt_regs dummy_iregs;
+
+ if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+ /*
+@@ -1748,6 +1747,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
+ } else if (!intel_pmu_save_and_restart(event))
+ return;
+
++ if (!iregs)
++ iregs = &dummy_iregs;
++
+ while (count > 1) {
+ setup_sample(event, iregs, at, &data, regs);
+ perf_event_output(event, &data, regs);
+@@ -1757,16 +1759,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
+ }
+
+ setup_sample(event, iregs, at, &data, regs);
+-
+- /*
+- * All but the last records are processed.
+- * The last one is left to be able to call the overflow handler.
+- */
+- if (perf_event_overflow(event, &data, regs)) {
+- x86_pmu_stop(event, 0);
+- return;
++ if (iregs == &dummy_iregs) {
++ /*
++ * The PEBS records may be drained in the non-overflow context,
++ * e.g., large PEBS + context switch. Perf should treat the
++ * last record the same as other PEBS records, and doesn't
++ * invoke the generic overflow handler.
++ */
++ perf_event_output(event, &data, regs);
++ } else {
++ /*
++ * All but the last records are processed.
++ * The last one is left to be able to call the overflow handler.
++ */
++ if (perf_event_overflow(event, &data, regs))
++ x86_pmu_stop(event, 0);
+ }
+-
+ }
+
+ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index c37cb12d0ef68..aec6e63c6a04a 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -110,6 +110,10 @@
+ #define ICL_UNC_CBO_0_PER_CTR0 0x702
+ #define ICL_UNC_CBO_MSR_OFFSET 0x8
+
++/* ICL ARB register */
++#define ICL_UNC_ARB_PER_CTR 0x3b1
++#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
++
+ DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+ DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+ DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+@@ -297,15 +301,21 @@ void skl_uncore_cpu_init(void)
+ snb_uncore_arb.ops = &skl_uncore_msr_ops;
+ }
+
++static struct intel_uncore_ops icl_uncore_msr_ops = {
++ .disable_event = snb_uncore_msr_disable_event,
++ .enable_event = snb_uncore_msr_enable_event,
++ .read_counter = uncore_msr_read_counter,
++};
++
+ static struct intel_uncore_type icl_uncore_cbox = {
+ .name = "cbox",
+- .num_counters = 4,
++ .num_counters = 2,
+ .perf_ctr_bits = 44,
+ .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
+- .ops = &skl_uncore_msr_ops,
++ .ops = &icl_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+ };
+
+@@ -334,13 +344,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
+ .format_group = &icl_uncore_clock_format_group,
+- .ops = &skl_uncore_msr_ops,
++ .ops = &icl_uncore_msr_ops,
+ .event_descs = icl_uncore_events,
+ };
+
++static struct intel_uncore_type icl_uncore_arb = {
++ .name = "arb",
++ .num_counters = 1,
++ .num_boxes = 1,
++ .perf_ctr_bits = 44,
++ .perf_ctr = ICL_UNC_ARB_PER_CTR,
++ .event_ctl = ICL_UNC_ARB_PERFEVTSEL,
++ .event_mask = SNB_UNC_RAW_EVENT_MASK,
++ .ops = &icl_uncore_msr_ops,
++ .format_group = &snb_uncore_format_group,
++};
++
+ static struct intel_uncore_type *icl_msr_uncores[] = {
+ &icl_uncore_cbox,
+- &snb_uncore_arb,
++ &icl_uncore_arb,
+ &icl_uncore_clockbox,
+ NULL,
+ };
+@@ -358,7 +380,6 @@ void icl_uncore_cpu_init(void)
+ {
+ uncore_msr_uncores = icl_msr_uncores;
+ icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+- snb_uncore_arb.ops = &skl_uncore_msr_ops;
+ }
+
+ enum {
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index 6d37b8fcfc778..2e0cdc64cb50d 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -10,45 +10,47 @@
+ #include <linux/jump_label.h>
+
+ /*
+- * Volatile isn't enough to prevent the compiler from reordering the
+- * read/write functions for the control registers and messing everything up.
+- * A memory clobber would solve the problem, but would prevent reordering of
+- * all loads stores around it, which can hurt performance. Solution is to
+- * use a variable and mimic reads and writes to it to enforce serialization
++ * The compiler should not reorder volatile asm statements with respect to each
++ * other: they should execute in program order. However GCC 4.9.x and 5.x have
++ * a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
++ * volatile asm. The write functions are not affected since they have memory
++ * clobbers preventing reordering. To prevent reads from being reordered with
++ * respect to writes, use a dummy memory operand.
+ */
+-extern unsigned long __force_order;
++
++#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
+
+ void native_write_cr0(unsigned long val);
+
+ static inline unsigned long native_read_cr0(void)
+ {
+ unsigned long val;
+- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
++ asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ return val;
+ }
+
+ static inline unsigned long native_read_cr2(void)
+ {
+ unsigned long val;
+- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
++ asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ return val;
+ }
+
+ static inline void native_write_cr2(unsigned long val)
+ {
+- asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
++ asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
+ }
+
+ static inline unsigned long __native_read_cr3(void)
+ {
+ unsigned long val;
+- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
++ asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ return val;
+ }
+
+ static inline void native_write_cr3(unsigned long val)
+ {
+- asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
++ asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
+ }
+
+ static inline unsigned long native_read_cr4(void)
+@@ -63,10 +65,10 @@ static inline unsigned long native_read_cr4(void)
+ asm volatile("1: mov %%cr4, %0\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b)
+- : "=r" (val), "=m" (__force_order) : "0" (0));
++ : "=r" (val) : "0" (0), __FORCE_ORDER);
+ #else
+ /* CR4 always exists on x86_64. */
+- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
++ asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
+ #endif
+ return val;
+ }
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 9b3f25e146087..8a85c2e144a6f 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -377,7 +377,7 @@ void native_write_cr0(unsigned long val)
+ unsigned long bits_missing = 0;
+
+ set_register:
+- asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
++ asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
+
+ if (static_branch_likely(&cr_pinning)) {
+ if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
+@@ -396,7 +396,7 @@ void native_write_cr4(unsigned long val)
+ unsigned long bits_changed = 0;
+
+ set_register:
+- asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
++ asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
+
+ if (static_branch_likely(&cr_pinning)) {
+ if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index fd76e3733dd3d..92331de16d70e 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -388,10 +388,28 @@ static int msr_to_offset(u32 msr)
+ return -1;
+ }
+
++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
++ struct pt_regs *regs, int trapnr,
++ unsigned long error_code,
++ unsigned long fault_addr)
++{
++ pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
++ (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
++
++ show_stack_regs(regs);
++
++ panic("MCA architectural violation!\n");
++
++ while (true)
++ cpu_relax();
++
++ return true;
++}
++
+ /* MSR access wrappers used for error injection */
+ static u64 mce_rdmsrl(u32 msr)
+ {
+- u64 v;
++ DECLARE_ARGS(val, low, high);
+
+ if (__this_cpu_read(injectm.finished)) {
+ int offset = msr_to_offset(msr);
+@@ -401,21 +419,43 @@ static u64 mce_rdmsrl(u32 msr)
+ return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
+ }
+
+- if (rdmsrl_safe(msr, &v)) {
+- WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
+- /*
+- * Return zero in case the access faulted. This should
+- * not happen normally but can happen if the CPU does
+- * something weird, or if the code is buggy.
+- */
+- v = 0;
+- }
++ /*
++ * RDMSR on MCA MSRs should not fault. If they do, this is very much an
++ * architectural violation and needs to be reported to hw vendor. Panic
++ * the box to not allow any further progress.
++ */
++ asm volatile("1: rdmsr\n"
++ "2:\n"
++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
++ : EAX_EDX_RET(val, low, high) : "c" (msr));
+
+- return v;
++
++ return EAX_EDX_VAL(val, low, high);
++}
++
++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
++ struct pt_regs *regs, int trapnr,
++ unsigned long error_code,
++ unsigned long fault_addr)
++{
++ pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
++ (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
++ regs->ip, (void *)regs->ip);
++
++ show_stack_regs(regs);
++
++ panic("MCA architectural violation!\n");
++
++ while (true)
++ cpu_relax();
++
++ return true;
+ }
+
+ static void mce_wrmsrl(u32 msr, u64 v)
+ {
++ u32 low, high;
++
+ if (__this_cpu_read(injectm.finished)) {
+ int offset = msr_to_offset(msr);
+
+@@ -423,7 +463,15 @@ static void mce_wrmsrl(u32 msr, u64 v)
+ *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
+ return;
+ }
+- wrmsrl(msr, v);
++
++ low = (u32)v;
++ high = (u32)(v >> 32);
++
++ /* See comment in mce_rdmsrl() */
++ asm volatile("1: wrmsr\n"
++ "2:\n"
++ _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
++ : : "c" (msr), "a"(low), "d" (high) : "memory");
+ }
+
+ /*
+diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
+index 43031db429d24..231954fe5b4e6 100644
+--- a/arch/x86/kernel/cpu/mce/internal.h
++++ b/arch/x86/kernel/cpu/mce/internal.h
+@@ -172,4 +172,14 @@ extern bool amd_filter_mce(struct mce *m);
+ static inline bool amd_filter_mce(struct mce *m) { return false; };
+ #endif
+
++__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
++ struct pt_regs *regs, int trapnr,
++ unsigned long error_code,
++ unsigned long fault_addr);
++
++__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
++ struct pt_regs *regs, int trapnr,
++ unsigned long error_code,
++ unsigned long fault_addr);
++
+ #endif /* __X86_MCE_INTERNAL_H__ */
+diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
+index 87bcdc6dc2f0c..0d09eb13743b4 100644
+--- a/arch/x86/kernel/cpu/mce/severity.c
++++ b/arch/x86/kernel/cpu/mce/severity.c
+@@ -9,9 +9,11 @@
+ #include <linux/seq_file.h>
+ #include <linux/init.h>
+ #include <linux/debugfs.h>
+-#include <asm/mce.h>
+ #include <linux/uaccess.h>
+
++#include <asm/mce.h>
++#include <asm/intel-family.h>
++
+ #include "internal.h"
+
+ /*
+@@ -40,9 +42,14 @@ static struct severity {
+ unsigned char context;
+ unsigned char excp;
+ unsigned char covered;
++ unsigned char cpu_model;
++ unsigned char cpu_minstepping;
++ unsigned char bank_lo, bank_hi;
+ char *msg;
+ } severities[] = {
+ #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
++#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
++#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
+ #define KERNEL .context = IN_KERNEL
+ #define USER .context = IN_USER
+ #define KERNEL_RECOV .context = IN_KERNEL_RECOV
+@@ -97,7 +104,6 @@ static struct severity {
+ KEEP, "Corrected error",
+ NOSER, BITCLR(MCI_STATUS_UC)
+ ),
+-
+ /*
+ * known AO MCACODs reported via MCE or CMC:
+ *
+@@ -113,6 +119,18 @@ static struct severity {
+ AO, "Action optional: last level cache writeback error",
+ SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+ ),
++ /*
++ * Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
++ * to report uncorrected errors using CMCI with a special signature.
++ * UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
++ * in one of the memory controller banks.
++ * Set severity to "AO" for same action as normal patrol scrub error.
++ */
++ MCESEV(
++ AO, "Uncorrected Patrol Scrub Error",
++ SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
++ MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
++ ),
+
+ /* ignore OVER for UCNA */
+ MCESEV(
+@@ -320,6 +338,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
+ continue;
+ if (s->excp && excp != s->excp)
+ continue;
++ if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
++ continue;
++ if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
++ continue;
++ if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
++ continue;
+ if (msg)
+ *msg = s->msg;
+ s->covered = 1;
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index 6ce7e0a23268f..b271da0fa2193 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -242,9 +242,9 @@ static void __init fpu__init_system_ctx_switch(void)
+ */
+ static void __init fpu__init_parse_early_param(void)
+ {
+- char arg[32];
++ char arg[128];
+ char *argptr = arg;
+- int bit;
++ int arglen, res, bit;
+
+ #ifdef CONFIG_X86_32
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+@@ -267,12 +267,26 @@ static void __init fpu__init_parse_early_param(void)
+ if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+- if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
+- sizeof(arg)) &&
+- get_option(&argptr, &bit) &&
+- bit >= 0 &&
+- bit < NCAPINTS * 32)
+- setup_clear_cpu_cap(bit);
++ arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
++ if (arglen <= 0)
++ return;
++
++ pr_info("Clearing CPUID bits:");
++ do {
++ res = get_option(&argptr, &bit);
++ if (res == 0 || res == 3)
++ break;
++
++ /* If the argument was too long, the last bit may be cut off */
++ if (res == 1 && arglen >= sizeof(arg))
++ break;
++
++ if (bit >= 0 && bit < NCAPINTS * 32) {
++ pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
++ setup_clear_cpu_cap(bit);
++ }
++ } while (res == 2);
++ pr_cont("\n");
+ }
+
+ /*
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index 54c21d6abd5ac..5bb001c0c771a 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -106,7 +106,6 @@ fs_initcall(nmi_warning_debugfs);
+
+ static void nmi_check_duration(struct nmiaction *action, u64 duration)
+ {
+- u64 whole_msecs = READ_ONCE(action->max_duration);
+ int remainder_ns, decimal_msecs;
+
+ if (duration < nmi_longest_ns || duration < action->max_duration)
+@@ -114,12 +113,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
+
+ action->max_duration = duration;
+
+- remainder_ns = do_div(whole_msecs, (1000 * 1000));
++ remainder_ns = do_div(duration, (1000 * 1000));
+ decimal_msecs = remainder_ns / 1000;
+
+ printk_ratelimited(KERN_INFO
+ "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+- action->handler, whole_msecs, decimal_msecs);
++ action->handler, duration, decimal_msecs);
+ }
+
+ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index cc7823e7ef96c..484c32b7f79ff 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -3617,7 +3617,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
+ u64 tsc_aux = 0;
+
+ if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+- return emulate_gp(ctxt, 0);
++ return emulate_ud(ctxt);
+ ctxt->dst.val = tsc_aux;
+ return X86EMUL_CONTINUE;
+ }
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index bb743f956c232..b90e8fd2f6ced 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -6453,6 +6453,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, rcu_idx);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index b58495fde2e89..c79c1a07f44b9 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5383,6 +5383,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ * - Tell IOMMU to use legacy mode for this interrupt.
+ * - Retrieve ga_tag of prior interrupt remapping data.
+ */
++ pi.prev_ga_tag = 0;
+ pi.is_guest_mode = false;
+ ret = irq_set_vcpu_affinity(host_irq, &pi);
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index a460ddf04d60c..08e1e7544f823 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2231,6 +2231,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+ vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
++
++ vmx->segment_cache.bitmask = 0;
+ }
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+@@ -3094,8 +3096,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ prepare_vmcs02_early(vmx, vmcs12);
+
+ if (from_vmentry) {
+- if (unlikely(!nested_get_vmcs12_pages(vcpu)))
++ if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
++ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
++ }
+
+ if (nested_vmx_check_vmentry_hw(vcpu)) {
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 81aafb601df06..d2213220099d3 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -743,11 +743,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
+ {
+ char b[BDEVNAME_SIZE];
+
+- printk(KERN_INFO "attempt to access beyond end of device\n");
+- printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
+- bio_devname(bio, b), bio->bi_opf,
+- (unsigned long long)bio_end_sector(bio),
+- (long long)maxsector);
++ pr_info_ratelimited("attempt to access beyond end of device\n"
++ "%s: rw=%d, want=%llu, limit=%llu\n",
++ bio_devname(bio, b), bio->bi_opf,
++ bio_end_sector(bio), maxsector);
+ }
+
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index a09ab0c3d074d..5dafd7a8ec913 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+ struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+ kobj);
+
+- cancel_delayed_work_sync(&hctx->run_work);
+-
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ cleanup_srcu_struct(hctx->srcu);
+ blk_free_flush_queue(hctx->fq);
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 46f5198be0173..bf33570da5ac7 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -891,9 +891,16 @@ static void __blk_release_queue(struct work_struct *work)
+
+ blk_free_queue_stats(q->stats);
+
+- if (queue_is_mq(q))
++ if (queue_is_mq(q)) {
++ struct blk_mq_hw_ctx *hctx;
++ int i;
++
+ cancel_delayed_work_sync(&q->requeue_work);
+
++ queue_for_each_hw_ctx(q, hctx, i)
++ cancel_delayed_work_sync(&hctx->run_work);
++ }
++
+ blk_exit_queue(q);
+
+ blk_queue_free_zone_bitmaps(q);
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index 43c6aa784858b..e62d735ed2660 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+
+ skcipher_request_set_sync_tfm(skreq, null_tfm);
+- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+
+@@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ areq->outlen = outlen;
+
+ aead_request_set_callback(&areq->cra_u.aead_req,
+- CRYPTO_TFM_REQ_MAY_BACKLOG,
++ CRYPTO_TFM_REQ_MAY_SLEEP,
+ af_alg_async_cb, areq);
+ err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+ crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+ /* AIO operation in progress */
+- if (err == -EINPROGRESS || err == -EBUSY)
++ if (err == -EINPROGRESS)
+ return -EIOCBQUEUED;
+
+ sock_put(sk);
+ } else {
+ /* Synchronous operation */
+ aead_request_set_callback(&areq->cra_u.aead_req,
++ CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 81c4022285a7c..30069a92a9b22 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+ /* AIO operation in progress */
+- if (err == -EINPROGRESS || err == -EBUSY)
++ if (err == -EINPROGRESS)
+ return -EIOCBQUEUED;
+
+ sock_put(sk);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 110dd4c2977f5..b62b1ab6bb699 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -227,7 +227,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_work {
+ struct list_head entry;
+
+- enum {
++ enum binder_work_type {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
+@@ -889,27 +889,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
+ return w;
+ }
+
+-/**
+- * binder_dequeue_work_head() - Dequeues the item at head of list
+- * @proc: binder_proc associated with list
+- * @list: list to dequeue head
+- *
+- * Removes the head of the list if there are items on the list
+- *
+- * Return: pointer dequeued binder_work, NULL if list was empty
+- */
+-static struct binder_work *binder_dequeue_work_head(
+- struct binder_proc *proc,
+- struct list_head *list)
+-{
+- struct binder_work *w;
+-
+- binder_inner_proc_lock(proc);
+- w = binder_dequeue_work_head_ilocked(list);
+- binder_inner_proc_unlock(proc);
+- return w;
+-}
+-
+ static void
+ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+ static void binder_free_thread(struct binder_thread *thread);
+@@ -2347,8 +2326,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ * file is done when the transaction is torn
+ * down.
+ */
+- WARN_ON(failed_at &&
+- proc->tsk == current->group_leader);
+ } break;
+ case BINDER_TYPE_PTR:
+ /*
+@@ -4591,13 +4568,17 @@ static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
+ {
+ struct binder_work *w;
++ enum binder_work_type wtype;
+
+ while (1) {
+- w = binder_dequeue_work_head(proc, list);
++ binder_inner_proc_lock(proc);
++ w = binder_dequeue_work_head_ilocked(list);
++ wtype = w ? w->type : 0;
++ binder_inner_proc_unlock(proc);
+ if (!w)
+ return;
+
+- switch (w->type) {
++ switch (wtype) {
+ case BINDER_WORK_TRANSACTION: {
+ struct binder_transaction *t;
+
+@@ -4631,9 +4612,11 @@ static void binder_release_work(struct binder_proc *proc,
+ kfree(death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ } break;
++ case BINDER_WORK_NODE:
++ break;
+ default:
+ pr_err("unexpected work type, %d, not freed\n",
+- w->type);
++ wtype);
+ break;
+ }
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f3f0529564da0..b326eeddaadf0 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2664,6 +2664,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ kfree(dr);
++ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index 85a30fb9177bb..f83d67eafc9f0 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ percpu_up_write(&hu->proto_lock);
+
++ cancel_work_sync(&hu->init_ready);
+ cancel_work_sync(&hu->write_work);
+
+ if (hdev) {
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index ad2f26cb2622e..5b9aa73ff2b7f 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -357,6 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
+ struct hci_dev *hdev = hu->hdev;
+
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
++
++ cancel_work_sync(&hu->init_ready);
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 6b9a0593d2eb7..b6e7df9e88503 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1977,7 +1977,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ /* Do this early so it's available for logs. */
+ if (!new_smi->io.dev) {
+ pr_err("IPMI interface added with no device\n");
+- rv = EIO;
++ rv = -EIO;
+ goto out_err;
+ }
+
+diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
+index 37c22667e8319..4313ecb2af5b2 100644
+--- a/drivers/clk/at91/clk-main.c
++++ b/drivers/clk/at91/clk-main.c
+@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
+ return -EINVAL;
+
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
+- tmp &= ~MOR_KEY_MASK;
+
+ if (index && !(tmp & AT91_PMC_MOSCSEL))
+- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
++ tmp = AT91_PMC_MOSCSEL;
+ else if (!index && (tmp & AT91_PMC_MOSCSEL))
+- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
++ tmp = 0;
++ else
++ return 0;
++
++ regmap_update_bits(regmap, AT91_CKGR_MOR,
++ AT91_PMC_MOSCSEL | MOR_KEY_MASK,
++ tmp | AT91_PMC_KEY);
+
+ while (!clk_sam9x5_main_ready(regmap))
+ cpu_relax();
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 45420b514149f..c5486537b9284 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1336,8 +1336,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
+ pll->hw.init = &init;
+
+ ret = devm_clk_hw_register(cprman->dev, &pll->hw);
+- if (ret)
++ if (ret) {
++ kfree(pll);
+ return NULL;
++ }
+ return &pll->hw;
+ }
+
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index 41fc9c63356ea..1846bd879dd71 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
+ "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+
+ static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
+
+ static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
++ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
+
+ static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
+diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
+index 7edf8c8432b67..64ea895f1a7df 100644
+--- a/drivers/clk/keystone/sci-clk.c
++++ b/drivers/clk/keystone/sci-clk.c
+@@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
+ np = of_find_node_with_property(np, *clk_name);
+ if (!np) {
+ clk_name++;
+- break;
++ continue;
+ }
+
+ if (!of_device_is_available(np))
+diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
+index 608a9a6621a37..00920182bbe63 100644
+--- a/drivers/clk/mediatek/clk-mt6779.c
++++ b/drivers/clk/mediatek/clk-mt6779.c
+@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
+ "pwm_sel", 19),
+ GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
+ "pwm_sel", 21),
++ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
++ "uart_sel", 22),
+ GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
+ "uart_sel", 23),
+ GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
+diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
+index d2760a021301d..3143e16065de6 100644
+--- a/drivers/clk/meson/g12a.c
++++ b/drivers/clk/meson/g12a.c
+@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
+ &g12a_fclk_div2_div.hw
+ },
+ .num_parents = 1,
++ /*
++ * Similar to fclk_div3, it seems that this clock is used by
++ * the resident firmware and is required by the platform to
++ * operate correctly.
++ * Until the following condition are met, we need this clock to
++ * be marked as critical:
++ * a) Mark the clock used by a firmware resource, if possible
++ * b) CCF has a clock hand-off mechanism to make the sure the
++ * clock stays on until the proper driver comes along
++ */
++ .flags = CLK_IS_CRITICAL,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
+index c6fb57cd576f5..aa5c0c6ead017 100644
+--- a/drivers/clk/qcom/gcc-sdm660.c
++++ b/drivers/clk/qcom/gcc-sdm660.c
+@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x48044,
+ .mnd_width = 0,
+ .hid_width = 5,
+- .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
++ .parent_map = gcc_parent_map_xo_gpll0,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_rbcpr_clk_src",
+diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
+index ba9f00dc9740c..7dd2e0b1a5866 100644
+--- a/drivers/clk/rockchip/clk-half-divider.c
++++ b/drivers/clk/rockchip/clk-half-divider.c
+@@ -167,7 +167,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
+ unsigned long flags,
+ spinlock_t *lock)
+ {
+- struct clk *clk;
++ struct clk *clk = ERR_PTR(-ENOMEM);
+ struct clk_mux *mux = NULL;
+ struct clk_gate *gate = NULL;
+ struct clk_divider *div = NULL;
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index df1c941260d14..b4af4094309b0 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -484,6 +484,12 @@ remove_opp:
+ /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
+ late_initcall(armada37xx_cpufreq_driver_init);
+
++static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
++ { .compatible = "marvell,armada-3700-nb-pm" },
++ { },
++};
++MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
++
+ MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+ MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 3a2f022f6bde2..bc6ccf2c7aae0 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+ {
+ int cpu;
+- struct cpufreq_policy cpu_policy;
++ struct cpufreq_policy *cpu_policy;
+
+ rebooting = true;
+ for_each_online_cpu(cpu) {
+- cpufreq_get_policy(&cpu_policy, cpu);
+- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
++ cpu_policy = cpufreq_cpu_get(cpu);
++ if (!cpu_policy)
++ continue;
++ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
++ cpufreq_cpu_put(cpu_policy);
+ }
+
+ return NOTIFY_DONE;
+diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
+index 137ed3df0c74d..9612da122ceba 100644
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -112,6 +112,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
++ select CRYPTO_XTS
+ help
+ Selecting this will use CAAM Queue Interface (QI) for sending
+ & receiving crypto jobs to/from CAAM. This gives better performance
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index 8e3449670d2f0..2a605a419df8b 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -18,6 +18,7 @@
+ #include "qi.h"
+ #include "jr.h"
+ #include "caamalg_desc.h"
++#include <asm/unaligned.h>
+
+ /*
+ * crypto alg
+@@ -67,6 +68,11 @@ struct caam_ctx {
+ struct device *qidev;
+ spinlock_t lock; /* Protects multiple init of driver context */
+ struct caam_drv_ctx *drv_ctx[NUM_OP];
++ struct crypto_skcipher *fallback;
++};
++
++struct caam_skcipher_req_ctx {
++ struct skcipher_request fallback_req;
+ };
+
+ static int aead_set_sh_desc(struct crypto_aead *aead)
+@@ -745,12 +751,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
++ int err;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(jrdev, "key size mismatch\n");
+ goto badkey;
+ }
+
++ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
++ if (err)
++ return err;
++
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+@@ -1395,6 +1406,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ return edesc;
+ }
+
++static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
++{
++ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
++
++ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
++}
++
+ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ {
+ struct skcipher_edesc *edesc;
+@@ -1405,6 +1424,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
+ if (!req->cryptlen)
+ return 0;
+
++ if (ctx->fallback && xts_skcipher_ivsize(req)) {
++ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
++
++ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
++ skcipher_request_set_callback(&rctx->fallback_req,
++ req->base.flags,
++ req->base.complete,
++ req->base.data);
++ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
++ req->dst, req->cryptlen, req->iv);
++
++ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
++ crypto_skcipher_decrypt(&rctx->fallback_req);
++ }
++
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+@@ -1529,6 +1563,7 @@ static struct caam_skcipher_alg driver_algs[] = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi",
++ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+@@ -2462,9 +2497,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
++ int ret = 0;
++
++ if (alg_aai == OP_ALG_AAI_XTS) {
++ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
++ struct crypto_skcipher *fallback;
++
++ fallback = crypto_alloc_skcipher(tfm_name, 0,
++ CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(fallback)) {
++ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
++ tfm_name, PTR_ERR(fallback));
++ return PTR_ERR(fallback);
++ }
+
+- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+- false);
++ ctx->fallback = fallback;
++ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
++ crypto_skcipher_reqsize(fallback));
++ }
++
++ ret = caam_init_common(ctx, &caam_alg->caam, false);
++ if (ret && ctx->fallback)
++ crypto_free_skcipher(ctx->fallback);
++
++ return ret;
+ }
+
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -2490,7 +2548,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
+
+ static void caam_cra_exit(struct crypto_skcipher *tfm)
+ {
+- caam_exit_common(crypto_skcipher_ctx(tfm));
++ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
++
++ if (ctx->fallback)
++ crypto_free_skcipher(ctx->fallback);
++ caam_exit_common(ctx);
+ }
+
+ static void caam_aead_exit(struct crypto_aead *tfm)
+@@ -2524,7 +2586,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++ alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 64112c736810e..7234b95241e91 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ break;
+ default:
+ ret = -EINVAL;
+- goto e_ctx;
++ goto e_data;
+ }
+ } else {
+ /* Stash the context */
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index dffa2aa855fdd..9b410ffafc4dd 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -1053,6 +1053,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ ndev = n->dev;
+ if (!ndev)
+ goto free_dst;
++ if (is_vlan_dev(ndev))
++ ndev = vlan_dev_real_dev(ndev);
++
+ port_id = cxgb4_port_idx(ndev);
+
+ csk = chtls_sock_create(cdev);
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index c403d6b64e087..a5903cfc83523 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -910,9 +910,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
+ return (__force int)cpu_to_be16(thdr->length);
+ }
+
+-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
++static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+ {
+- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
++ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
+ }
+
+ static int csk_wait_memory(struct chtls_dev *cdev,
+@@ -1210,6 +1210,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
+ copied = 0;
+ csk = rcu_dereference_sk_user_data(sk);
+ cdev = csk->cdev;
++ lock_sock(sk);
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+ err = sk_stream_wait_connect(sk, &timeo);
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 9181523ba7607..acaa504d5a798 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -527,7 +527,7 @@ static void release_ixp_crypto(struct device *dev)
+
+ if (crypt_virt) {
+ dma_free_coherent(dev,
+- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
++ NPE_QLEN * sizeof(struct crypt_ctl),
+ crypt_virt, crypt_phys);
+ }
+ }
+diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
+index 7e3ad085b5bdd..efce3a83b35a8 100644
+--- a/drivers/crypto/mediatek/mtk-platform.c
++++ b/drivers/crypto/mediatek/mtk-platform.c
+@@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
+ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
+ {
+ struct mtk_ring **ring = cryp->ring;
+- int i, err = ENOMEM;
++ int i;
+
+ for (i = 0; i < MTK_RING_MAX; i++) {
+ ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
+@@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
+ return 0;
+
+ err_cleanup:
+- for (; i--; ) {
++ do {
+ dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+ ring[i]->res_base, ring[i]->res_dma);
+ dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
+ ring[i]->cmd_base, ring[i]->cmd_dma);
+ kfree(ring[i]);
+- }
+- return err;
++ } while (i--);
++ return -ENOMEM;
+ }
+
+ static int mtk_crypto_probe(struct platform_device *pdev)
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index aba5db3c0588f..d7c0c982ba433 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -453,6 +453,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 val, mask;
+
++ if (likely(ctx->digcnt))
++ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
++
+ /*
+ * Setting ALGO_CONST only for the first iteration and
+ * CLOSE_HASH only for the last one. Note that flags mode bits
+diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
+index 2680e1525db58..13ecbb0e58528 100644
+--- a/drivers/crypto/picoxcell_crypto.c
++++ b/drivers/crypto/picoxcell_crypto.c
+@@ -1697,11 +1697,6 @@ static int spacc_probe(struct platform_device *pdev)
+ goto err_clk_put;
+ }
+
+- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+- if (ret)
+- goto err_clk_disable;
+-
+-
+ /*
+ * Use an IRQ threshold of 50% as a default. This seems to be a
+ * reasonable trade off of latency against throughput but can be
+@@ -1709,6 +1704,10 @@ static int spacc_probe(struct platform_device *pdev)
+ */
+ engine->stat_irq_thresh = (engine->fifo_sz / 2);
+
++ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
++ if (ret)
++ goto err_clk_disable;
++
+ /*
+ * Configure the interrupts. We only use the STAT_CNT interrupt as we
+ * only submit a new packet for processing when we complete another in
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index 62d9825a49e9d..238936e2dfe2d 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -1218,15 +1218,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
+ add_threaded_test(info);
+
+ /* Check if channel was added successfully */
+- dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
+-
+- if (dtc->chan) {
++ if (!list_empty(&info->channels)) {
+ /*
+ * if new channel was not successfully added, revert the
+ * "test_channel" string to the name of the last successfully
+ * added channel. exception for when users issues empty string
+ * to channel parameter.
+ */
++ dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
+ if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
+ && (strcmp("", strim(test_channel)) != 0)) {
+ ret = -EINVAL;
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index a1b56f52db2f2..5e7fdc0b6e3db 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ if (dws->dma_dev != chan->device->dev)
+ return false;
+
++ /* permit channels in accordance with the channels mask */
++ if (dws->channels && !(dws->channels & dwc->mask))
++ return false;
++
+ /* We have to copy data since dws can be temporary storage */
+ memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
+
+diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
+index 7a085b3c1854c..d9810980920a1 100644
+--- a/drivers/dma/dw/dw.c
++++ b/drivers/dma/dw/dw.c
+@@ -14,7 +14,7 @@
+ static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
+ {
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- u32 cfghi = DWC_CFGH_FIFO_MODE;
++ u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ bool hs_polarity = dwc->dws.hs_polarity;
+
+diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
+index 9e27831dee324..43e975fb67142 100644
+--- a/drivers/dma/dw/of.c
++++ b/drivers/dma/dw/of.c
+@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ };
+ dma_cap_mask_t cap;
+
+- if (dma_spec->args_count != 3)
++ if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
+ return NULL;
+
+ slave.src_id = dma_spec->args[0];
+ slave.dst_id = dma_spec->args[0];
+ slave.m_master = dma_spec->args[1];
+ slave.p_master = dma_spec->args[2];
++ if (dma_spec->args_count >= 4)
++ slave.channels = dma_spec->args[3];
+
+ if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
+ slave.m_master >= dw->pdata->nr_masters ||
+- slave.p_master >= dw->pdata->nr_masters))
++ slave.p_master >= dw->pdata->nr_masters ||
++ slave.channels >= BIT(dw->pdata->nr_channels)))
+ return NULL;
+
+ dma_cap_zero(cap);
+diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
+index 5634437bb39d2..66669f9d690be 100644
+--- a/drivers/edac/aspeed_edac.c
++++ b/drivers/edac/aspeed_edac.c
+@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
+ /* register interrupt handler */
+ irq = platform_get_irq(pdev, 0);
+ dev_dbg(&pdev->dev, "got irq %d\n", irq);
+- if (!irq)
+- return -ENODEV;
++ if (irq < 0)
++ return irq;
+
+ rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
+ DRV_NAME, ctx);
+diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
+index 251f2b692785d..0c72daa519ffa 100644
+--- a/drivers/edac/i5100_edac.c
++++ b/drivers/edac/i5100_edac.c
+@@ -1074,16 +1074,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ PCI_DEVICE_ID_INTEL_5100_19, 0);
+ if (!einj) {
+ ret = -ENODEV;
+- goto bail_einj;
++ goto bail_mc_free;
+ }
+
+ rc = pci_enable_device(einj);
+ if (rc < 0) {
+ ret = rc;
+- goto bail_disable_einj;
++ goto bail_einj;
+ }
+
+-
+ mci->pdev = &pdev->dev;
+
+ priv = mci->pvt_info;
+@@ -1149,14 +1148,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ bail_scrub:
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+- edac_mc_free(mci);
+-
+-bail_disable_einj:
+ pci_disable_device(einj);
+
+ bail_einj:
+ pci_dev_put(einj);
+
++bail_mc_free:
++ edac_mc_free(mci);
++
+ bail_disable_ch1:
+ pci_disable_device(ch1mm);
+
+diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
+index 6ac26d1b929f0..3247689467435 100644
+--- a/drivers/edac/ti_edac.c
++++ b/drivers/edac/ti_edac.c
+@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
+
+ /* add EMIF ECC error handler */
+ error_irq = platform_get_irq(pdev, 0);
+- if (!error_irq) {
++ if (error_irq < 0) {
++ ret = error_irq;
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF irq number not defined.\n");
+ goto err;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2384aa018993d..7c58085031732 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -6984,8 +6984,7 @@ static int dm_update_plane_state(struct dc *dc,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+- ret = EINVAL;
+- return ret;
++ return -EINVAL;
+ }
+
+
+diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
+index 6a626c82e264b..f6598c5a9a879 100644
+--- a/drivers/gpu/drm/drm_debugfs_crc.c
++++ b/drivers/gpu/drm/drm_debugfs_crc.c
+@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
+ source[len - 1] = '\0';
+
+ ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
+- if (ret)
++ if (ret) {
++ kfree(source);
+ return ret;
++ }
+
+ spin_lock_irq(&crc->lock);
+
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
+index 570b59520fd13..2ff4b35151bf8 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
+@@ -2120,7 +2120,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
+ intel_dp->dpcd,
+ sizeof(intel_dp->dpcd));
+ cdv_intel_edp_panel_vdd_off(gma_encoder);
+- if (ret == 0) {
++ if (ret <= 0) {
+ /* if this fails, presume the device is a ghost */
+ DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ cdv_intel_dp_encoder_destroy(encoder);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+index 691c1a277d91b..dfcbb2b7cdda3 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+@@ -834,7 +834,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ int i;
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+- sizeof(a6xx_state->indexed_regs));
++ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 36c85c05b7cf7..4aed5e9a84a45 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -819,7 +819,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+- int cnt = 0, rc = 0, mixer_width, i, z_pos;
++ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+@@ -852,9 +852,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+- mixer_width = mode->hdisplay / cstate->num_mixers;
++ if (cstate->num_mixers) {
++ mixer_width = mode->hdisplay / cstate->num_mixers;
+
+- _dpu_crtc_setup_lm_bounds(crtc, state);
++ _dpu_crtc_setup_lm_bounds(crtc, state);
++ }
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+index e8506335cd155..1694a7deb9133 100644
+--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+@@ -26,6 +26,7 @@
+ #include <drm/drm_drv.h>
+ #include <drm/drm_fb_cma_helper.h>
+ #include <drm/drm_fb_helper.h>
++#include <drm/drm_fourcc.h>
+ #include <drm/drm_gem_cma_helper.h>
+ #include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_irq.h>
+@@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
+ clk_disable_unprepare(mxsfb->clk_axi);
+ }
+
++static struct drm_framebuffer *
++mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
++ const struct drm_mode_fb_cmd2 *mode_cmd)
++{
++ const struct drm_format_info *info;
++
++ info = drm_get_format_info(dev, mode_cmd);
++ if (!info)
++ return ERR_PTR(-EINVAL);
++
++ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
++ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
++ return ERR_PTR(-EINVAL);
++ }
++
++ return drm_gem_fb_create(dev, file_priv, mode_cmd);
++}
++
+ static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
+- .fb_create = drm_gem_fb_create,
++ .fb_create = mxsfb_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ };
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 6d9656323a3f4..f0ea782df836d 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -2382,12 +2382,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
+ static const struct panel_desc ortustech_com43h4m85ulc = {
+ .modes = &ortustech_com43h4m85ulc_mode,
+ .num_modes = 1,
+- .bpc = 8,
++ .bpc = 6,
+ .size = {
+ .width = 56,
+ .height = 93,
+ },
+- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ };
+
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+index 8822ec13a0d61..0d39a201c7591 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
+@@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+ return 0;
+ }
+
++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
++{
++ /*
++ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 needs
++ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
++ * to operate correctly.
++ */
++ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
++ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
++}
++
+ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+ {
+ u32 quirks = 0;
+@@ -304,6 +315,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+ int ret;
+ u32 val;
+
++ panfrost_gpu_init_quirks(pfdev);
++
+ /* Just turn on everything for now */
+ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+@@ -357,7 +370,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
+ return err;
+ }
+
+- panfrost_gpu_init_quirks(pfdev);
+ panfrost_gpu_power_on(pfdev);
+
+ return 0;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
+index 4112412087b27..468c51e7e46db 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
++++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
+@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
+ void panfrost_gpu_power_on(struct panfrost_device *pfdev);
+ void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+
++void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
++
+ #endif
+diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
+index ea38ac60581c6..eddaa62ad8b0e 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
+@@ -51,6 +51,10 @@
+ #define GPU_STATUS 0x34
+ #define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
+ #define GPU_LATEST_FLUSH_ID 0x38
++#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
++#define GPU_PWR_KEY_UNLOCK 0x2968A819
++#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
++#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
+ #define GPU_FAULT_STATUS 0x3C
+ #define GPU_FAULT_ADDRESS_LO 0x40
+ #define GPU_FAULT_ADDRESS_HI 0x44
+diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
+index c190702fab726..6dcc05ab31eba 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
+@@ -96,8 +96,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
+ vgdev->capsets[i].id > 0, 5 * HZ);
+ if (ret == 0) {
+ DRM_ERROR("timed out waiting for cap set %d\n", i);
++ spin_lock(&vgdev->display_info_lock);
+ kfree(vgdev->capsets);
+ vgdev->capsets = NULL;
++ spin_unlock(&vgdev->display_info_lock);
+ return;
+ }
+ DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 7ac20490e1b4c..92022a83bbd5e 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -572,9 +572,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
+ int i = le32_to_cpu(cmd->capset_index);
+
+ spin_lock(&vgdev->display_info_lock);
+- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
++ if (vgdev->capsets) {
++ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
++ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
++ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
++ } else {
++ DRM_ERROR("invalid capset memory.");
++ }
+ spin_unlock(&vgdev->display_info_lock);
+ wake_up(&vgdev->resp_wq);
+ }
+diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
+index d5585695c64d1..45d6ebbdbdb22 100644
+--- a/drivers/gpu/drm/vkms/vkms_composer.c
++++ b/drivers/gpu/drm/vkms/vkms_composer.c
+@@ -33,7 +33,7 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
+ + (i * composer->pitch)
+ + (j * composer->cpp);
+ /* XRGB format ignores Alpha channel */
+- memset(vaddr_out + src_offset + 24, 0, 8);
++ bitmap_clear(vaddr_out + src_offset, 24, 8);
+ crc = crc32_le(crc, vaddr_out + src_offset,
+ sizeof(u32));
+ }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e03a4d794240c..7363d0b488bd8 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -1119,6 +1119,7 @@
+ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
+ #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
++#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
+
+ #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index e3d475f4baf66..b2bff932c524f 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ case 0x3b: /* Battery Strength */
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ usage->type = EV_PWR;
+- goto ignore;
++ return;
+
+ case 0x3c: /* Invert */
+ map_key_clear(BTN_TOOL_RUBBER);
+@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ case HID_DC_BATTERYSTRENGTH:
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ usage->type = EV_PWR;
+- goto ignore;
++ return;
+ }
+ goto unknown;
+
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 6c55682c59740..044a93f3c1178 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
++ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_SYNAPTICS,
++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
+index 1a6e600197d0b..509b9bb1362cb 100644
+--- a/drivers/hid/hid-roccat-kone.c
++++ b/drivers/hid/hid-roccat-kone.c
+@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
+ struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval = 0, difference, old_profile;
++ struct kone_settings *settings = (struct kone_settings *)buf;
+
+ /* I need to get my data in one piece */
+ if (off != 0 || count != sizeof(struct kone_settings))
+ return -EINVAL;
+
+ mutex_lock(&kone->kone_lock);
+- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
++ difference = memcmp(settings, &kone->settings,
++ sizeof(struct kone_settings));
+ if (difference) {
+- retval = kone_set_settings(usb_dev,
+- (struct kone_settings const *)buf);
+- if (retval) {
+- mutex_unlock(&kone->kone_lock);
+- return retval;
++ if (settings->startup_profile < 1 ||
++ settings->startup_profile > 5) {
++ retval = -EINVAL;
++ goto unlock;
+ }
+
++ retval = kone_set_settings(usb_dev, settings);
++ if (retval)
++ goto unlock;
++
+ old_profile = kone->settings.startup_profile;
+- memcpy(&kone->settings, buf, sizeof(struct kone_settings));
++ memcpy(&kone->settings, settings, sizeof(struct kone_settings));
+
+ kone_profile_activated(kone, kone->settings.startup_profile);
+
+ if (kone->settings.startup_profile != old_profile)
+ kone_profile_report(kone, kone->settings.startup_profile);
+ }
++unlock:
+ mutex_unlock(&kone->kone_lock);
+
++ if (retval)
++ return retval;
++
+ return sizeof(struct kone_settings);
+ }
+ static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
+diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
+index 5c63a66007293..e22617def70bf 100644
+--- a/drivers/hwmon/pmbus/max34440.c
++++ b/drivers/hwmon/pmbus/max34440.c
+@@ -387,7 +387,6 @@ static struct pmbus_driver_info max34440_info[] = {
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+- .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
+@@ -418,7 +417,6 @@ static struct pmbus_driver_info max34440_info[] = {
+ .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+- .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
+@@ -454,7 +452,6 @@ static struct pmbus_driver_info max34440_info[] = {
+ .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+- .read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 84f1dcb698272..9b0c5d719232f 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
+ cpumask_t *mask = &event_data->mask;
+ struct coresight_device *sink;
+
+- if (WARN_ON(cpumask_empty(mask)))
++ if (!event_data->snk_config)
+ return;
+
+- if (!event_data->snk_config)
++ if (WARN_ON(cpumask_empty(mask)))
+ return;
+
+ cpu = cpumask_first(mask);
+@@ -310,6 +310,16 @@ static void etm_event_start(struct perf_event *event, int flags)
+ if (!event_data)
+ goto fail;
+
++ /*
++ * Check if this ETM is allowed to trace, as decided
++ * at etm_setup_aux(). This could be due to an unreachable
++ * sink from this ETM. We can't do much in this case if
++ * the sink was specified or hinted to the driver. For
++ * now, simply don't record anything on this ETM.
++ */
++ if (!cpumask_test_cpu(cpu, &event_data->mask))
++ goto fail_end_stop;
++
+ path = etm_event_cpu_path(event_data, cpu);
+ /* We need a sink, no need to continue without one */
+ sink = coresight_get_sink(path);
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 146ce40d8e0aa..2d08a8719506c 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -1162,6 +1162,7 @@ config I2C_RCAR
+ tristate "Renesas R-Car I2C Controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select I2C_SLAVE
++ select RESET_CONTROLLER if ARCH_RCAR_GEN3
+ help
+ If you say yes to this option, support will be included for the
+ R-Car I2C controller.
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index ce70b5288472c..c70983780ae79 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
+ void i2c_acpi_register_devices(struct i2c_adapter *adap)
+ {
+ acpi_status status;
++ acpi_handle handle;
+
+ if (!has_acpi_companion(&adap->dev))
+ return;
+@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
+ adap, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
++
++ if (!adap->dev.parent)
++ return;
++
++ handle = ACPI_HANDLE(adap->dev.parent);
++ if (!handle)
++ return;
++
++ acpi_walk_dep_device_list(handle);
+ }
+
+ const struct acpi_device_id *
+@@ -737,7 +747,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
+ return -ENOMEM;
+ }
+
+- acpi_walk_dep_device_list(handle);
+ return 0;
+ }
+
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 5c051dba32a51..6cc71c90f85ea 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -1760,6 +1760,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+ i3c_master_detach_free_devs(master);
+ }
+
++static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
++{
++ struct i3c_master_controller *master = i3cdev->common.master;
++ struct i3c_dev_boardinfo *i3cboardinfo;
++
++ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
++ if (i3cdev->info.pid != i3cboardinfo->pid)
++ continue;
++
++ i3cdev->boardinfo = i3cboardinfo;
++ i3cdev->info.static_addr = i3cboardinfo->static_addr;
++ return;
++ }
++}
++
+ static struct i3c_dev_desc *
+ i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+ {
+@@ -1815,10 +1830,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ if (ret)
+ goto err_detach_dev;
+
++ i3c_master_attach_boardinfo(newdev);
++
+ olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ if (olddev) {
+- newdev->boardinfo = olddev->boardinfo;
+- newdev->info.static_addr = olddev->info.static_addr;
+ newdev->dev = olddev->dev;
+ if (newdev->dev)
+ newdev->dev->desc = newdev;
+diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
+index 10db0bf0655a9..6d5719cea9f53 100644
+--- a/drivers/i3c/master/i3c-master-cdns.c
++++ b/drivers/i3c/master/i3c-master-cdns.c
+@@ -1593,8 +1593,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+- if (!master->ibi.slots)
++ if (!master->ibi.slots) {
++ ret = -ENOMEM;
+ goto err_disable_sysclk;
++ }
+
+ writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
+ writel(MST_INT_IBIR_THR, master->regs + MST_IER);
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 74f3a2be17a64..14d6a537289cb 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -780,6 +780,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
+ {
+ return stm32_adc_core_hw_start(dev);
+ }
++
++static int stm32_adc_core_runtime_idle(struct device *dev)
++{
++ pm_runtime_mark_last_busy(dev);
++
++ return 0;
++}
+ #endif
+
+ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
+@@ -787,7 +794,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
+ stm32_adc_core_runtime_resume,
+- NULL)
++ stm32_adc_core_runtime_idle)
+ };
+
+ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index e3cd9d2b0dd2b..98d2d74b96f78 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1803,19 +1803,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
+ mutex_unlock(&lock);
+ }
+
+-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
+- struct cma_multicast *mc)
++static void destroy_mc(struct rdma_id_private *id_priv,
++ struct cma_multicast *mc)
+ {
+- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+- struct net_device *ndev = NULL;
++ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) {
++ ib_sa_free_multicast(mc->multicast.ib);
++ kfree(mc);
++ return;
++ }
+
+- if (dev_addr->bound_dev_if)
+- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+- if (ndev) {
+- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
+- dev_put(ndev);
++ if (rdma_protocol_roce(id_priv->id.device,
++ id_priv->id.port_num)) {
++ struct rdma_dev_addr *dev_addr =
++ &id_priv->id.route.addr.dev_addr;
++ struct net_device *ndev = NULL;
++
++ if (dev_addr->bound_dev_if)
++ ndev = dev_get_by_index(dev_addr->net,
++ dev_addr->bound_dev_if);
++ if (ndev) {
++ cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
++ dev_put(ndev);
++ }
++ kref_put(&mc->mcref, release_mc);
+ }
+- kref_put(&mc->mcref, release_mc);
+ }
+
+ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
+@@ -1823,16 +1834,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
+ struct cma_multicast *mc;
+
+ while (!list_empty(&id_priv->mc_list)) {
+- mc = container_of(id_priv->mc_list.next,
+- struct cma_multicast, list);
++ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
++ list);
+ list_del(&mc->list);
+- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
+- id_priv->id.port_num)) {
+- ib_sa_free_multicast(mc->multicast.ib);
+- kfree(mc);
+- } else {
+- cma_leave_roce_mc_group(id_priv, mc);
+- }
++ destroy_mc(id_priv, mc);
+ }
+ }
+
+@@ -4182,16 +4187,6 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+ else
+ pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
+ status);
+- mutex_lock(&id_priv->qp_mutex);
+- if (!status && id_priv->id.qp) {
+- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
+- be16_to_cpu(multicast->rec.mlid));
+- if (status)
+- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
+- status);
+- }
+- mutex_unlock(&id_priv->qp_mutex);
+-
+ event.status = status;
+ event.param.ud.private_data = mc->context;
+ if (!status) {
+@@ -4446,6 +4441,10 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ struct cma_multicast *mc;
+ int ret;
+
++ /* Not supported for kernel QPs */
++ if (WARN_ON(id->qp))
++ return -EINVAL;
++
+ if (!id->device)
+ return -EINVAL;
+
+@@ -4496,25 +4495,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
+ id_priv = container_of(id, struct rdma_id_private, id);
+ spin_lock_irq(&id_priv->lock);
+ list_for_each_entry(mc, &id_priv->mc_list, list) {
+- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
+- list_del(&mc->list);
+- spin_unlock_irq(&id_priv->lock);
+-
+- if (id->qp)
+- ib_detach_mcast(id->qp,
+- &mc->multicast.ib->rec.mgid,
+- be16_to_cpu(mc->multicast.ib->rec.mlid));
+-
+- BUG_ON(id_priv->cma_dev->device != id->device);
+-
+- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
+- ib_sa_free_multicast(mc->multicast.ib);
+- kfree(mc);
+- } else if (rdma_protocol_roce(id->device, id->port_num)) {
+- cma_leave_roce_mc_group(id_priv, mc);
+- }
+- return;
+- }
++ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
++ continue;
++ list_del(&mc->list);
++ spin_unlock_irq(&id_priv->lock);
++
++ WARN_ON(id_priv->cma_dev->device != id->device);
++ destroy_mc(id_priv, mc);
++ return;
+ }
+ spin_unlock_irq(&id_priv->lock);
+ }
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index f4f79f1292b91..ef4be14af3bb9 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -581,6 +581,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
+ list_move_tail(&uevent->list, &list);
+ }
+ list_del(&ctx->list);
++ events_reported = ctx->events_reported;
+ mutex_unlock(&ctx->file->mut);
+
+ list_for_each_entry_safe(uevent, tmp, &list, list) {
+@@ -590,7 +591,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
+ kfree(uevent);
+ }
+
+- events_reported = ctx->events_reported;
+ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
+@@ -1473,7 +1473,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
+ return 0;
+
+ err3:
++ mutex_lock(&ctx->mutex);
+ rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
++ mutex_unlock(&ctx->mutex);
+ ucma_cleanup_mc_events(mc);
+ err2:
+ xa_erase(&multicast_table, mc->id);
+@@ -1639,7 +1641,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
+
+ cur_file = ctx->file;
+ if (cur_file == new_file) {
++ mutex_lock(&cur_file->mut);
+ resp.events_reported = ctx->events_reported;
++ mutex_unlock(&cur_file->mut);
+ goto response;
+ }
+
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 0d42ba8c0b696..650f71dd4ab93 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -151,13 +151,24 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ dma_addr_t mask;
+ int i;
+
++ /* rdma_for_each_block() has a bug if the page size is smaller than the
++ * page size used to build the umem. For now prevent smaller page sizes
++ * from being returned.
++ */
++ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
++
+ /* At minimum, drivers must support PAGE_SIZE or smaller */
+ if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
+ return 0;
+
+ va = virt;
+- /* max page size not to exceed MR length */
+- mask = roundup_pow_of_two(umem->length);
++ /* The best result is the smallest page size that results in the minimum
++ * number of required pages. Compute the largest page size that could
++ * work based on VA address bits that don't change.
++ */
++ mask = pgsz_bitmap &
++ GENMASK(BITS_PER_LONG - 1,
++ bits_per((umem->length - 1 + virt) ^ virt));
+ /* offset into first SGL */
+ pgoff = umem->address & ~PAGE_MASK;
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index a79fa67df8715..a405c64d2a82b 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
+ ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
+ break;
+ case IB_WR_LOCAL_INV:
+- break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_LSO:
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 0502c90c83edd..bb75328193957 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4616,7 +4616,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S);
+- qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
++ qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S);
+
+ done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+@@ -4632,6 +4634,7 @@ done:
+ }
+
+ qp_init_attr->cap = qp_attr->cap;
++ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
+
+ out:
+ mutex_unlock(&hr_qp->mutex);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 8dd2d666f6875..730e50c87a760 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1181,8 +1181,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+
+ mutex_lock(&hr_qp->mutex);
+
+- cur_state = attr_mask & IB_QP_CUR_STATE ?
+- attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
++ if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
++ goto out;
++
++ cur_state = hr_qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (ibqp->uobject &&
+diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
+index 8feec35f95a7c..6d6719fa7e46a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -398,8 +398,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
+ }
+
+ /* i40iw.c */
+-void i40iw_add_ref(struct ib_qp *);
+-void i40iw_rem_ref(struct ib_qp *);
++void i40iw_qp_add_ref(struct ib_qp *ibqp);
++void i40iw_qp_rem_ref(struct ib_qp *ibqp);
+ struct ib_qp *i40iw_get_qp(struct ib_device *, int);
+
+ void i40iw_flush_wqes(struct i40iw_device *iwdev,
+@@ -543,9 +543,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ bool wait);
+ void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
+-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+- struct i40iw_qp *iwqp,
+- u32 qp_num);
++void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
++
+ enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ struct i40iw_dma_mem *memptr,
+ u32 size, u32 mask);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index fa7a5ff498c73..56c1e9abc52dc 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ iwqp->cm_node = NULL;
+- i40iw_rem_ref(&iwqp->ibqp);
++ i40iw_qp_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ i40iw_get_addr_info(cm_node, &nfo);
+@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
+ kfree(work);
+ return;
+ }
+- i40iw_add_ref(&iwqp->ibqp);
++ i40iw_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+
+ work->iwqp = iwqp;
+@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
+
+ kfree(dwork);
+ i40iw_cm_disconn_true(iwqp);
+- i40iw_rem_ref(&iwqp->ibqp);
++ i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+
+ /**
+@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ cm_id->add_ref(cm_id);
+- i40iw_add_ref(&iwqp->ibqp);
++ i40iw_qp_add_ref(&iwqp->ibqp);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+- i40iw_add_ref(&iwqp->ibqp);
++ i40iw_qp_add_ref(&iwqp->ibqp);
+
+ if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index ae8b97c306657..a7512508f7e60 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+ __func__, info->qp_cq_id);
+ continue;
+ }
+- i40iw_add_ref(&iwqp->ibqp);
++ i40iw_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ qp = &iwqp->sc_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+@@ -427,7 +427,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
+ break;
+ }
+ if (info->qp)
+- i40iw_rem_ref(&iwqp->ibqp);
++ i40iw_qp_rem_ref(&iwqp->ibqp);
+ } while (1);
+
+ if (aeqcnt)
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 016524683e17e..72db7c1dc2998 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -479,25 +479,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+ }
+ }
+
+-/**
+- * i40iw_free_qp - callback after destroy cqp completes
+- * @cqp_request: cqp request for destroy qp
+- * @num: not used
+- */
+-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+-{
+- struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
+- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+- struct i40iw_device *iwdev;
+- u32 qp_num = iwqp->ibqp.qp_num;
+-
+- iwdev = iwqp->iwdev;
+-
+- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+- i40iw_rem_devusecount(iwdev);
+-}
+-
+ /**
+ * i40iw_wait_event - wait for completion
+ * @iwdev: iwarp device
+@@ -618,26 +599,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
+ }
+
+ /**
+- * i40iw_add_ref - add refcount for qp
++ * i40iw_qp_add_ref - add refcount for qp
+ * @ibqp: iqarp qp
+ */
+-void i40iw_add_ref(struct ib_qp *ibqp)
++void i40iw_qp_add_ref(struct ib_qp *ibqp)
+ {
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
+
+- atomic_inc(&iwqp->refcount);
++ refcount_inc(&iwqp->refcount);
+ }
+
+ /**
+- * i40iw_rem_ref - rem refcount for qp and free if 0
++ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
+ * @ibqp: iqarp qp
+ */
+-void i40iw_rem_ref(struct ib_qp *ibqp)
++void i40iw_qp_rem_ref(struct ib_qp *ibqp)
+ {
+ struct i40iw_qp *iwqp;
+- enum i40iw_status_code status;
+- struct i40iw_cqp_request *cqp_request;
+- struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev;
+ u32 qp_num;
+ unsigned long flags;
+@@ -645,7 +623,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ spin_lock_irqsave(&iwdev->qptable_lock, flags);
+- if (!atomic_dec_and_test(&iwqp->refcount)) {
++ if (!refcount_dec_and_test(&iwqp->refcount)) {
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+ return;
+ }
+@@ -653,25 +631,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
+ qp_num = iwqp->ibqp.qp_num;
+ iwdev->qp_table[qp_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+- if (!cqp_request)
+- return;
+-
+- cqp_request->callback_fcn = i40iw_free_qp;
+- cqp_request->param = (void *)&iwqp->sc_qp;
+- cqp_info = &cqp_request->info;
+- cqp_info->cqp_cmd = OP_QP_DESTROY;
+- cqp_info->post_sq = 1;
+- cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
+- cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+- cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+- status = i40iw_handle_cqp_op(iwdev, cqp_request);
+- if (!status)
+- return;
++ complete(&iwqp->free_qp);
+
+- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+- i40iw_rem_devusecount(iwdev);
+ }
+
+ /**
+@@ -938,7 +899,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
+ struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+
+ i40iw_terminate_done(qp, 1);
+- i40iw_rem_ref(&iwqp->ibqp);
++ i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+
+ /**
+@@ -950,7 +911,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+- i40iw_add_ref(&iwqp->ibqp);
++ i40iw_qp_add_ref(&iwqp->ibqp);
+ timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
+ iwqp->terminate_timer.expires = jiffies + HZ;
+ add_timer(&iwqp->terminate_timer);
+@@ -966,7 +927,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ if (del_timer(&iwqp->terminate_timer))
+- i40iw_rem_ref(&iwqp->ibqp);
++ i40iw_qp_rem_ref(&iwqp->ibqp);
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index cd9ee1664a69e..22bf4f09c0647 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -366,11 +366,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
+ * @iwqp: qp ptr (user or kernel)
+ * @qp_num: qp number assigned
+ */
+-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+- struct i40iw_qp *iwqp,
+- u32 qp_num)
++void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
+ {
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
++ struct i40iw_device *iwdev = iwqp->iwdev;
++ u32 qp_num = iwqp->ibqp.qp_num;
+
+ i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
+ i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+@@ -404,6 +404,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
+ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ {
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
++ struct ib_qp_attr attr;
++ struct i40iw_device *iwdev = iwqp->iwdev;
++
++ memset(&attr, 0, sizeof(attr));
+
+ iwqp->destroyed = 1;
+
+@@ -418,7 +422,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ }
+ }
+
+- i40iw_rem_ref(&iwqp->ibqp);
++ attr.qp_state = IB_QPS_ERR;
++ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
++ i40iw_qp_rem_ref(&iwqp->ibqp);
++ wait_for_completion(&iwqp->free_qp);
++ i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
++ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
++ i40iw_free_qp_resources(iwqp);
++ i40iw_rem_devusecount(iwdev);
++
+ return 0;
+ }
+
+@@ -579,6 +591,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ qp->back_qp = (void *)iwqp;
+ qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+
++ iwqp->iwdev = iwdev;
+ iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
+
+ if (i40iw_allocate_dma_mem(dev->hw,
+@@ -603,7 +616,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ goto error;
+ }
+
+- iwqp->iwdev = iwdev;
+ iwqp->iwpd = iwpd;
+ iwqp->ibqp.qp_num = qp_num;
+ qp = &iwqp->sc_qp;
+@@ -717,7 +729,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ goto error;
+ }
+
+- i40iw_add_ref(&iwqp->ibqp);
++ refcount_set(&iwqp->refcount, 1);
+ spin_lock_init(&iwqp->lock);
+ iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ iwdev->qp_table[qp_num] = iwqp;
+@@ -739,10 +751,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ }
+ init_completion(&iwqp->sq_drained);
+ init_completion(&iwqp->rq_drained);
++ init_completion(&iwqp->free_qp);
+
+ return &iwqp->ibqp;
+ error:
+- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
++ i40iw_free_qp_resources(iwqp);
+ return ERR_PTR(err_code);
+ }
+
+@@ -2654,13 +2667,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
+ .get_hw_stats = i40iw_get_hw_stats,
+ .get_port_immutable = i40iw_port_immutable,
+ .iw_accept = i40iw_accept,
+- .iw_add_ref = i40iw_add_ref,
++ .iw_add_ref = i40iw_qp_add_ref,
+ .iw_connect = i40iw_connect,
+ .iw_create_listen = i40iw_create_listen,
+ .iw_destroy_listen = i40iw_destroy_listen,
+ .iw_get_qp = i40iw_get_qp,
+ .iw_reject = i40iw_reject,
+- .iw_rem_ref = i40iw_rem_ref,
++ .iw_rem_ref = i40iw_qp_rem_ref,
+ .map_mr_sg = i40iw_map_mr_sg,
+ .mmap = i40iw_mmap,
+ .modify_qp = i40iw_modify_qp,
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+index 3a413752ccc38..ad7d81041bc9a 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+@@ -140,7 +140,7 @@ struct i40iw_qp {
+ struct i40iw_qp_host_ctx_info ctx_info;
+ struct i40iwarp_offload_info iwarp_info;
+ void *allocated_buffer;
+- atomic_t refcount;
++ refcount_t refcount;
+ struct iw_cm_id *cm_id;
+ void *cm_node;
+ struct ib_mr *lsmm_mr;
+@@ -175,5 +175,6 @@ struct i40iw_qp {
+ struct i40iw_dma_mem ietf_mem;
+ struct completion sq_drained;
+ struct completion rq_drained;
++ struct completion free_qp;
+ };
+ #endif
+diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
+index b591861934b3c..81d6a3460b55d 100644
+--- a/drivers/infiniband/hw/mlx4/cm.c
++++ b/drivers/infiniband/hw/mlx4/cm.c
+@@ -280,6 +280,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
+ if (!sriov->is_going_down && !id->scheduled_delete) {
+ id->scheduled_delete = 1;
+ schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
++ } else if (id->scheduled_delete) {
++ /* Adjust timeout if already scheduled */
++ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ }
+ spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+ spin_unlock(&sriov->id_map_lock);
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 57079110af9b5..08eccf2b6967d 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1307,6 +1307,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
+ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+ }
+
++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
++{
++ unsigned long flags;
++ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
++ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
++
++ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
++ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
++ queue_work(ctx->wi_wq, &ctx->work);
++ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
++}
++
+ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
+ struct mlx4_ib_demux_pv_qp *tun_qp,
+ int index)
+@@ -2009,7 +2021,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+ cq_size *= 2;
+
+ cq_attr.cqe = cq_size;
+- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
++ ctx->cq = ib_create_cq(ctx->ib_dev,
++ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
+ NULL, ctx, &cq_attr);
+ if (IS_ERR(ctx->cq)) {
+ ret = PTR_ERR(ctx->cq);
+@@ -2046,6 +2059,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
+ INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
+
+ ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
++ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
+
+ ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
+ if (ret) {
+@@ -2189,7 +2203,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ goto err_mcg;
+ }
+
+- snprintf(name, sizeof name, "mlx4_ibt%d", port);
++ snprintf(name, sizeof(name), "mlx4_ibt%d", port);
+ ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ if (!ctx->wq) {
+ pr_err("Failed to create tunnelling WQ for port %d\n", port);
+@@ -2197,7 +2211,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ goto err_wq;
+ }
+
+- snprintf(name, sizeof name, "mlx4_ibud%d", port);
++ snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
++ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
++ if (!ctx->wi_wq) {
++ pr_err("Failed to create wire WQ for port %d\n", port);
++ ret = -ENOMEM;
++ goto err_wiwq;
++ }
++
++ snprintf(name, sizeof(name), "mlx4_ibud%d", port);
+ ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ if (!ctx->ud_wq) {
+ pr_err("Failed to create up/down WQ for port %d\n", port);
+@@ -2208,6 +2230,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
+ return 0;
+
+ err_udwq:
++ destroy_workqueue(ctx->wi_wq);
++ ctx->wi_wq = NULL;
++
++err_wiwq:
+ destroy_workqueue(ctx->wq);
+ ctx->wq = NULL;
+
+@@ -2255,12 +2281,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
+ ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
+ }
+ flush_workqueue(ctx->wq);
++ flush_workqueue(ctx->wi_wq);
+ for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+ destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
+ free_pv_object(dev, i, ctx->port);
+ }
+ kfree(ctx->tun);
+ destroy_workqueue(ctx->ud_wq);
++ destroy_workqueue(ctx->wi_wq);
+ destroy_workqueue(ctx->wq);
+ }
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+index eb53bb4c0c91c..0173e3931cc7f 100644
+--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
+@@ -459,6 +459,7 @@ struct mlx4_ib_demux_pv_ctx {
+ struct ib_pd *pd;
+ struct work_struct work;
+ struct workqueue_struct *wq;
++ struct workqueue_struct *wi_wq;
+ struct mlx4_ib_demux_pv_qp qp[2];
+ };
+
+@@ -466,6 +467,7 @@ struct mlx4_ib_demux_ctx {
+ struct ib_device *ib_dev;
+ int port;
+ struct workqueue_struct *wq;
++ struct workqueue_struct *wi_wq;
+ struct workqueue_struct *ud_wq;
+ spinlock_t ud_lock;
+ atomic64_t subnet_prefix;
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index ff664355de550..73d5b8dc74d86 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -167,7 +167,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ {
+ enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+- struct mlx5_ib_srq *srq;
++ struct mlx5_ib_srq *srq = NULL;
+ struct mlx5_ib_wq *wq;
+ u16 wqe_ctr;
+ u8 roce_packet_type;
+@@ -179,7 +179,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+
+ if (qp->ibqp.xrcd) {
+ msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
+- srq = to_mibsrq(msrq);
++ if (msrq)
++ srq = to_mibsrq(msrq);
+ } else {
+ srq = to_msrq(qp->ibqp.srq);
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index b781ad74e6de4..40c1a05c2445d 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -888,7 +888,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ /* We support 'Gappy' memory registration too */
+ props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+ }
+- props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
++ /* IB_WR_REG_MR always requires changing the entity size with UMR */
++ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
++ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (MLX5_CAP_GEN(mdev, sho)) {
+ props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
+ /* At this stage no support for signature handover */
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index 4494dab8c3d83..93040c994e2e3 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -601,7 +601,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
+ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+ /* Part 2 - check capabilities */
+- page_size = ~dev->attr.page_size_caps + 1;
++ page_size = ~qed_attr->page_size_caps + 1;
+ if (page_size > PAGE_SIZE) {
+ DP_ERR(dev,
+ "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 6dea49e11f5f0..e521f3c3dbbf1 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -736,7 +736,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp;
+ struct qed_iwarp_accept_in params;
+- int rc = 0;
++ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+
+@@ -759,8 +759,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ params.ord = conn_param->ord;
+
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+- &qp->iwarp_cm_flags))
++ &qp->iwarp_cm_flags)) {
++ rc = -EINVAL;
+ goto err; /* QP already destroyed */
++ }
+
+ rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+ if (rc) {
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 16a994fd7d0a7..4408d33646647 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -2405,7 +2405,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
++ qp_attr->cap.max_inline_data = dev->attr.max_inline;
+ qp_init_attr->cap = qp_attr->cap;
+
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+@@ -2518,6 +2518,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iw_qp_rem_ref(&qp->ibqp);
++ else
++ kfree(qp);
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
+index 18da1e1ea9797..833f3f1b87f5e 100644
+--- a/drivers/infiniband/sw/rdmavt/vt.c
++++ b/drivers/infiniband/sw/rdmavt/vt.c
+@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
+ if (!rdi)
+ return rdi;
+
+- rdi->ports = kcalloc(nports,
+- sizeof(struct rvt_ibport **),
+- GFP_KERNEL);
++ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
+ if (!rdi->ports)
+ ib_dealloc_device(&rdi->ibdev);
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index 46e111c218fd4..9bfb98056fc2a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -281,6 +281,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ struct rxe_mc_elem *mce;
+ struct rxe_qp *qp;
+ union ib_gid dgid;
++ struct sk_buff *per_qp_skb;
++ struct rxe_pkt_info *per_qp_pkt;
+ int err;
+
+ if (skb->protocol == htons(ETH_P_IP))
+@@ -309,21 +311,29 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ if (err)
+ continue;
+
+- /* if *not* the last qp in the list
+- * increase the users of the skb then post to the next qp
++ /* for all but the last qp create a new clone of the
++ * skb and pass to the qp.
+ */
+ if (mce->qp_list.next != &mcg->qp_list)
+- skb_get(skb);
++ per_qp_skb = skb_clone(skb, GFP_ATOMIC);
++ else
++ per_qp_skb = skb;
++
++ if (unlikely(!per_qp_skb))
++ continue;
+
+- pkt->qp = qp;
++ per_qp_pkt = SKB_TO_PKT(per_qp_skb);
++ per_qp_pkt->qp = qp;
+ rxe_add_ref(qp);
+- rxe_rcv_pkt(pkt, skb);
++ rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
+ }
+
+ spin_unlock_bh(&mcg->mcg_lock);
+
+ rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+
++ return;
++
+ err1:
+ kfree_skb(skb);
+ }
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 044bcacad6e48..69ecf37053a81 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2463,6 +2463,8 @@ static struct net_device *ipoib_add_port(const char *format,
+ /* call event handler to ensure pkey in sync */
+ queue_work(ipoib_workqueue, &priv->flush_heavy);
+
++ ndev->rtnl_link_ops = ipoib_get_link_ops();
++
+ result = register_netdev(ndev);
+ if (result) {
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index 38c984d16996d..d5a90a66b45cf 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
+ return 0;
+ }
+
++static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
++{
++ struct ipoib_dev_priv *priv = ipoib_priv(dev);
++
++ if (!priv->parent)
++ return;
++
++ unregister_netdevice_queue(dev, head);
++}
++
+ static size_t ipoib_get_size(const struct net_device *dev)
+ {
+ return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
+@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ .priv_size = sizeof(struct ipoib_dev_priv),
+ .setup = ipoib_setup_common,
+ .newlink = ipoib_new_child_link,
++ .dellink = ipoib_del_child_link,
+ .changelink = ipoib_changelink,
+ .get_size = ipoib_get_size,
+ .fill_info = ipoib_fill_info,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 8ac8e18fbe0c3..58ca5e9c6079c 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -192,6 +192,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ }
+ priv = ipoib_priv(ndev);
+
++ ndev->rtnl_link_ops = ipoib_get_link_ops();
++
+ result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
+
+ if (result && ndev->reg_state == NETREG_UNINITIALIZED)
+diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
+index 7c70492d9d6b5..f831f01501d58 100644
+--- a/drivers/input/keyboard/ep93xx_keypad.c
++++ b/drivers/input/keyboard/ep93xx_keypad.c
+@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
+ }
+
+ keypad->irq = platform_get_irq(pdev, 0);
+- if (!keypad->irq) {
+- err = -ENXIO;
++ if (keypad->irq < 0) {
++ err = keypad->irq;
+ goto failed_free;
+ }
+
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 94c94d7f5155f..d6c924032aaa8 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
+ }
+
+ irq = platform_get_irq(pdev, 0);
+- if (!irq) {
+- dev_err(&pdev->dev, "no keyboard irq assigned\n");
+- return -EINVAL;
+- }
++ if (irq < 0)
++ return irq;
+
+ keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
+ if (!keypad_data) {
+diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
+index af3a6824f1a4d..77e0743a3cf85 100644
+--- a/drivers/input/keyboard/twl4030_keypad.c
++++ b/drivers/input/keyboard/twl4030_keypad.c
+@@ -50,7 +50,7 @@ struct twl4030_keypad {
+ bool autorepeat;
+ unsigned int n_rows;
+ unsigned int n_cols;
+- unsigned int irq;
++ int irq;
+
+ struct device *dbg_dev;
+ struct input_dev *input;
+@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
+ }
+
+ kp->irq = platform_get_irq(pdev, 0);
+- if (!kp->irq) {
+- dev_err(&pdev->dev, "no keyboard irq assigned\n");
+- return -EINVAL;
+- }
++ if (kp->irq < 0)
++ return kp->irq;
+
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ TWL4030_MAX_ROWS,
+diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
+index a681a2c04e399..f15ed3dcdb9b2 100644
+--- a/drivers/input/serio/sun4i-ps2.c
++++ b/drivers/input/serio/sun4i-ps2.c
+@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
+ struct sun4i_ps2data *drvdata;
+ struct serio *serio;
+ struct device *dev = &pdev->dev;
+- unsigned int irq;
+ int error;
+
+ drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
+@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
+ writel(0, drvdata->reg_base + PS2_REG_GCTL);
+
+ /* Get IRQ for the device */
+- irq = platform_get_irq(pdev, 0);
+- if (!irq) {
+- dev_err(dev, "no IRQ found\n");
+- error = -ENXIO;
++ drvdata->irq = platform_get_irq(pdev, 0);
++ if (drvdata->irq < 0) {
++ error = drvdata->irq;
+ goto err_disable_clk;
+ }
+
+- drvdata->irq = irq;
+ drvdata->serio = serio;
+ drvdata->dev = dev;
+
+diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
+index 9ed258854349b..5e6ba5c4eca2a 100644
+--- a/drivers/input/touchscreen/imx6ul_tsc.c
++++ b/drivers/input/touchscreen/imx6ul_tsc.c
+@@ -530,20 +530,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
+
+ mutex_lock(&input_dev->mutex);
+
+- if (input_dev->users) {
+- retval = clk_prepare_enable(tsc->adc_clk);
+- if (retval)
+- goto out;
+-
+- retval = clk_prepare_enable(tsc->tsc_clk);
+- if (retval) {
+- clk_disable_unprepare(tsc->adc_clk);
+- goto out;
+- }
++ if (!input_dev->users)
++ goto out;
+
+- retval = imx6ul_tsc_init(tsc);
++ retval = clk_prepare_enable(tsc->adc_clk);
++ if (retval)
++ goto out;
++
++ retval = clk_prepare_enable(tsc->tsc_clk);
++ if (retval) {
++ clk_disable_unprepare(tsc->adc_clk);
++ goto out;
+ }
+
++ retval = imx6ul_tsc_init(tsc);
++ if (retval) {
++ clk_disable_unprepare(tsc->tsc_clk);
++ clk_disable_unprepare(tsc->adc_clk);
++ goto out;
++ }
+ out:
+ mutex_unlock(&input_dev->mutex);
+ return retval;
+diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
+index b6f95f20f9244..cd8805d71d977 100644
+--- a/drivers/input/touchscreen/stmfts.c
++++ b/drivers/input/touchscreen/stmfts.c
+@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
+
+ mutex_lock(&sdata->mutex);
+
+- if (value & sdata->hover_enabled)
++ if (value && sdata->hover_enabled)
+ goto out;
+
+ if (sdata->running)
+diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
+index 7543e395a2c64..a2ebc75af8c79 100644
+--- a/drivers/lightnvm/core.c
++++ b/drivers/lightnvm/core.c
+@@ -1316,8 +1316,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
+ strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
+ i++;
+
+- if (i > 31) {
+- pr_err("max 31 devices can be reported.\n");
++ if (i >= ARRAY_SIZE(devices->info)) {
++ pr_err("max %zd devices can be reported.\n",
++ ARRAY_SIZE(devices->info));
+ break;
+ }
+ }
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 0b821a5b2db84..3e7d4b20ab34f 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan)
+ exit:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+- if (!err && (chan->txdone_method & TXDONE_BY_POLL))
+- /* kick start the timer immediately to avoid delays */
+- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
++ /* kick start the timer immediately to avoid delays */
++ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
++ /* but only if not already active */
++ if (!hrtimer_active(&chan->mbox->poll_hrt))
++ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
++ }
+ }
+
+ static void tx_tick(struct mbox_chan *chan, int r)
+@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ if (chan->active_req && chan->cl) {
++ resched = true;
+ txdone = chan->mbox->ops->last_tx_done(chan);
+ if (txdone)
+ tx_tick(chan, 0);
+- else
+- resched = true;
+ }
+ }
+
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index 9a6ce9f5a7db5..3c8b365ce635a 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -70,7 +70,7 @@ struct cmdq_task {
+ struct cmdq {
+ struct mbox_controller mbox;
+ void __iomem *base;
+- u32 irq;
++ int irq;
+ u32 thread_nr;
+ u32 irq_mask;
+ struct cmdq_thread *thread;
+@@ -474,10 +474,8 @@ static int cmdq_probe(struct platform_device *pdev)
+ }
+
+ cmdq->irq = platform_get_irq(pdev, 0);
+- if (!cmdq->irq) {
+- dev_err(dev, "failed to get irq\n");
+- return -EINVAL;
+- }
++ if (cmdq->irq < 0)
++ return cmdq->irq;
+
+ cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
+ cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 3ad18246fcb3c..7227d03dbbea7 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1954,6 +1954,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(md_bitmap_load);
+
++/* caller need to free returned bitmap with md_bitmap_free() */
+ struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
+ {
+ int rv = 0;
+@@ -2017,6 +2018,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ md_bitmap_unplug(mddev->bitmap);
+ *low = lo;
+ *high = hi;
++ md_bitmap_free(bitmap);
+
+ return rv;
+ }
+@@ -2620,4 +2622,3 @@ struct attribute_group md_bitmap_group = {
+ .name = "bitmap",
+ .attrs = md_bitmap_attrs,
+ };
+-
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index d50737ec40394..afbbc552c3275 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
+ * can't resize bitmap
+ */
+ goto out;
++ md_bitmap_free(bitmap);
+ }
+
+ return 0;
+diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
+index 3f1ca40b9b987..8a8585261bb80 100644
+--- a/drivers/media/firewire/firedtv-fw.c
++++ b/drivers/media/firewire/firedtv-fw.c
+@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+
+ name_len = fw_csr_string(unit->directory, CSR_MODEL,
+ name, sizeof(name));
+- if (name_len < 0)
+- return name_len;
++ if (name_len < 0) {
++ err = name_len;
++ goto fail_free;
++ }
+ for (i = ARRAY_SIZE(model_names); --i; )
+ if (strlen(model_names[i]) <= name_len &&
+ strncmp(name, model_names[i], name_len) == 0)
+diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
+index de295114ca482..21666d705e372 100644
+--- a/drivers/media/i2c/m5mols/m5mols_core.c
++++ b/drivers/media/i2c/m5mols/m5mols_core.c
+@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+- info->set_power(&client->dev, 0);
++ if (info->set_power)
++ info->set_power(&client->dev, 0);
+ return ret;
+ }
+
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 266e947572c1e..be6c882dd1d54 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -34,6 +34,8 @@
+ #define OV5640_REG_SYS_RESET02 0x3002
+ #define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
+ #define OV5640_REG_SYS_CTRL0 0x3008
++#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
++#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
+ #define OV5640_REG_CHIP_ID 0x300a
+ #define OV5640_REG_IO_MIPI_CTRL00 0x300e
+ #define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
+@@ -272,8 +274,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+ /* YUV422 UYVY VGA@30fps */
+ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
+ {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
+- {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
+- {0x3630, 0x36, 0, 0},
++ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
+ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
+ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
+ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
+@@ -740,7 +741,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
+ * +->| PLL Root Div | - reg 0x3037, bit 4
+ * +-+------------+
+ * | +---------+
+- * +->| Bit Div | - reg 0x3035, bits 0-3
++ * +->| Bit Div | - reg 0x3034, bits 0-3
+ * +-+-------+
+ * | +-------------+
+ * +->| SCLK Div | - reg 0x3108, bits 0-1
+@@ -1109,6 +1110,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
+ val = regs->val;
+ mask = regs->mask;
+
++ /* remain in power down mode for DVP */
++ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
++ val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
++ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
++ continue;
++
+ if (mask)
+ ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
+ else
+@@ -1264,31 +1271,9 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
+ if (ret)
+ return ret;
+
+- /*
+- * enable VSYNC/HREF/PCLK DVP control lines
+- * & D[9:6] DVP data lines
+- *
+- * PAD OUTPUT ENABLE 01
+- * - 6: VSYNC output enable
+- * - 5: HREF output enable
+- * - 4: PCLK output enable
+- * - [3:0]: D[9:6] output enable
+- */
+- ret = ov5640_write_reg(sensor,
+- OV5640_REG_PAD_OUTPUT_ENABLE01,
+- on ? 0x7f : 0);
+- if (ret)
+- return ret;
+-
+- /*
+- * enable D[5:0] DVP data lines
+- *
+- * PAD OUTPUT ENABLE 02
+- * - [7:2]: D[5:0] output enable
+- */
+- return ov5640_write_reg(sensor,
+- OV5640_REG_PAD_OUTPUT_ENABLE02,
+- on ? 0xfc : 0);
++ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
++ OV5640_REG_SYS_CTRL0_SW_PWUP :
++ OV5640_REG_SYS_CTRL0_SW_PWDN);
+ }
+
+ static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
+@@ -1987,6 +1972,95 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
+ clk_disable_unprepare(sensor->xclk);
+ }
+
++static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
++{
++ int ret;
++
++ if (!on) {
++ /* Reset MIPI bus settings to their default values. */
++ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
++ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
++ return 0;
++ }
++
++ /*
++ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
++ *
++ * 0x300e = 0x40
++ * [7:5] = 010 : 2 data lanes mode (see FIXME note in
++ * "ov5640_set_stream_mipi()")
++ * [4] = 0 : Power up MIPI HS Tx
++ * [3] = 0 : Power up MIPI LS Rx
++ * [2] = 0 : MIPI interface disabled
++ */
++ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
++ if (ret)
++ return ret;
++
++ /*
++ * Gate clock and set LP11 in 'no packets mode' (idle)
++ *
++ * 0x4800 = 0x24
++ * [5] = 1 : Gate clock when 'no packets'
++ * [2] = 1 : MIPI bus in LP11 when 'no packets'
++ */
++ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
++ if (ret)
++ return ret;
++
++ /*
++ * Set data lanes and clock in LP11 when 'sleeping'
++ *
++ * 0x3019 = 0x70
++ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
++ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
++ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
++ */
++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
++ if (ret)
++ return ret;
++
++ /* Give lanes some time to coax into LP11 state. */
++ usleep_range(500, 1000);
++
++ return 0;
++}
++
++static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
++{
++ int ret;
++
++ if (!on) {
++ /* Reset settings to their default values. */
++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
++ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
++ return 0;
++ }
++
++ /*
++ * enable VSYNC/HREF/PCLK DVP control lines
++ * & D[9:6] DVP data lines
++ *
++ * PAD OUTPUT ENABLE 01
++ * - 6: VSYNC output enable
++ * - 5: HREF output enable
++ * - 4: PCLK output enable
++ * - [3:0]: D[9:6] output enable
++ */
++ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x7f);
++ if (ret)
++ return ret;
++
++ /*
++ * enable D[5:0] DVP data lines
++ *
++ * PAD OUTPUT ENABLE 02
++ * - [7:2]: D[5:0] output enable
++ */
++ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
++}
++
+ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+ {
+ int ret = 0;
+@@ -1999,67 +2073,17 @@ static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+ ret = ov5640_restore_mode(sensor);
+ if (ret)
+ goto power_off;
++ }
+
+- /* We're done here for DVP bus, while CSI-2 needs setup. */
+- if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
+- return 0;
+-
+- /*
+- * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
+- *
+- * 0x300e = 0x40
+- * [7:5] = 010 : 2 data lanes mode (see FIXME note in
+- * "ov5640_set_stream_mipi()")
+- * [4] = 0 : Power up MIPI HS Tx
+- * [3] = 0 : Power up MIPI LS Rx
+- * [2] = 0 : MIPI interface disabled
+- */
+- ret = ov5640_write_reg(sensor,
+- OV5640_REG_IO_MIPI_CTRL00, 0x40);
+- if (ret)
+- goto power_off;
+-
+- /*
+- * Gate clock and set LP11 in 'no packets mode' (idle)
+- *
+- * 0x4800 = 0x24
+- * [5] = 1 : Gate clock when 'no packets'
+- * [2] = 1 : MIPI bus in LP11 when 'no packets'
+- */
+- ret = ov5640_write_reg(sensor,
+- OV5640_REG_MIPI_CTRL00, 0x24);
+- if (ret)
+- goto power_off;
+-
+- /*
+- * Set data lanes and clock in LP11 when 'sleeping'
+- *
+- * 0x3019 = 0x70
+- * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
+- * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
+- * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
+- */
+- ret = ov5640_write_reg(sensor,
+- OV5640_REG_PAD_OUTPUT00, 0x70);
+- if (ret)
+- goto power_off;
+-
+- /* Give lanes some time to coax into LP11 state. */
+- usleep_range(500, 1000);
+-
+- } else {
+- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+- /* Reset MIPI bus settings to their default values. */
+- ov5640_write_reg(sensor,
+- OV5640_REG_IO_MIPI_CTRL00, 0x58);
+- ov5640_write_reg(sensor,
+- OV5640_REG_MIPI_CTRL00, 0x04);
+- ov5640_write_reg(sensor,
+- OV5640_REG_PAD_OUTPUT00, 0x00);
+- }
++ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
++ ret = ov5640_set_power_mipi(sensor, on);
++ else
++ ret = ov5640_set_power_dvp(sensor, on);
++ if (ret)
++ goto power_off;
+
++ if (!on)
+ ov5640_set_power_off(sensor);
+- }
+
+ return 0;
+
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index dbbab75f135ec..cff99cf61ed4d 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
+ .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
+ };
+
+-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+- bool *handled)
++static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
++ bool *handled)
+ {
+ struct tc358743_state *state = to_state(sd);
+ unsigned int cec_rxint, cec_txint;
+@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ERROR);
+ }
+- *handled = true;
++ if (handled)
++ *handled = true;
+ }
+ if ((intstatus & MASK_CEC_RINT) &&
+ (cec_rxint & MASK_CECRIEND)) {
+@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ msg.msg[i] = v & 0xff;
+ }
+ cec_received_msg(state->cec_adap, &msg);
+- *handled = true;
++ if (handled)
++ *handled = true;
+ }
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+
+ #ifdef CONFIG_VIDEO_TC358743_CEC
+ if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
+- tc358743_cec_isr(sd, intstatus, handled);
++ tc358743_cec_handler(sd, intstatus, handled);
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+ intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
+@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
+ {
+ struct tc358743_state *state = dev_id;
+- bool handled;
++ bool handled = false;
+
+ tc358743_isr(&state->sd, 0, &handled);
+
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index a359da7773a90..ff2962cea6164 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ btv->id = dev->device;
+ if (pci_enable_device(dev)) {
+ pr_warn("%d: Can't enable device\n", btv->c.nr);
+- return -EIO;
++ result = -EIO;
++ goto free_mem;
+ }
+ if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+ pr_warn("%d: No suitable DMA available\n", btv->c.nr);
+- return -EIO;
++ result = -EIO;
++ goto free_mem;
+ }
+ if (!request_mem_region(pci_resource_start(dev,0),
+ pci_resource_len(dev,0),
+@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ pr_warn("%d: can't request iomem (0x%llx)\n",
+ btv->c.nr,
+ (unsigned long long)pci_resource_start(dev, 0));
+- return -EBUSY;
++ result = -EBUSY;
++ goto free_mem;
+ }
+ pci_set_master(dev);
+ pci_set_command(dev);
+@@ -4211,6 +4214,10 @@ fail0:
+ release_mem_region(pci_resource_start(btv->c.pci,0),
+ pci_resource_len(btv->c.pci,0));
+ pci_disable_device(btv->c.pci);
++
++free_mem:
++ bttvs[btv->c.nr] = NULL;
++ kfree(btv);
+ return result;
+ }
+
+diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
+index 79e1afb710758..5cc4ef21f9d37 100644
+--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
++++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
+@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
+ {
+ int err;
+
+- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
++ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
++ (reg << 2) & 0xffffffff, value);
+ err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
+ if (err < 0)
+ return err;
+diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
+index cde0d254ec1c4..a77c49b185115 100644
+--- a/drivers/media/platform/exynos4-is/fimc-isp.c
++++ b/drivers/media/platform/exynos4-is/fimc-isp.c
+@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
+
+ if (on) {
+ ret = pm_runtime_get_sync(&is->pdev->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(&is->pdev->dev);
+ return ret;
++ }
+ set_bit(IS_ST_PWR_ON, &is->state);
+
+ ret = fimc_is_start_firmware(is);
+diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
+index e87c6a09205bd..efd06621951c7 100644
+--- a/drivers/media/platform/exynos4-is/fimc-lite.c
++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
+@@ -470,7 +470,7 @@ static int fimc_lite_open(struct file *file)
+ set_bit(ST_FLITE_IN_USE, &fimc->state);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+- goto unlock;
++ goto err_pm;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index 9c31d950cddf7..a07d796f63df0 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -484,8 +484,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+ return -ENXIO;
+
+ ret = pm_runtime_get_sync(fmd->pmf);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(fmd->pmf);
+ return ret;
++ }
+
+ fmd->num_sensors = 0;
+
+@@ -1268,11 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
+ if (IS_ERR(pctl->state_default))
+ return PTR_ERR(pctl->state_default);
+
++ /* PINCTRL_STATE_IDLE is optional */
+ pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
+ PINCTRL_STATE_IDLE);
+- if (IS_ERR(pctl->state_idle))
+- return PTR_ERR(pctl->state_idle);
+-
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
+index 540151bbf58f2..1aac167abb175 100644
+--- a/drivers/media/platform/exynos4-is/mipi-csis.c
++++ b/drivers/media/platform/exynos4-is/mipi-csis.c
+@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
+ if (enable) {
+ s5pcsis_clear_counters(state);
+ ret = pm_runtime_get_sync(&state->pdev->dev);
+- if (ret && ret != 1)
++ if (ret && ret != 1) {
++ pm_runtime_put_noidle(&state->pdev->dev);
+ return ret;
++ }
+ }
+
+ mutex_lock(&state->lock);
+diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
+index 27779b75df543..ac112cf06ab31 100644
+--- a/drivers/media/platform/mx2_emmaprp.c
++++ b/drivers/media/platform/mx2_emmaprp.c
+@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, pcdev);
+
+ irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return irq;
++ if (irq < 0) {
++ ret = irq;
++ goto rel_vdev;
++ }
++
+ ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
+ dev_name(&pdev->dev), pcdev);
+ if (ret)
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 327c5716922ac..dce6b3685e135 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -2330,8 +2330,10 @@ static int isp_probe(struct platform_device *pdev)
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ isp->mmio_base[map_idx] =
+ devm_ioremap_resource(isp->dev, mem);
+- if (IS_ERR(isp->mmio_base[map_idx]))
+- return PTR_ERR(isp->mmio_base[map_idx]);
++ if (IS_ERR(isp->mmio_base[map_idx])) {
++ ret = PTR_ERR(isp->mmio_base[map_idx]);
++ goto error;
++ }
+ }
+
+ ret = isp_get_clocks(isp);
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
+index 008afb85023be..3c5b9082ad723 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
+@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_sync(dev);
+ return ret;
++ }
+
+ ret = csiphy_set_clock_rates(csiphy);
+ if (ret < 0) {
+diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
+index 84e982f259a06..bbc430a003443 100644
+--- a/drivers/media/platform/qcom/venus/core.c
++++ b/drivers/media/platform/qcom/venus/core.c
+@@ -316,8 +316,10 @@ static int venus_probe(struct platform_device *pdev)
+ goto err_core_deinit;
+
+ ret = pm_runtime_put_sync(dev);
+- if (ret)
++ if (ret) {
++ pm_runtime_get_noresume(dev);
+ goto err_dev_unregister;
++ }
+
+ return 0;
+
+@@ -328,6 +330,7 @@ err_core_deinit:
+ err_venus_shutdown:
+ venus_shutdown(core);
+ err_runtime_disable:
++ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+ hfi_destroy(core);
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index 05b80a66e80ed..658825b4c4e8d 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -993,8 +993,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
+ break;
+ }
+
+- INIT_LIST_HEAD(&inst->registeredbufs);
+-
+ return ret;
+ }
+
+@@ -1091,6 +1089,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
+ static void vdec_buf_cleanup(struct vb2_buffer *vb)
+ {
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
++ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
++ struct venus_buffer *buf = to_venus_buffer(vbuf);
++
++ mutex_lock(&inst->lock);
++ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
++ if (!list_empty(&inst->registeredbufs))
++ list_del_init(&buf->reg_list);
++ mutex_unlock(&inst->lock);
+
+ inst->buf_count--;
+ if (!inst->buf_count)
+diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
+index 5c6b00737fe75..05c712e00a2a7 100644
+--- a/drivers/media/platform/rcar-fcp.c
++++ b/drivers/media/platform/rcar-fcp.c
+@@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
+ return 0;
+
+ ret = pm_runtime_get_sync(fcp->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(fcp->dev);
+ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
+index c14af1b929dff..d27eccfa57cae 100644
+--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
+@@ -361,7 +361,6 @@ struct rcar_csi2 {
+ struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
+
+ struct v4l2_async_notifier notifier;
+- struct v4l2_async_subdev asd;
+ struct v4l2_subdev *remote;
+
+ struct v4l2_mbus_framefmt mf;
+@@ -810,6 +809,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
+
+ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
+ {
++ struct v4l2_async_subdev *asd;
++ struct fwnode_handle *fwnode;
+ struct device_node *ep;
+ struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
+ int ret;
+@@ -833,24 +834,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
+ return ret;
+ }
+
+- priv->asd.match.fwnode =
+- fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
+- priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+-
++ fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
+ of_node_put(ep);
+
+- v4l2_async_notifier_init(&priv->notifier);
+-
+- ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
+- if (ret) {
+- fwnode_handle_put(priv->asd.match.fwnode);
+- return ret;
+- }
++ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
+
++ v4l2_async_notifier_init(&priv->notifier);
+ priv->notifier.ops = &rcar_csi2_notify_ops;
+
+- dev_dbg(priv->dev, "Found '%pOF'\n",
+- to_of_node(priv->asd.match.fwnode));
++ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
++ sizeof(*asd));
++ fwnode_handle_put(fwnode);
++ if (IS_ERR(asd))
++ return PTR_ERR(asd);
+
+ ret = v4l2_async_subdev_notifier_register(&priv->subdev,
+ &priv->notifier);
+diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
+index 3cb29b2e0b2b1..e5f6360801082 100644
+--- a/drivers/media/platform/rcar-vin/rcar-dma.c
++++ b/drivers/media/platform/rcar-vin/rcar-dma.c
+@@ -1334,8 +1334,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
+ int ret;
+
+ ret = pm_runtime_get_sync(vin->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(vin->dev);
+ return ret;
++ }
+
+ /* Make register writes take effect immediately. */
+ vnmc = rvin_read(vin, VNMC_REG);
+diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
+index 0f267a237b424..af3c8d405509e 100644
+--- a/drivers/media/platform/rcar_drif.c
++++ b/drivers/media/platform/rcar_drif.c
+@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
+ /* OF graph endpoint's V4L2 async data */
+ struct rcar_drif_graph_ep {
+ struct v4l2_subdev *subdev; /* Async matched subdev */
+- struct v4l2_async_subdev asd; /* Async sub-device descriptor */
+ };
+
+ /* DMA buffer */
+@@ -1105,12 +1104,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+- if (sdr->ep.asd.match.fwnode !=
+- of_fwnode_handle(subdev->dev->of_node)) {
+- rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
+- return -EINVAL;
+- }
+-
+ v4l2_set_subdev_hostdata(subdev, sdr);
+ sdr->ep.subdev = subdev;
+ rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
+@@ -1214,7 +1207,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+ {
+ struct v4l2_async_notifier *notifier = &sdr->notifier;
+ struct fwnode_handle *fwnode, *ep;
+- int ret;
++ struct v4l2_async_subdev *asd;
+
+ v4l2_async_notifier_init(notifier);
+
+@@ -1223,26 +1216,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+ if (!ep)
+ return 0;
+
++ /* Get the endpoint properties */
++ rcar_drif_get_ep_properties(sdr, ep);
++
+ fwnode = fwnode_graph_get_remote_port_parent(ep);
++ fwnode_handle_put(ep);
+ if (!fwnode) {
+ dev_warn(sdr->dev, "bad remote port parent\n");
+- fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+- sdr->ep.asd.match.fwnode = fwnode;
+- sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+- ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
+- if (ret) {
+- fwnode_handle_put(fwnode);
+- return ret;
+- }
+-
+- /* Get the endpoint properties */
+- rcar_drif_get_ep_properties(sdr, ep);
+-
++ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
++ sizeof(*asd));
+ fwnode_handle_put(fwnode);
+- fwnode_handle_put(ep);
++ if (IS_ERR(asd))
++ return PTR_ERR(asd);
+
+ return 0;
+ }
+diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
+index 36b821ccc1dba..bf9a75b75083b 100644
+--- a/drivers/media/platform/rockchip/rga/rga-buf.c
++++ b/drivers/media/platform/rockchip/rga/rga-buf.c
+@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
+
+ ret = pm_runtime_get_sync(rga->dev);
+ if (ret < 0) {
++ pm_runtime_put_noidle(rga->dev);
+ rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
+index c6fbcd7036d6d..ee624804862e2 100644
+--- a/drivers/media/platform/s3c-camif/camif-core.c
++++ b/drivers/media/platform/s3c-camif/camif-core.c
+@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
+
+ ret = camif_media_dev_init(camif);
+ if (ret < 0)
+- goto err_alloc;
++ goto err_pm;
+
+ ret = camif_register_sensor(camif);
+ if (ret < 0)
+@@ -498,10 +498,9 @@ err_sens:
+ media_device_unregister(&camif->media_dev);
+ media_device_cleanup(&camif->media_dev);
+ camif_unregister_media_entities(camif);
+-err_alloc:
++err_pm:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+-err_pm:
+ camif_clk_put(camif);
+ err_clk:
+ s3c_camif_unregister_subdev(camif);
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+index 7d52431c2c837..62d2320a72186 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
+ int i, ret = 0;
+
+ ret = pm_runtime_get_sync(pm->device);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(pm->device);
+ return ret;
++ }
+
+ /* clock control */
+ for (i = 0; i < pm->num_clocks; i++) {
+diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+index 675b5f2b4c2ee..a55ddf8d185d5 100644
+--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+@@ -1367,7 +1367,7 @@ static int bdisp_probe(struct platform_device *pdev)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to set PM\n");
+- goto err_dbg;
++ goto err_pm;
+ }
+
+ /* Filters */
+@@ -1395,7 +1395,6 @@ err_filter:
+ bdisp_hw_free_filters(bdisp->dev);
+ err_pm:
+ pm_runtime_put(dev);
+-err_dbg:
+ bdisp_debugfs_remove(bdisp);
+ err_v4l2:
+ v4l2_device_unregister(&bdisp->v4l2_dev);
+diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
+index 91369fb3ffaa4..2791107e641bc 100644
+--- a/drivers/media/platform/sti/delta/delta-v4l2.c
++++ b/drivers/media/platform/sti/delta/delta-v4l2.c
+@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
+ /* enable the hardware */
+ if (!dec->pm) {
+ ret = delta_get_sync(ctx);
+- if (ret)
++ if (ret) {
++ delta_put_autosuspend(ctx);
+ goto err;
++ }
+ }
+
+ /* decode this access unit */
+diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
+index 401aaafa17109..43f279e2a6a38 100644
+--- a/drivers/media/platform/sti/hva/hva-hw.c
++++ b/drivers/media/platform/sti/hva/hva-hw.c
+@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
++ pm_runtime_put_noidle(dev);
+ mutex_unlock(&hva->protect_mutex);
+ return -EFAULT;
+ }
+@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
+- goto err_clk;
++ goto err_pm;
+ }
+
+ /* check IP hardware version */
+@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ seq_puts(s, "Cannot wake up IP\n");
++ pm_runtime_put_noidle(dev);
+ mutex_unlock(&hva->protect_mutex);
+ return;
+ }
+diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
+index 9392e3409fba0..d41475f56ab54 100644
+--- a/drivers/media/platform/stm32/stm32-dcmi.c
++++ b/drivers/media/platform/stm32/stm32-dcmi.c
+@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
+ __func__, ret);
+- goto err_release_buffers;
++ goto err_pm_put;
+ }
+
+ ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
+@@ -837,8 +837,6 @@ err_media_pipeline_stop:
+
+ err_pm_put:
+ pm_runtime_put(dcmi->dev);
+-
+-err_release_buffers:
+ spin_lock_irq(&dcmi->irqlock);
+ /*
+ * Return all buffers to vb2 in QUEUED state.
+diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
+index 8b14ba4a3d9ea..817bd13370eff 100644
+--- a/drivers/media/platform/ti-vpe/vpe.c
++++ b/drivers/media/platform/ti-vpe/vpe.c
+@@ -2435,6 +2435,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
+
+ r = pm_runtime_get_sync(&pdev->dev);
+ WARN_ON(r < 0);
++ if (r)
++ pm_runtime_put_noidle(&pdev->dev);
+ return r < 0 ? r : 0;
+ }
+
+diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
+index c650e45bb0ad1..dc62533cf32ce 100644
+--- a/drivers/media/platform/vsp1/vsp1_drv.c
++++ b/drivers/media/platform/vsp1/vsp1_drv.c
+@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
+ int ret;
+
+ ret = pm_runtime_get_sync(vsp1->dev);
+- return ret < 0 ? ret : 0;
++ if (ret < 0) {
++ pm_runtime_put_noidle(vsp1->dev);
++ return ret;
++ }
++
++ return 0;
+ }
+
+ /*
+@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
+ /* Configure device parameters based on the version register. */
+ pm_runtime_enable(&pdev->dev);
+
+- ret = pm_runtime_get_sync(&pdev->dev);
++ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ goto done;
+
+ vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
+- pm_runtime_put_sync(&pdev->dev);
++ vsp1_device_put(vsp1);
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
+diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
+index 9cdef17b4793f..c12dda73cdd53 100644
+--- a/drivers/media/rc/ati_remote.c
++++ b/drivers/media/rc/ati_remote.c
+@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
+ err("%s: endpoint_in message size==0? \n", __func__);
+ return -ENODEV;
+ }
++ if (!usb_endpoint_is_int_out(endpoint_out)) {
++ err("%s: Unexpected endpoint_out\n", __func__);
++ return -ENODEV;
++ }
+
+ ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
+ rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
+diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
+index b6e70fada3fb2..8fb186b25d6af 100644
+--- a/drivers/media/tuners/tuner-simple.c
++++ b/drivers/media/tuners/tuner-simple.c
+@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
+ case TUNER_TENA_9533_DI:
+ case TUNER_YMEC_TVF_5533MF:
+ tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
+- return 0;
++ return -EINVAL;
+ case TUNER_PHILIPS_FM1216ME_MK3:
+ case TUNER_PHILIPS_FM1236_MK3:
+ case TUNER_PHILIPS_FMD1216ME_MK3:
+@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
+ TUNER_RATIO_SELECT_50; /* 50 kHz step */
+
+ /* Bandswitch byte */
+- simple_radio_bandswitch(fe, &buffer[0]);
++ if (simple_radio_bandswitch(fe, &buffer[0]))
++ return 0;
+
+ /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
+ freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index e399b9fad7574..a30a8a731eda8 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
+ offset &= 7;
+ mask = ((1LL << bits) - 1) << offset;
+
+- for (; bits > 0; data++) {
++ while (1) {
+ u8 byte = *data & mask;
+ value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
+ bits -= 8 - (offset > 0 ? offset : 0);
++ if (bits <= 0)
++ break;
++
+ offset -= 8;
+ mask = (1 << bits) - 1;
++ data++;
+ }
+
+ /* Sign-extend the value if needed. */
+diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
+index b4499cddeffe5..ca3a9c2eec271 100644
+--- a/drivers/media/usb/uvc/uvc_entity.c
++++ b/drivers/media/usb/uvc/uvc_entity.c
+@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
+ int ret;
+
+ if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
++ u32 function;
++
+ v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
+ strscpy(entity->subdev.name, entity->name,
+ sizeof(entity->subdev.name));
+
++ switch (UVC_ENTITY_TYPE(entity)) {
++ case UVC_VC_SELECTOR_UNIT:
++ function = MEDIA_ENT_F_VID_MUX;
++ break;
++ case UVC_VC_PROCESSING_UNIT:
++ case UVC_VC_EXTENSION_UNIT:
++ /* For lack of a better option. */
++ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
++ break;
++ case UVC_COMPOSITE_CONNECTOR:
++ case UVC_COMPONENT_CONNECTOR:
++ function = MEDIA_ENT_F_CONN_COMPOSITE;
++ break;
++ case UVC_SVIDEO_CONNECTOR:
++ function = MEDIA_ENT_F_CONN_SVIDEO;
++ break;
++ case UVC_ITT_CAMERA:
++ function = MEDIA_ENT_F_CAM_SENSOR;
++ break;
++ case UVC_TT_VENDOR_SPECIFIC:
++ case UVC_ITT_VENDOR_SPECIFIC:
++ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
++ case UVC_OTT_VENDOR_SPECIFIC:
++ case UVC_OTT_DISPLAY:
++ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
++ case UVC_EXTERNAL_VENDOR_SPECIFIC:
++ default:
++ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
++ break;
++ }
++
++ entity->subdev.entity.function = function;
++
+ ret = media_entity_pads_init(&entity->subdev.entity,
+ entity->num_pads, entity->pads);
+
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 0335e69b70abe..5e6f3153b5ff8 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -247,11 +247,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ if (ret < 0)
+ goto done;
+
++ /* After the probe, update fmt with the values returned from
++ * negotiation with the device.
++ */
++ for (i = 0; i < stream->nformats; ++i) {
++ if (probe->bFormatIndex == stream->format[i].index) {
++ format = &stream->format[i];
++ break;
++ }
++ }
++
++ if (i == stream->nformats) {
++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
++ probe->bFormatIndex);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < format->nframes; ++i) {
++ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
++ frame = &format->frame[i];
++ break;
++ }
++ }
++
++ if (i == format->nframes) {
++ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
++ probe->bFrameIndex);
++ return -EINVAL;
++ }
++
+ fmt->fmt.pix.width = frame->wWidth;
+ fmt->fmt.pix.height = frame->wHeight;
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
+ fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
++ fmt->fmt.pix.pixelformat = format->fcc;
+ fmt->fmt.pix.colorspace = format->colorspace;
+
+ if (uvc_format != NULL)
+diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
+index 0b0ed72016da8..0309bd5a18008 100644
+--- a/drivers/memory/fsl-corenet-cf.c
++++ b/drivers/memory/fsl-corenet-cf.c
+@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
+ dev_set_drvdata(&pdev->dev, ccf);
+
+ irq = platform_get_irq(pdev, 0);
+- if (!irq) {
+- dev_err(&pdev->dev, "%s: no irq\n", __func__);
+- return -ENXIO;
+- }
++ if (irq < 0)
++ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
+ if (ret) {
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index eff26c1b13940..27bc417029e11 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -949,7 +949,7 @@ static int gpmc_cs_remap(int cs, u32 base)
+ int ret;
+ u32 old_base, size;
+
+- if (cs > gpmc_cs_num) {
++ if (cs >= gpmc_cs_num) {
+ pr_err("%s: requested chip-select is disabled\n", __func__);
+ return -ENODEV;
+ }
+@@ -984,7 +984,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+ struct resource *res = &gpmc->mem;
+ int r = -1;
+
+- if (cs > gpmc_cs_num) {
++ if (cs >= gpmc_cs_num) {
+ pr_err("%s: requested chip-select is disabled\n", __func__);
+ return -ENODEV;
+ }
+@@ -2274,6 +2274,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
+ }
+ }
+ #else
++void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
++{
++ memset(p, 0, sizeof(*p));
++}
+ static int gpmc_probe_dt(struct platform_device *pdev)
+ {
+ return 0;
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index 154270f8d8d78..bbcde58e2a11e 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -1424,8 +1424,14 @@ static int sm501_plat_probe(struct platform_device *dev)
+ goto err_claim;
+ }
+
+- return sm501_init_dev(sm);
++ ret = sm501_init_dev(sm);
++ if (ret)
++ goto err_unmap;
++
++ return 0;
+
++ err_unmap:
++ iounmap(sm->regs);
+ err_claim:
+ release_resource(sm->regs_claim);
+ kfree(sm->regs_claim);
+diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
+index 1958833b3b74e..4fd57052ddd3d 100644
+--- a/drivers/misc/cardreader/rtsx_pcr.c
++++ b/drivers/misc/cardreader/rtsx_pcr.c
+@@ -1534,12 +1534,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
+ ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
+ if (ret < 0)
+- goto disable_irq;
++ goto free_slots;
+
+ schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+
+ return 0;
+
++free_slots:
++ kfree(pcr->slots);
+ disable_irq:
+ free_irq(pcr->irq, (void *)pcr);
+ disable_msi:
+diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
+index cde9a2fc13250..490ff49d11ede 100644
+--- a/drivers/misc/eeprom/at25.c
++++ b/drivers/misc/eeprom/at25.c
+@@ -358,7 +358,7 @@ static int at25_probe(struct spi_device *spi)
+ at25->nvmem_config.reg_read = at25_ee_read;
+ at25->nvmem_config.reg_write = at25_ee_write;
+ at25->nvmem_config.priv = at25;
+- at25->nvmem_config.stride = 4;
++ at25->nvmem_config.stride = 1;
+ at25->nvmem_config.word_size = 1;
+ at25->nvmem_config.size = chip.byte_len;
+
+diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
+index 01e27682ea303..a486c6c7f4077 100644
+--- a/drivers/misc/mic/scif/scif_rma.c
++++ b/drivers/misc/mic/scif/scif_rma.c
+@@ -1381,6 +1381,8 @@ retry:
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
+ pinned_pages->pages);
+ if (nr_pages != pinned_pages->nr_pages) {
++ if (pinned_pages->nr_pages < 0)
++ pinned_pages->nr_pages = 0;
+ if (try_upgrade) {
+ if (ulimit)
+ __scif_dec_pinned_vm_lock(mm, nr_pages);
+@@ -1400,7 +1402,6 @@ retry:
+
+ if (pinned_pages->nr_pages < nr_pages) {
+ err = -EFAULT;
+- pinned_pages->nr_pages = nr_pages;
+ goto dec_pinned;
+ }
+
+@@ -1413,7 +1414,6 @@ dec_pinned:
+ __scif_dec_pinned_vm_lock(mm, nr_pages);
+ /* Something went wrong! Rollback */
+ error_unmap:
+- pinned_pages->nr_pages = nr_pages;
+ scif_destroy_pinned_pages(pinned_pages);
+ *pages = NULL;
+ dev_dbg(scif_info.mdev.this_device,
+diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
+index 85942f6717c57..8aadc6055df17 100644
+--- a/drivers/misc/mic/vop/vop_main.c
++++ b/drivers/misc/mic/vop/vop_main.c
+@@ -320,7 +320,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
+ /* First assign the vring's allocated in host memory */
+ vqconfig = _vop_vq_config(vdev->desc) + index;
+ memcpy_fromio(&config, vqconfig, sizeof(config));
+- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
++ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4);
+ vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+ va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
+ if (!va)
+diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
+index 30eac172f0170..7014ffe88632e 100644
+--- a/drivers/misc/mic/vop/vop_vringh.c
++++ b/drivers/misc/mic/vop/vop_vringh.c
+@@ -296,7 +296,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
+
+ num = le16_to_cpu(vqconfig[i].num);
+ mutex_init(&vvr->vr_mutex);
+- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
++ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) +
+ sizeof(struct _mic_vring_info));
+ vr->va = (void *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
+ goto err;
+ }
+ vr->len = vr_size;
+- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
++ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4);
+ vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
+ vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
+ DMA_BIDIRECTIONAL);
+@@ -602,6 +602,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
+ size_t partlen;
+ bool dma = VOP_USE_DMA && vi->dma_ch;
+ int err = 0;
++ size_t offset = 0;
+
+ if (dma) {
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+@@ -655,13 +656,20 @@ memcpy:
+ * We are copying to IO below and should ideally use something
+ * like copy_from_user_toio(..) if it existed.
+ */
+- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
+- err = -EFAULT;
+- dev_err(vop_dev(vdev), "%s %d err %d\n",
+- __func__, __LINE__, err);
+- goto err;
++ while (len) {
++ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
++
++ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) {
++ err = -EFAULT;
++ dev_err(vop_dev(vdev), "%s %d err %d\n",
++ __func__, __LINE__, err);
++ goto err;
++ }
++ memcpy_toio(dbuf + offset, vvr->buf, partlen);
++ offset += partlen;
++ vdev->out_bytes += partlen;
++ len -= partlen;
+ }
+- vdev->out_bytes += len;
+ err = 0;
+ err:
+ vpdev->hw_ops->unmap(vpdev, dbuf);
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index 8531ae7811956..c49065887e8f5 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
+ if (retval < (int)produce_q->kernel_if->num_pages) {
+ pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
+ retval);
+- qp_release_pages(produce_q->kernel_if->u.h.header_page,
+- retval, false);
++ if (retval > 0)
++ qp_release_pages(produce_q->kernel_if->u.h.header_page,
++ retval, false);
+ err = VMCI_ERROR_NO_MEM;
+ goto out;
+ }
+@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
+ if (retval < (int)consume_q->kernel_if->num_pages) {
+ pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
+ retval);
+- qp_release_pages(consume_q->kernel_if->u.h.header_page,
+- retval, false);
++ if (retval > 0)
++ qp_release_pages(consume_q->kernel_if->u.h.header_page,
++ retval, false);
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
+ produce_q->kernel_if->num_pages, false);
+ err = VMCI_ERROR_NO_MEM;
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index e0655278c5c32..3efaa9534a777 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -26,6 +26,9 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ unsigned i, nr_strings;
+ char **buffer, *string;
+
++ if (size < 2)
++ return 0;
++
+ /* Find all null-terminated (including zero length) strings in
+ the TPLLV1_INFO field. Trailing garbage is ignored. */
+ buf += 2;
+diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
+index 0f1547f09d08b..72f5c7b300790 100644
+--- a/drivers/mtd/lpddr/lpddr2_nvm.c
++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
+@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
+ return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
+ }
+
++static const struct mtd_info lpddr2_nvm_mtd_info = {
++ .type = MTD_RAM,
++ .writesize = 1,
++ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
++ ._read = lpddr2_nvm_read,
++ ._write = lpddr2_nvm_write,
++ ._erase = lpddr2_nvm_erase,
++ ._unlock = lpddr2_nvm_unlock,
++ ._lock = lpddr2_nvm_lock,
++};
++
+ /*
+ * lpddr2_nvm driver probe method
+ */
+@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+ .pfow_base = OW_BASE_ADDRESS,
+ .fldrv_priv = pcm_data,
+ };
++
+ if (IS_ERR(map->virt))
+ return PTR_ERR(map->virt);
+
+@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+ return PTR_ERR(pcm_data->ctl_regs);
+
+ /* Populate mtd_info data structure */
+- *mtd = (struct mtd_info) {
+- .dev = { .parent = &pdev->dev },
+- .name = pdev->dev.init_name,
+- .type = MTD_RAM,
+- .priv = map,
+- .size = resource_size(add_range),
+- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
+- .writesize = 1,
+- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
+- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+- ._read = lpddr2_nvm_read,
+- ._write = lpddr2_nvm_write,
+- ._erase = lpddr2_nvm_erase,
+- ._unlock = lpddr2_nvm_unlock,
+- ._lock = lpddr2_nvm_lock,
+- };
++ *mtd = lpddr2_nvm_mtd_info;
++ mtd->dev.parent = &pdev->dev;
++ mtd->name = pdev->dev.init_name;
++ mtd->priv = map;
++ mtd->size = resource_size(add_range);
++ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
++ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
+
+ /* Verify the presence of the device looking for PFOW string */
+ if (!lpddr2_nvm_pfow_present(map)) {
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index 4ced68be7ed7e..774970bfcf859 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
+
+- /* Panics must be written immediately */
+- if (reason != KMSG_DUMP_OOPS)
++ if (reason != KMSG_DUMP_OOPS) {
++ /* Panics must be written immediately */
+ mtdoops_write(cxt, 1);
+-
+- /* For other cases, schedule work to write it "nicely" */
+- schedule_work(&cxt->work_write);
++ } else {
++ /* For other cases, schedule work to write it "nicely" */
++ schedule_work(&cxt->work_write);
++ }
+ }
+
+ static void mtdoops_notify_add(struct mtd_info *mtd)
+diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
+index 6b399a75f9aec..b6f114da57143 100644
+--- a/drivers/mtd/nand/raw/vf610_nfc.c
++++ b/drivers/mtd/nand/raw/vf610_nfc.c
+@@ -850,8 +850,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
+ }
+
+ of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+- if (!of_id)
+- return -ENODEV;
++ if (!of_id) {
++ err = -ENODEV;
++ goto err_disable_clk;
++ }
+
+ nfc->variant = (enum vf610_nfc_variant)of_id->data;
+
+diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
+index e99d425aa93f5..b13b39763a405 100644
+--- a/drivers/mtd/nand/spi/gigadevice.c
++++ b/drivers/mtd/nand/spi/gigadevice.c
+@@ -21,7 +21,7 @@
+ #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
+
+ static SPINAND_OP_VARIANTS(read_cache_variants,
+- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+ static SPINAND_OP_VARIANTS(read_cache_variants_f,
+- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
+@@ -201,7 +201,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+- 0,
++ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+@@ -210,7 +210,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+- 0,
++ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+@@ -219,7 +219,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+- 0,
++ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+@@ -228,7 +228,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+- 0,
++ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
+@@ -237,7 +237,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ &write_cache_variants,
+ &update_cache_variants),
+- 0,
++ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
+ gd5fxgq4ufxxg_ecc_get_status)),
+ };
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e5c207ad3c77d..aaa7ed1dc97ee 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1232,18 +1232,23 @@ static int flexcan_chip_start(struct net_device *dev)
+ return err;
+ }
+
+-/* flexcan_chip_stop
++/* __flexcan_chip_stop
+ *
+- * this functions is entered with clocks enabled
++ * this function is entered with clocks enabled
+ */
+-static void flexcan_chip_stop(struct net_device *dev)
++static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
+ {
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
++ int err;
+
+ /* freeze + disable module */
+- flexcan_chip_freeze(priv);
+- flexcan_chip_disable(priv);
++ err = flexcan_chip_freeze(priv);
++ if (err && !disable_on_error)
++ return err;
++ err = flexcan_chip_disable(priv);
++ if (err && !disable_on_error)
++ goto out_chip_unfreeze;
+
+ /* Disable all interrupts */
+ priv->write(0, &regs->imask2);
+@@ -1253,6 +1258,23 @@ static void flexcan_chip_stop(struct net_device *dev)
+
+ flexcan_transceiver_disable(priv);
+ priv->can.state = CAN_STATE_STOPPED;
++
++ return 0;
++
++ out_chip_unfreeze:
++ flexcan_chip_unfreeze(priv);
++
++ return err;
++}
++
++static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
++{
++ return __flexcan_chip_stop(dev, true);
++}
++
++static inline int flexcan_chip_stop(struct net_device *dev)
++{
++ return __flexcan_chip_stop(dev, false);
+ }
+
+ static int flexcan_open(struct net_device *dev)
+@@ -1341,7 +1363,7 @@ static int flexcan_close(struct net_device *dev)
+
+ netif_stop_queue(dev);
+ can_rx_offload_disable(&priv->offload);
+- flexcan_chip_stop(dev);
++ flexcan_chip_stop_disable_on_error(dev);
+
+ can_rx_offload_del(&priv->offload);
+ free_irq(dev->irq, dev);
+diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
+index 38ea5e600fb84..e6d0cb9ee02f0 100644
+--- a/drivers/net/can/m_can/m_can_platform.c
++++ b/drivers/net/can/m_can/m_can_platform.c
+@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+
+- m_can_class_suspend(dev);
+-
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+
+diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
+index 9a63b51e1d82f..6f2dab7e33d65 100644
+--- a/drivers/net/dsa/realtek-smi-core.h
++++ b/drivers/net/dsa/realtek-smi-core.h
+@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
+ const char *name;
+ };
+
++/**
++ * struct rtl8366_vlan_mc - Virtual LAN member configuration
++ */
+ struct rtl8366_vlan_mc {
+ u16 vid;
+ u16 untag;
+@@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
+ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
+ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid);
+-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
+ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid);
+ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index 99cdb2f18fa2f..49c626a336803 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+
++/**
++ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
++ * @smi: the Realtek SMI device instance
++ * @vid: the VLAN ID to look up or allocate
++ * @vlanmc: the pointer will be assigned to a pointer to a valid member config
++ * if successful
++ * @return: index of a new member config or negative error number
++ */
++static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
++ struct rtl8366_vlan_mc *vlanmc)
++{
++ struct rtl8366_vlan_4k vlan4k;
++ int ret;
++ int i;
++
++ /* Try to find an existing member config entry for this VID */
++ for (i = 0; i < smi->num_vlan_mc; i++) {
++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
++ if (ret) {
++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
++ i, vid);
++ return ret;
++ }
++
++ if (vid == vlanmc->vid)
++ return i;
++ }
++
++ /* We have no MC entry for this VID, try to find an empty one */
++ for (i = 0; i < smi->num_vlan_mc; i++) {
++ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
++ if (ret) {
++ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
++ i, vid);
++ return ret;
++ }
++
++ if (vlanmc->vid == 0 && vlanmc->member == 0) {
++ /* Update the entry from the 4K table */
++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
++ if (ret) {
++ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
++ i, vid);
++ return ret;
++ }
++
++ vlanmc->vid = vid;
++ vlanmc->member = vlan4k.member;
++ vlanmc->untag = vlan4k.untag;
++ vlanmc->fid = vlan4k.fid;
++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
++ if (ret) {
++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
++ i, vid);
++ return ret;
++ }
++
++ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
++ i, vid);
++ return i;
++ }
++ }
++
++ /* MC table is full, try to find an unused entry and replace it */
++ for (i = 0; i < smi->num_vlan_mc; i++) {
++ int used;
++
++ ret = rtl8366_mc_is_used(smi, i, &used);
++ if (ret)
++ return ret;
++
++ if (!used) {
++ /* Update the entry from the 4K table */
++ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
++ if (ret)
++ return ret;
++
++ vlanmc->vid = vid;
++ vlanmc->member = vlan4k.member;
++ vlanmc->untag = vlan4k.untag;
++ vlanmc->fid = vlan4k.fid;
++ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
++ if (ret) {
++ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
++ i, vid);
++ return ret;
++ }
++ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
++ i, vid);
++ return i;
++ }
++ }
++
++ dev_err(smi->dev, "all VLAN member configurations are in use\n");
++ return -ENOSPC;
++}
++
+ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ u32 untag, u32 fid)
+ {
++ struct rtl8366_vlan_mc vlanmc;
+ struct rtl8366_vlan_4k vlan4k;
++ int mc;
+ int ret;
+- int i;
++
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return -EINVAL;
+
+ dev_dbg(smi->dev,
+ "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlan4k.member, vlan4k.untag);
+
+- /* Try to find an existing MC entry for this VID */
+- for (i = 0; i < smi->num_vlan_mc; i++) {
+- struct rtl8366_vlan_mc vlanmc;
+-
+- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+- if (ret)
+- return ret;
+-
+- if (vid == vlanmc.vid) {
+- /* update the MC entry */
+- vlanmc.member |= member;
+- vlanmc.untag |= untag;
+- vlanmc.fid = fid;
+-
+- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
++ /* Find or allocate a member config for this VID */
++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
++ if (ret < 0)
++ return ret;
++ mc = ret;
+
+- dev_dbg(smi->dev,
+- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+- vid, vlanmc.member, vlanmc.untag);
++ /* Update the MC entry */
++ vlanmc.member |= member;
++ vlanmc.untag |= untag;
++ vlanmc.fid = fid;
+
+- break;
+- }
+- }
++ /* Commit updates to the MC entry */
++ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
++ if (ret)
++ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
++ mc, vid);
++ else
++ dev_dbg(smi->dev,
++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
++ vid, vlanmc.member, vlanmc.untag);
+
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+
+-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
+-{
+- struct rtl8366_vlan_mc vlanmc;
+- int ret;
+- int index;
+-
+- ret = smi->ops->get_mc_index(smi, port, &index);
+- if (ret)
+- return ret;
+-
+- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
+- if (ret)
+- return ret;
+-
+- *val = vlanmc.vid;
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
+-
+ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+ unsigned int vid)
+ {
+ struct rtl8366_vlan_mc vlanmc;
+- struct rtl8366_vlan_4k vlan4k;
++ int mc;
+ int ret;
+- int i;
+-
+- /* Try to find an existing MC entry for this VID */
+- for (i = 0; i < smi->num_vlan_mc; i++) {
+- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+- if (ret)
+- return ret;
+-
+- if (vid == vlanmc.vid) {
+- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+- if (ret)
+- return ret;
+-
+- ret = smi->ops->set_mc_index(smi, port, i);
+- return ret;
+- }
+- }
+-
+- /* We have no MC entry for this VID, try to find an empty one */
+- for (i = 0; i < smi->num_vlan_mc; i++) {
+- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+- if (ret)
+- return ret;
+-
+- if (vlanmc.vid == 0 && vlanmc.member == 0) {
+- /* Update the entry from the 4K table */
+- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+- if (ret)
+- return ret;
+
+- vlanmc.vid = vid;
+- vlanmc.member = vlan4k.member;
+- vlanmc.untag = vlan4k.untag;
+- vlanmc.fid = vlan4k.fid;
+- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+- if (ret)
+- return ret;
+-
+- ret = smi->ops->set_mc_index(smi, port, i);
+- return ret;
+- }
+- }
+-
+- /* MC table is full, try to find an unused entry and replace it */
+- for (i = 0; i < smi->num_vlan_mc; i++) {
+- int used;
+-
+- ret = rtl8366_mc_is_used(smi, i, &used);
+- if (ret)
+- return ret;
+-
+- if (!used) {
+- /* Update the entry from the 4K table */
+- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+- if (ret)
+- return ret;
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return -EINVAL;
+
+- vlanmc.vid = vid;
+- vlanmc.member = vlan4k.member;
+- vlanmc.untag = vlan4k.untag;
+- vlanmc.fid = vlan4k.fid;
+- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+- if (ret)
+- return ret;
++ /* Find or allocate a member config for this VID */
++ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
++ if (ret < 0)
++ return ret;
++ mc = ret;
+
+- ret = smi->ops->set_mc_index(smi, port, i);
+- return ret;
+- }
++ ret = smi->ops->set_mc_index(smi, port, mc);
++ if (ret) {
++ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
++ mc, port);
++ return ret;
+ }
+
+- dev_err(smi->dev,
+- "all VLAN member configurations are in use\n");
++ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
++ port, vid, mc);
+
+- return -ENOSPC;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+
+@@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return;
+
+- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
++ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
++ vlan->vid_begin,
+ port,
+ untagged ? "untagged" : "tagged",
+ pvid ? " PVID" : "no PVID");
+@@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ dev_err(smi->dev, "port is DSA or CPU port\n");
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+- int pvid_val = 0;
+-
+- dev_info(smi->dev, "add VLAN %04x\n", vid);
+ member |= BIT(port);
+
+ if (untagged)
+ untag |= BIT(port);
+
+- /* To ensure that we have a valid MC entry for this VLAN,
+- * initialize the port VLAN ID here.
+- */
+- ret = rtl8366_get_pvid(smi, port, &pvid_val);
+- if (ret < 0) {
+- dev_err(smi->dev, "could not lookup PVID for port %d\n",
+- port);
+- return;
+- }
+- if (pvid_val == 0) {
+- ret = rtl8366_set_pvid(smi, port, vid);
+- if (ret < 0)
+- return;
+- }
+-
+ ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set up VLAN %04x",
+ vid);
++
++ if (!pvid)
++ continue;
++
++ ret = rtl8366_set_pvid(smi, port, vid);
++ if (ret)
++ dev_err(smi->dev,
++ "failed to set PVID on port %d to VLAN %04x",
++ port, vid);
++
++ if (!ret)
++ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
++ vid, port);
+ }
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
+index f5cc8b0a7c74c..7f731bf369980 100644
+--- a/drivers/net/dsa/rtl8366rb.c
++++ b/drivers/net/dsa/rtl8366rb.c
+@@ -1269,7 +1269,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+ if (smi->vlan4k_enabled)
+ max = RTL8366RB_NUM_VIDS - 1;
+
+- if (vlan == 0 || vlan >= max)
++ if (vlan == 0 || vlan > max)
+ return false;
+
+ return true;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 16a939f9b04d5..22d634111b818 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
+ PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
+ };
+
++static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
++ /* Default supported NAT modes */
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_NONE,
++ .natmode = NAT_MODE_NONE,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP,
++ .natmode = NAT_MODE_DIP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
++ .natmode = NAT_MODE_DIP_DP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++ CXGB4_ACTION_NATMODE_SIP,
++ .natmode = NAT_MODE_DIP_DP_SIP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
++ CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_DIP_DP_SP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_SIP_SP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++ CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_DIP_SIP_SP,
++ },
++ {
++ .chip = CHELSIO_T5,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
++ CXGB4_ACTION_NATMODE_DPORT |
++ CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_ALL,
++ },
++ /* T6+ can ignore L4 ports when they're disabled. */
++ {
++ .chip = CHELSIO_T6,
++ .flags = CXGB4_ACTION_NATMODE_SIP,
++ .natmode = NAT_MODE_SIP_SP,
++ },
++ {
++ .chip = CHELSIO_T6,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
++ .natmode = NAT_MODE_DIP_DP_SP,
++ },
++ {
++ .chip = CHELSIO_T6,
++ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
++ .natmode = NAT_MODE_ALL,
++ },
++};
++
++static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
++ u8 natmode_flags)
++{
++ u8 i = 0;
++
++ /* Translate the enabled NAT 4-tuple fields to one of the
++ * hardware supported NAT mode configurations. This ensures
++ * that we pick a valid combination, where the disabled fields
++ * do not get overwritten to 0.
++ */
++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
++ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
++ return;
++ }
++ }
++}
++
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+ {
+ struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+@@ -287,7 +370,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
+ }
+
+ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+- u32 mask, u32 offset, u8 htype)
++ u32 mask, u32 offset, u8 htype,
++ u8 *natmode_flags)
+ {
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+@@ -312,60 +396,95 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ offload_pedit(fs, val, mask, IP4_SRC);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP4_DST:
+ offload_pedit(fs, val, mask, IP4_DST);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ }
+- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ offload_pedit(fs, val, mask, IP6_SRC_31_0);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_SRC_63_32:
+ offload_pedit(fs, val, mask, IP6_SRC_63_32);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_SRC_95_64:
+ offload_pedit(fs, val, mask, IP6_SRC_95_64);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_SRC_127_96:
+ offload_pedit(fs, val, mask, IP6_SRC_127_96);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
+ case PEDIT_IP6_DST_31_0:
+ offload_pedit(fs, val, mask, IP6_DST_31_0);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ case PEDIT_IP6_DST_63_32:
+ offload_pedit(fs, val, mask, IP6_DST_63_32);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ case PEDIT_IP6_DST_95_64:
+ offload_pedit(fs, val, mask, IP6_DST_95_64);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ case PEDIT_IP6_DST_127_96:
+ offload_pedit(fs, val, mask, IP6_DST_127_96);
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ }
+- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+ fs->nat_fport = val;
+- else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ } else {
+ fs->nat_lport = val >> 16;
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++ }
+ }
+- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+ fs->nat_fport = val;
+- else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ } else {
+ fs->nat_lport = val >> 16;
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
++ }
+ }
+- fs->nat_mode = NAT_MODE_ALL;
++ break;
++ }
++}
++
++static int cxgb4_action_natmode_validate(struct net_device *dev,
++ struct adapter *adap, u8 natmode_flags)
++{
++ u8 i = 0;
++
++ /* Extract the NAT mode to enable based on what 4-tuple fields
++ * are enabled to be overwritten. This ensures that the
++ * disabled fields don't get overwritten to 0.
++ */
++ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
++ const struct cxgb4_natmode_config *c;
++
++ c = &cxgb4_natmode_config_array[i];
++ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
++ natmode_flags == c->flags)
++ return 0;
+ }
++ netdev_err(dev, "%s: Unsupported NAT mode 4-tuple combination\n",
++ __func__);
++ return -EOPNOTSUPP;
+ }
+
+ static void cxgb4_process_flow_actions(struct net_device *in,
+@@ -374,6 +493,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_action_entry *act;
++ u8 natmode_flags = 0;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+@@ -424,13 +544,17 @@ static void cxgb4_process_flow_actions(struct net_device *in,
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+
+- process_pedit_field(fs, val, mask, offset, htype);
++ process_pedit_field(fs, val, mask, offset, htype,
++ &natmode_flags);
+ }
+ break;
+ default:
+ break;
+ }
+ }
++ if (natmode_flags)
++ cxgb4_action_natmode_tweak(fs, natmode_flags);
++
+ }
+
+ static bool valid_l4_mask(u32 mask)
+@@ -447,7 +571,8 @@ static bool valid_l4_mask(u32 mask)
+ }
+
+ static bool valid_pedit_action(struct net_device *dev,
+- const struct flow_action_entry *act)
++ const struct flow_action_entry *act,
++ u8 *natmode_flags)
+ {
+ u32 mask, offset;
+ u8 htype;
+@@ -472,7 +597,10 @@ static bool valid_pedit_action(struct net_device *dev,
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++ break;
+ case PEDIT_IP4_DST:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -486,10 +614,13 @@ static bool valid_pedit_action(struct net_device *dev,
+ case PEDIT_IP6_SRC_63_32:
+ case PEDIT_IP6_SRC_95_64:
+ case PEDIT_IP6_SRC_127_96:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
++ break;
+ case PEDIT_IP6_DST_31_0:
+ case PEDIT_IP6_DST_63_32:
+ case PEDIT_IP6_DST_95_64:
+ case PEDIT_IP6_DST_127_96:
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -505,6 +636,10 @@ static bool valid_pedit_action(struct net_device *dev,
+ __func__);
+ return false;
+ }
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -520,6 +655,10 @@ static bool valid_pedit_action(struct net_device *dev,
+ __func__);
+ return false;
+ }
++ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
++ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
++ else
++ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+@@ -538,10 +677,12 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct flow_cls_offload *cls)
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
++ struct adapter *adap = netdev2adap(dev);
+ struct flow_action_entry *act;
+ bool act_redir = false;
+ bool act_pedit = false;
+ bool act_vlan = false;
++ u8 natmode_flags = 0;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+@@ -551,7 +692,6 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
+ /* Do nothing */
+ break;
+ case FLOW_ACTION_REDIRECT: {
+- struct adapter *adap = netdev2adap(dev);
+ struct net_device *n_dev, *target_dev;
+ unsigned int i;
+ bool found = false;
+@@ -601,7 +741,8 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
+ }
+ break;
+ case FLOW_ACTION_MANGLE: {
+- bool pedit_valid = valid_pedit_action(dev, act);
++ bool pedit_valid = valid_pedit_action(dev, act,
++ &natmode_flags);
+
+ if (!pedit_valid)
+ return -EOPNOTSUPP;
+@@ -620,6 +761,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
+ return -EINVAL;
+ }
+
++ if (act_pedit) {
++ int ret;
++
++ ret = cxgb4_action_natmode_validate(dev, adap, natmode_flags);
++ if (ret)
++ return ret;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+index eb4c95248baf6..c905debe6f7ac 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
+ #define PEDIT_TCP_SPORT_DPORT 0x0
+ #define PEDIT_UDP_SPORT_DPORT 0x0
+
++enum cxgb4_action_natmode_flags {
++ CXGB4_ACTION_NATMODE_NONE = 0,
++ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
++ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
++ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
++ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
++};
++
++/* TC PEDIT action to NATMODE translation entry */
++struct cxgb4_natmode_config {
++ enum chip_type chip;
++ u8 flags;
++ u8 natmode;
++};
++
+ int cxgb4_tc_flower_replace(struct net_device *dev,
+ struct flow_cls_offload *cls);
+ int cxgb4_tc_flower_destroy(struct net_device *dev,
+diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
+index 0dd64acd2a3fb..08cac1bfacafb 100644
+--- a/drivers/net/ethernet/cisco/enic/enic.h
++++ b/drivers/net/ethernet/cisco/enic/enic.h
+@@ -171,6 +171,7 @@ struct enic {
+ u16 num_vfs;
+ #endif
+ spinlock_t enic_api_lock;
++ bool enic_api_busy;
+ struct enic_port_profile *pp;
+
+ /* work queue cache line section */
+diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
+index b161f24522b87..b028ea2dec2b9 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_api.c
++++ b/drivers/net/ethernet/cisco/enic/enic_api.c
+@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ struct vnic_dev *vdev = enic->vdev;
+
+ spin_lock(&enic->enic_api_lock);
++ while (enic->enic_api_busy) {
++ spin_unlock(&enic->enic_api_lock);
++ cpu_relax();
++ spin_lock(&enic->enic_api_lock);
++ }
++
+ spin_lock_bh(&enic->devcmd_lock);
+
+ vnic_dev_cmd_proxy_by_index_start(vdev, vf);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 6e2ab10ad2e6f..8314102002b0f 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2142,8 +2142,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
+ int done;
+ int err;
+
+- BUG_ON(in_interrupt());
+-
+ err = start(vdev, arg);
+ if (err)
+ return err;
+@@ -2331,6 +2329,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
+ rss_hash_bits, rss_base_cpu, rss_enable);
+ }
+
++static void enic_set_api_busy(struct enic *enic, bool busy)
++{
++ spin_lock(&enic->enic_api_lock);
++ enic->enic_api_busy = busy;
++ spin_unlock(&enic->enic_api_lock);
++}
++
+ static void enic_reset(struct work_struct *work)
+ {
+ struct enic *enic = container_of(work, struct enic, reset);
+@@ -2340,7 +2345,9 @@ static void enic_reset(struct work_struct *work)
+
+ rtnl_lock();
+
+- spin_lock(&enic->enic_api_lock);
++ /* Stop any activity from infiniband */
++ enic_set_api_busy(enic, true);
++
+ enic_stop(enic->netdev);
+ enic_dev_soft_reset(enic);
+ enic_reset_addr_lists(enic);
+@@ -2348,7 +2355,10 @@ static void enic_reset(struct work_struct *work)
+ enic_set_rss_nic_cfg(enic);
+ enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_open(enic->netdev);
+- spin_unlock(&enic->enic_api_lock);
++
++ /* Allow infiniband to fiddle with the device again */
++ enic_set_api_busy(enic, false);
++
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+
+ rtnl_unlock();
+@@ -2360,7 +2370,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
+
+ rtnl_lock();
+
+- spin_lock(&enic->enic_api_lock);
++ /* Stop any activity from infiniband */
++ enic_set_api_busy(enic, true);
++
+ enic_dev_hang_notify(enic);
+ enic_stop(enic->netdev);
+ enic_dev_hang_reset(enic);
+@@ -2369,7 +2381,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
+ enic_set_rss_nic_cfg(enic);
+ enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_open(enic->netdev);
+- spin_unlock(&enic->enic_api_lock);
++
++ /* Allow infiniband to fiddle with the device again */
++ enic_set_api_busy(enic, false);
++
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 96e9565f1e08a..1fbc243fc3f4c 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1807,6 +1807,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ priv->rxdes0_edorr_mask = BIT(30);
+ priv->txdes0_edotr_mask = BIT(30);
+ priv->is_aspeed = true;
++ /* Disable ast2600 problematic HW arbitration */
++ if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
++ iowrite32(FTGMAC100_TM_DEFAULT,
++ priv->base + FTGMAC100_OFFSET_TM);
++ }
+ } else {
+ priv->rxdes0_edorr_mask = BIT(15);
+ priv->txdes0_edotr_mask = BIT(15);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index e5876a3fda91d..63b3e02fab162 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -169,6 +169,14 @@
+ #define FTGMAC100_MACCR_FAST_MODE (1 << 19)
+ #define FTGMAC100_MACCR_SW_RST (1 << 31)
+
++/*
++ * test mode control register
++ */
++#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
++#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
++#define FTGMAC100_TM_DEFAULT \
++ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
++
+ /*
+ * PHY control register
+ */
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index a0e4b12ac4ea2..fd7fc6f20c9da 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1945,6 +1945,27 @@ out:
+ return ret;
+ }
+
++static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++ struct phy_device *phy_dev = ndev->phydev;
++
++ if (phy_dev) {
++ phy_reset_after_clk_enable(phy_dev);
++ } else if (fep->phy_node) {
++ /*
++ * If the PHY still is not bound to the MAC, but there is
++ * OF PHY node and a matching PHY device instance already,
++ * use the OF PHY node to obtain the PHY device instance,
++ * and then use that PHY device instance when triggering
++ * the PHY reset.
++ */
++ phy_dev = of_phy_find_device(fep->phy_node);
++ phy_reset_after_clk_enable(phy_dev);
++ put_device(&phy_dev->mdio.dev);
++ }
++}
++
+ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ {
+ struct fec_enet_private *fep = netdev_priv(ndev);
+@@ -1971,7 +1992,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+ if (ret)
+ goto failed_clk_ref;
+
+- phy_reset_after_clk_enable(ndev->phydev);
++ fec_enet_phy_reset_after_clk_enable(ndev);
+ } else {
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp) {
+@@ -2982,16 +3003,16 @@ fec_enet_open(struct net_device *ndev)
+ /* Init MAC prior to mii bus probe */
+ fec_restart(ndev);
+
+- /* Probe and connect to PHY when open the interface */
+- ret = fec_enet_mii_probe(ndev);
+- if (ret)
+- goto err_enet_mii_probe;
+-
+ /* Call phy_reset_after_clk_enable() again if it failed during
+ * phy_reset_after_clk_enable() before because the PHY wasn't probed.
+ */
+ if (reset_again)
+- phy_reset_after_clk_enable(ndev->phydev);
++ fec_enet_phy_reset_after_clk_enable(ndev);
++
++ /* Probe and connect to PHY when open the interface */
++ ret = fec_enet_mii_probe(ndev);
++ if (ret)
++ goto err_enet_mii_probe;
+
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index aa32a5b041129..a20d9147d5f22 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1317,6 +1317,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
+ int offset = ibmveth_rxq_frame_offset(adapter);
+ int csum_good = ibmveth_rxq_csum_good(adapter);
+ int lrg_pkt = ibmveth_rxq_large_packet(adapter);
++ __sum16 iph_check = 0;
+
+ skb = ibmveth_rxq_get_buffer(adapter);
+
+@@ -1353,16 +1354,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+- if (csum_good) {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- ibmveth_rx_csum_helper(skb, adapter);
++ /* PHYP without PLSO support places a -1 in the ip
++ * checksum for large send frames.
++ */
++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
++ struct iphdr *iph = (struct iphdr *)skb->data;
++
++ iph_check = iph->check;
+ }
+
+- if (length > netdev->mtu + ETH_HLEN) {
++ if ((length > netdev->mtu + ETH_HLEN) ||
++ lrg_pkt || iph_check == 0xffff) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
+
++ if (csum_good) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ ibmveth_rx_csum_helper(skb, adapter);
++ }
++
+ napi_gro_receive(napi, skb); /* send it up */
+
+ netdev->stats.rx_packets++;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 5329af2337a91..48105a2eebe4d 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4074,8 +4074,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
+ goto out;
+ }
++ /* crq->change_mac_addr.mac_addr is the requested one
++ * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
++ */
+ ether_addr_copy(netdev->dev_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
++ ether_addr_copy(adapter->mac_addr,
++ &crq->change_mac_addr_rsp.mac_addr[0]);
+ out:
+ complete(&adapter->fw_done);
+ return rc;
+@@ -4472,7 +4477,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ case IBMVNIC_1GBPS:
+ adapter->speed = SPEED_1000;
+ break;
+- case IBMVNIC_10GBP:
++ case IBMVNIC_10GBPS:
+ adapter->speed = SPEED_10000;
+ break;
+ case IBMVNIC_25GBPS:
+@@ -4487,6 +4492,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ case IBMVNIC_100GBPS:
+ adapter->speed = SPEED_100000;
+ break;
++ case IBMVNIC_200GBPS:
++ adapter->speed = SPEED_200000;
++ break;
+ default:
+ if (netif_carrier_ok(netdev))
+ netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index ebc39248b334a..0da20f19bb17c 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
+ #define IBMVNIC_10MBPS 0x40000000
+ #define IBMVNIC_100MBPS 0x20000000
+ #define IBMVNIC_1GBPS 0x10000000
+-#define IBMVNIC_10GBP 0x08000000
++#define IBMVNIC_10GBPS 0x08000000
+ #define IBMVNIC_40GBPS 0x04000000
+ #define IBMVNIC_100GBPS 0x02000000
+ #define IBMVNIC_25GBPS 0x01000000
+diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
+index ae195f8adff58..993f495e2bf7b 100644
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -1113,7 +1113,7 @@ out:
+ return rc;
+
+ probe_err_register:
+- kfree(lp->td_ring);
++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
+ probe_err_td_ring:
+ iounmap(lp->tx_dma_regs);
+ probe_err_dma_tx:
+@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
+ iounmap(lp->eth_regs);
+ iounmap(lp->rx_dma_regs);
+ iounmap(lp->tx_dma_regs);
++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
+
+ unregister_netdev(bif->dev);
+ free_netdev(bif->dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index db3552f2d0877..f9797e5038841 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -942,6 +942,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+ bool clean_complete = true;
+ int done;
+
++ if (!budget)
++ return 0;
++
+ if (priv->tx_ring_num[TX_XDP]) {
+ xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
+ if (xdp_tx_cq->xdp_busy) {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index a30edb436f4af..191ead7a7fa59 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ .dma = tx_info->map0_dma,
+ };
+
+- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
++ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ PAGE_SIZE, priv->dma_dir);
+ put_page(tx_info->page);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 75fc283cacc36..492ff2ef9a404 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -498,8 +498,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
+ switch (clock->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ ptp_event.index = pin;
+- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+- be64_to_cpu(eqe->data.pps.time_stamp));
++ ptp_event.timestamp =
++ mlx5_timecounter_cyc2time(clock,
++ be64_to_cpu(eqe->data.pps.time_stamp));
+ if (clock->pps_info.enabled) {
+ ptp_event.type = PTP_CLOCK_PPSUSR;
+ ptp_event.pps_times.ts_real =
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 66c97049f52b7..f838abdb35e1d 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -5514,6 +5514,10 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
++ /* Reportedly at least Asus X453MA truncates packets otherwise */
++ if (tp->mac_version == RTL_GIGA_MAC_VER_37)
++ rtl_init_rxcfg(tp);
++
+ return 0;
+ }
+
+@@ -6414,7 +6418,7 @@ static int rtl8169_close(struct net_device *dev)
+
+ phy_disconnect(tp->phydev);
+
+- pci_free_irq(pdev, 0, tp);
++ free_irq(pci_irq_vector(pdev, 0), tp);
+
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+@@ -6465,8 +6469,8 @@ static int rtl_open(struct net_device *dev)
+
+ rtl_request_firmware(tp);
+
+- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
+- dev->name);
++ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
++ IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+ if (retval < 0)
+ goto err_release_fw_2;
+
+@@ -6499,7 +6503,7 @@ out:
+ return retval;
+
+ err_free_irq:
+- pci_free_irq(pdev, 0, tp);
++ free_irq(pci_irq_vector(pdev, 0), tp);
+ err_release_fw_2:
+ rtl_release_firmware(tp);
+ rtl8169_rx_clear(tp);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 982be75fde833..189cdb7633671 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -175,32 +175,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+ }
+ }
+
+-/**
+- * stmmac_stop_all_queues - Stop all queues
+- * @priv: driver private structure
+- */
+-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
+-{
+- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+- u32 queue;
+-
+- for (queue = 0; queue < tx_queues_cnt; queue++)
+- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+-}
+-
+-/**
+- * stmmac_start_all_queues - Start all queues
+- * @priv: driver private structure
+- */
+-static void stmmac_start_all_queues(struct stmmac_priv *priv)
+-{
+- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+- u32 queue;
+-
+- for (queue = 0; queue < tx_queues_cnt; queue++)
+- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
+-}
+-
+ static void stmmac_service_event_schedule(struct stmmac_priv *priv)
+ {
+ if (!test_bit(STMMAC_DOWN, &priv->state) &&
+@@ -2737,7 +2711,7 @@ static int stmmac_open(struct net_device *dev)
+ }
+
+ stmmac_enable_all_queues(priv);
+- stmmac_start_all_queues(priv);
++ netif_tx_start_all_queues(priv->dev);
+
+ return 0;
+
+@@ -2778,8 +2752,6 @@ static int stmmac_release(struct net_device *dev)
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+- stmmac_stop_all_queues(priv);
+-
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+@@ -4770,7 +4742,6 @@ int stmmac_suspend(struct device *dev)
+ mutex_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+- stmmac_stop_all_queues(priv);
+
+ stmmac_disable_all_queues(priv);
+
+@@ -4883,8 +4854,6 @@ int stmmac_resume(struct device *dev)
+
+ stmmac_enable_all_queues(priv);
+
+- stmmac_start_all_queues(priv);
+-
+ mutex_unlock(&priv->lock);
+
+ if (!device_may_wakeup(priv->device)) {
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index e57d59b0a7ae9..21d905d90650b 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1375,6 +1375,7 @@ static const struct usb_device_id products[] = {
+ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
+ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
+ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
++ {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+
+ /* 4. Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
+index dfc16770458d8..8b6598a3713d1 100644
+--- a/drivers/net/wan/hdlc.c
++++ b/drivers/net/wan/hdlc.c
+@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
+ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *p, struct net_device *orig_dev)
+ {
+- struct hdlc_device *hdlc = dev_to_hdlc(dev);
++ struct hdlc_device *hdlc;
++
++ /* First make sure "dev" is an HDLC device */
++ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
++ kfree_skb(skb);
++ return NET_RX_SUCCESS;
++ }
++
++ hdlc = dev_to_hdlc(dev);
+
+ if (!net_eq(dev_net(dev), &init_net)) {
+ kfree_skb(skb);
+diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
+index 08e0a46501dec..c70a518b8b478 100644
+--- a/drivers/net/wan/hdlc_raw_eth.c
++++ b/drivers/net/wan/hdlc_raw_eth.c
+@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+ old_qlen = dev->tx_queue_len;
+ ether_setup(dev);
+ dev->tx_queue_len = old_qlen;
++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ eth_hw_addr_random(dev);
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
+ netif_dormant_off(dev);
+diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
+index eca87f7c5b6c1..01e05af5ae085 100644
+--- a/drivers/net/wireless/ath/ath10k/ce.c
++++ b/drivers/net/wireless/ath/ath10k/ce.c
+@@ -1555,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+- (nentries * sizeof(struct ce_desc_64) +
++ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 9f0e7b4943ec6..8ca0a808a644d 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
+ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
++
++ if (idx < 0 || idx >= htt->rx_ring.size) {
++ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
++ idx &= htt->rx_ring.size_mask;
++ ret = -ENOMEM;
++ goto fail;
++ }
++
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index d373602a80145..915ba2a7f7448 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -7131,7 +7131,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+ {
+ int ret;
+- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
++ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
+index 5e7ea838a9218..814131a0680a4 100644
+--- a/drivers/net/wireless/ath/ath6kl/main.c
++++ b/drivers/net/wireless/ath/ath6kl/main.c
+@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+
++ if (aid < 1 || aid > AP_MAX_NUM_STA)
++ return;
++
+ if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
+ struct ieee80211_mgmt *mgmt =
+ (struct ieee80211_mgmt *) assoc_info;
+diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
+index 2382c6c46851e..c610fe21c85c0 100644
+--- a/drivers/net/wireless/ath/ath6kl/wmi.c
++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
+@@ -2645,6 +2645,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ return -EINVAL;
+ }
+
++ if (tsid >= 16) {
++ ath6kl_err("invalid tsid: %d\n", tsid);
++ return -EINVAL;
++ }
++
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 3f563e02d17da..2ed98aaed6fb5 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ /* The pending URBs have to be canceled. */
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
++ usb_get_urb(tx_buf->urb);
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ usb_kill_urb(tx_buf->urb);
++ list_del(&tx_buf->list);
++ usb_free_urb(tx_buf->urb);
++ kfree(tx_buf->buf);
++ kfree(tx_buf);
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+ }
+@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+ unsigned long flags;
+
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
++ usb_get_urb(tx_buf->urb);
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
++ usb_get_urb(tx_buf->urb);
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index d2e062eaf5614..510e61e97dbcb 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+
+ if (skb) {
+ htc_hdr = (struct htc_frame_hdr *) skb->data;
++ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
++ goto ret;
+ endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
+ skb_pull(skb, sizeof(struct htc_frame_hdr));
+
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index ad051f34e65b2..46ae4ec4ad47d 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+- .rx_highest = cpu_to_le16(72),
++ .rx_highest = cpu_to_le16(150),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ }
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 85cf96461ddeb..e9bb8dbdc9aa8 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -483,7 +483,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+ ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+
+ if (ret || !(*ifp) || !(*ifp)->ndev) {
+- if (ret != -ENODATA && *ifp)
++ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
+ (*ifp)->ndev->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return -ENODATA;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index e3dd8623be4ec..c2705d7a4247e 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -1619,6 +1619,8 @@ fail:
+ BRCMF_TX_IOCTL_MAX_MSG_SIZE,
+ msgbuf->ioctbuf,
+ msgbuf->ioctbuf_handle);
++ if (msgbuf->txflow_wq)
++ destroy_workqueue(msgbuf->txflow_wq);
+ kfree(msgbuf);
+ }
+ return -ENOMEM;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+index 7ef36234a25dc..66797dc5e90d5 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+@@ -5065,8 +5065,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
+ pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
+ pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
+
+- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
++ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
++ kfree(pi->u.pi_lcnphy);
+ return false;
++ }
+
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+ if (pi_lcn->lcnphy_tempsense_option == 3) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index ed92a8e8cd519..01b26b3327b01 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3650,9 +3650,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+ tail->apply_time_max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+- channel->hw_value, req_dur, duration, delay,
+- dtim_interval);
++ "ROC: Requesting to remain on channel %u for %ums\n",
++ channel->hw_value, req_dur);
++ IWL_DEBUG_TE(mvm,
++ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
++ duration, delay, dtim_interval);
++
+ /* Set the node address */
+ memcpy(tail->node_addr, vif->addr, ETH_ALEN);
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index 59f0651d148bb..629af26675cf1 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -1891,7 +1891,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ chan, CFG80211_BSS_FTYPE_UNKNOWN,
+ bssid, timestamp,
+ cap_info_bitmap, beacon_period,
+- ie_buf, ie_len, rssi, GFP_KERNEL);
++ ie_buf, ie_len, rssi, GFP_ATOMIC);
+ if (bss) {
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index fec38b6e86ffd..b322c2755e9a4 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -1996,6 +1996,8 @@ error:
+ kfree(card->mpa_rx.buf);
+ card->mpa_tx.buf_size = 0;
+ card->mpa_rx.buf_size = 0;
++ card->mpa_tx.buf = NULL;
++ card->mpa_rx.buf = NULL;
+ }
+
+ return ret;
+diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
+index c2365eeb70168..528107d70c1cb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/usb.c
++++ b/drivers/net/wireless/marvell/mwifiex/usb.c
+@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
+ skb_dequeue(&port->tx_aggr.aggr_list)))
+ mwifiex_write_data_complete(adapter, skb_tmp,
+ 0, -1);
+- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
++ if (port->tx_aggr.timer_cnxt.hold_timer.function)
++ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
+ port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
+ }
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+index c0c32805fb8de..106f1a846f499 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+@@ -834,6 +834,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
+ default:
+ pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
+ vif->vifid, vif->wdev.iftype);
++ dev_kfree_skb(cmd_skb);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1996,6 +1997,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
+ break;
+ default:
+ pr_err("unsupported iftype %d\n", vif->wdev.iftype);
++ dev_kfree_skb(cmd_skb);
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 3499b211dad51..048984ca81fdb 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -5447,7 +5447,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+- usb_free_urb(urb);
+ goto error;
+ }
+
+@@ -5456,6 +5455,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+ rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+
+ error:
++ usb_free_urb(urb);
+ return ret;
+ }
+
+@@ -5781,6 +5781,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct rtl8xxxu_rx_urb *rx_urb;
+ struct rtl8xxxu_tx_urb *tx_urb;
++ struct sk_buff *skb;
+ unsigned long flags;
+ int ret, i;
+
+@@ -5831,6 +5832,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
+ rx_urb->hw = hw;
+
+ ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
++ if (ret) {
++ if (ret != -ENOMEM) {
++ skb = (struct sk_buff *)rx_urb->urb.context;
++ dev_kfree_skb(skb);
++ }
++ rtl8xxxu_queue_rx_urb(priv, rx_urb);
++ }
+ }
+ exit:
+ /*
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
+index 87824a4caba98..a47d871ae506a 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.h
++++ b/drivers/net/wireless/realtek/rtw88/pci.h
+@@ -13,8 +13,8 @@
+ #define RTK_BEQ_TX_DESC_NUM 256
+
+ #define RTK_MAX_RX_DESC_NUM 512
+-/* 8K + rx desc size */
+-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
++/* 11K + rx desc size */
++#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
+
+ #define RTK_PCI_CTRL 0x300
+ #define BIT_RST_TRXDMA_INTF BIT(20)
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 156c2a18a2394..abb37659de343 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -1036,6 +1036,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
+
+ err_dma_mask:
+ pci_clear_master(pdev);
++ pci_release_regions(pdev);
+ err_pci_regions:
+ pci_disable_device(pdev);
+ err_pci_enable:
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index af0b51d1d43e8..f5d12bf109c78 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3110,7 +3110,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ NVME_QUIRK_DEALLOCATE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+- NVME_QUIRK_MEDIUM_PRIO_SQ },
++ NVME_QUIRK_MEDIUM_PRIO_SQ |
++ NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 7d7176369edf7..6b2f1e290fa73 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1048,7 +1048,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+ * in case a host died before it enabled the controller. Hence, simply
+ * reset the keep alive timer when the controller is enabled.
+ */
+- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
++ if (ctrl->kato)
++ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 960542dea5adb..84f4078216a36 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -130,16 +130,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
+ }
+
+-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+- const struct nvmem_cell_info *info,
+- struct nvmem_cell *cell)
++static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
++ const struct nvmem_cell_info *info,
++ struct nvmem_cell *cell)
+ {
+ cell->nvmem = nvmem;
+ cell->offset = info->offset;
+ cell->bytes = info->bytes;
+- cell->name = kstrdup_const(info->name, GFP_KERNEL);
+- if (!cell->name)
+- return -ENOMEM;
++ cell->name = info->name;
+
+ cell->bit_offset = info->bit_offset;
+ cell->nbits = info->nbits;
+@@ -151,13 +149,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(&nvmem->dev,
+ "cell %s unaligned to nvmem stride %d\n",
+- cell->name, nvmem->stride);
++ cell->name ?: "<unknown>", nvmem->stride);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
++static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
++ const struct nvmem_cell_info *info,
++ struct nvmem_cell *cell)
++{
++ int err;
++
++ err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
++ if (err)
++ return err;
++
++ cell->name = kstrdup_const(info->name, GFP_KERNEL);
++ if (!cell->name)
++ return -ENOMEM;
++
++ return 0;
++}
++
+ /**
+ * nvmem_add_cells() - Add cell information to an nvmem device
+ *
+@@ -1174,7 +1189,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+ if (!nvmem)
+ return -EINVAL;
+
+- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+ if (rc)
+ return rc;
+
+@@ -1204,7 +1219,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ if (!nvmem)
+ return -EINVAL;
+
+- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
++ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 29dfaa591f8b0..8867bab72e171 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -1796,6 +1796,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
+ {
+ int index;
+
++ if (!opp_table->genpd_virt_devs)
++ return;
++
+ for (index = 0; index < opp_table->required_opp_count; index++) {
+ if (!opp_table->genpd_virt_devs[index])
+ continue;
+@@ -1842,6 +1845,9 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
+
++ if (opp_table->genpd_virt_devs)
++ return opp_table;
++
+ /*
+ * If the genpd's OPP table isn't already initialized, parsing of the
+ * required-opps fail for dev. We should retry this after genpd's OPP
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index f2481e80e2723..d0e60441dc8f2 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -503,7 +503,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
++static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+ {
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+@@ -527,8 +527,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+ bridge->data = pcie;
+ bridge->ops = &advk_pci_bridge_emul_ops;
+
+- pci_bridge_emul_init(bridge, 0);
+-
++ return pci_bridge_emul_init(bridge, 0);
+ }
+
+ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
+@@ -1027,7 +1026,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
+
+ advk_pcie_setup_hw(pcie);
+
+- advk_sw_pci_bridge_init(pcie);
++ ret = advk_sw_pci_bridge_init(pcie);
++ if (ret) {
++ dev_err(dev, "Failed to register emulated root PCI bridge\n");
++ return ret;
++ }
+
+ ret = advk_pcie_init_irq_domain(pcie);
+ if (ret) {
+diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
+index 0a3f61be5625b..a1298f6784ac9 100644
+--- a/drivers/pci/controller/pcie-iproc-msi.c
++++ b/drivers/pci/controller/pcie-iproc-msi.c
+@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
+ struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+ int target_cpu = cpumask_first(mask);
+ int curr_cpu;
++ int ret;
+
+ curr_cpu = hwirq_to_cpu(msi, data->hwirq);
+ if (curr_cpu == target_cpu)
+- return IRQ_SET_MASK_OK_DONE;
++ ret = IRQ_SET_MASK_OK_DONE;
++ else {
++ /* steer MSI to the target CPU */
++ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
++ ret = IRQ_SET_MASK_OK;
++ }
+
+- /* steer MSI to the target CPU */
+- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
++ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
+
+- return IRQ_SET_MASK_OK;
++ return ret;
+ }
+
+ static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 9c116cbaa95d8..e15220385666f 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -158,6 +158,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+ virtfn->device = iov->vf_device;
+ virtfn->is_virtfn = 1;
+ virtfn->physfn = pci_dev_get(dev);
++ virtfn->no_command_memory = 1;
+
+ if (id == 0)
+ pci_read_vf_config_common(virtfn);
+diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
+index 9e1c3c7eeba9b..170ccb164c604 100644
+--- a/drivers/perf/thunderx2_pmu.c
++++ b/drivers/perf/thunderx2_pmu.c
+@@ -627,14 +627,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
+ list_for_each_entry(rentry, &list, node) {
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ res = *rentry->res;
++ rentry = NULL;
+ break;
+ }
+ }
++ acpi_dev_free_resource_list(&list);
+
+- if (!rentry->res)
++ if (rentry) {
++ dev_err(dev, "PMU type %d: Fail to find resource\n", type);
+ return NULL;
++ }
+
+- acpi_dev_free_resource_list(&list);
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
+index 328aea9f6be32..50b37f8f5c7ff 100644
+--- a/drivers/perf/xgene_pmu.c
++++ b/drivers/perf/xgene_pmu.c
+@@ -1459,17 +1459,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+ }
+
+ #if defined(CONFIG_ACPI)
+-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
+-{
+- struct resource *res = data;
+-
+- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
+- acpi_dev_resource_memory(ares, res);
+-
+- /* Always tell the ACPI core to skip this resource */
+- return 1;
+-}
+-
+ static struct
+ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct acpi_device *adev, u32 type)
+@@ -1481,6 +1470,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
++ struct resource_entry *rentry;
+ int enable_bit;
+ int rc;
+
+@@ -1489,11 +1479,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+- rc = acpi_dev_get_resources(adev, &resource_list,
+- acpi_pmu_dev_add_resource, &res);
++ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
++ if (rc <= 0) {
++ dev_err(dev, "PMU type %d: No resources found\n", type);
++ return NULL;
++ }
++
++ list_for_each_entry(rentry, &resource_list, node) {
++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
++ res = *rentry->res;
++ rentry = NULL;
++ break;
++ }
++ }
+ acpi_dev_free_resource_list(&resource_list);
+- if (rc < 0) {
+- dev_err(dev, "PMU type %d: No resource address found\n", type);
++
++ if (rentry) {
++ dev_err(dev, "PMU type %d: No memory resource found\n", type);
+ return NULL;
+ }
+
+diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
+index dcf7df797af75..0ed14de0134cf 100644
+--- a/drivers/pinctrl/bcm/Kconfig
++++ b/drivers/pinctrl/bcm/Kconfig
+@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
++ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ default ARCH_BCM2835 || ARCH_BRCMSTB
+ help
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 3a235487e38d7..d8bcbefcba890 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -122,7 +122,7 @@ static const struct regmap_config mcp23x08_regmap = {
+ .max_register = MCP_OLAT,
+ };
+
+-static const struct reg_default mcp23x16_defaults[] = {
++static const struct reg_default mcp23x17_defaults[] = {
+ {.reg = MCP_IODIR << 1, .def = 0xffff},
+ {.reg = MCP_IPOL << 1, .def = 0x0000},
+ {.reg = MCP_GPINTEN << 1, .def = 0x0000},
+@@ -133,23 +133,23 @@ static const struct reg_default mcp23x16_defaults[] = {
+ {.reg = MCP_OLAT << 1, .def = 0x0000},
+ };
+
+-static const struct regmap_range mcp23x16_volatile_range = {
++static const struct regmap_range mcp23x17_volatile_range = {
+ .range_min = MCP_INTF << 1,
+ .range_max = MCP_GPIO << 1,
+ };
+
+-static const struct regmap_access_table mcp23x16_volatile_table = {
+- .yes_ranges = &mcp23x16_volatile_range,
++static const struct regmap_access_table mcp23x17_volatile_table = {
++ .yes_ranges = &mcp23x17_volatile_range,
+ .n_yes_ranges = 1,
+ };
+
+-static const struct regmap_range mcp23x16_precious_range = {
+- .range_min = MCP_GPIO << 1,
++static const struct regmap_range mcp23x17_precious_range = {
++ .range_min = MCP_INTCAP << 1,
+ .range_max = MCP_GPIO << 1,
+ };
+
+-static const struct regmap_access_table mcp23x16_precious_table = {
+- .yes_ranges = &mcp23x16_precious_range,
++static const struct regmap_access_table mcp23x17_precious_table = {
++ .yes_ranges = &mcp23x17_precious_range,
+ .n_yes_ranges = 1,
+ };
+
+@@ -159,10 +159,10 @@ static const struct regmap_config mcp23x17_regmap = {
+
+ .reg_stride = 2,
+ .max_register = MCP_OLAT << 1,
+- .volatile_table = &mcp23x16_volatile_table,
+- .precious_table = &mcp23x16_precious_table,
+- .reg_defaults = mcp23x16_defaults,
+- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
++ .volatile_table = &mcp23x17_volatile_table,
++ .precious_table = &mcp23x17_precious_table,
++ .reg_defaults = mcp23x17_defaults,
++ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ };
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index 8fe51e43f1bc1..59b5b7eebb05a 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -243,15 +243,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
+ },
+ };
+
+-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
+- {
+- I2C_BOARD_INFO("24c32", 0x51),
+- },
+- {
+- I2C_BOARD_INFO("24c32", 0x50),
+- },
+-};
+-
+ static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
+ {
+ I2C_BOARD_INFO("dps460", 0x59),
+@@ -611,15 +602,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
+- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
+- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ };
+
+diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
+index 599a0f66a3845..a34d95ed70b20 100644
+--- a/drivers/pwm/pwm-img.c
++++ b/drivers/pwm/pwm-img.c
+@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
+ return PTR_ERR(pwm->pwm_clk);
+ }
+
++ platform_set_drvdata(pdev, pwm);
++
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
+ goto err_suspend;
+ }
+
+- platform_set_drvdata(pdev, pwm);
+ return 0;
+
+ err_suspend:
+diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
+index 75bbfe5f3bc29..d77cec2769b76 100644
+--- a/drivers/pwm/pwm-lpss.c
++++ b/drivers/pwm/pwm-lpss.c
+@@ -93,10 +93,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
+ * The equation is:
+ * base_unit = round(base_unit_range * freq / c)
+ */
+- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
++ base_unit_range = BIT(lpwm->info->base_unit_bits);
+ freq *= base_unit_range;
+
+ base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
++ /* base_unit must not be 0 and we also want to avoid overflowing it */
++ base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
+
+ on_time_div = 255ULL * duty_ns;
+ do_div(on_time_div, period_ns);
+@@ -104,8 +106,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
+
+ orig_ctrl = ctrl = pwm_lpss_read(pwm);
+ ctrl &= ~PWM_ON_TIME_DIV_MASK;
+- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
+- base_unit &= base_unit_range;
++ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
+ ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
+ ctrl |= on_time_div;
+
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 0b85a80ae7ef6..2b08fdeb87c18 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -873,15 +873,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
+ rmcd_error("get_user_pages_unlocked err=%ld",
+ pinned);
+ nr_pages = 0;
+- } else
++ } else {
+ rmcd_error("pinned %ld out of %ld pages",
+ pinned, nr_pages);
++ /*
++ * Set nr_pages up to mean "how many pages to unpin, in
++ * the error handler:
++ */
++ nr_pages = pinned;
++ }
+ ret = -EFAULT;
+- /*
+- * Set nr_pages up to mean "how many pages to unpin, in
+- * the error handler:
+- */
+- nr_pages = pinned;
+ goto err_pg;
+ }
+
+@@ -1682,6 +1683,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ struct rio_dev *rdev;
+ struct rio_switch *rswitch = NULL;
+ struct rio_mport *mport;
++ struct device *dev;
+ size_t size;
+ u32 rval;
+ u32 swpinfo = 0;
+@@ -1696,8 +1698,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
+ dev_info.comptag, dev_info.destid, dev_info.hopcount);
+
+- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
++ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
++ if (dev) {
+ rmcd_debug(RDEV, "device %s already exists", dev_info.name);
++ put_device(dev);
+ return -EEXIST;
+ }
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 55fc80de5ef16..ee850cffe1542 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5158,15 +5158,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ else if (regulator_desc->supply_name)
+ rdev->supply_name = regulator_desc->supply_name;
+
+- /*
+- * Attempt to resolve the regulator supply, if specified,
+- * but don't return an error if we fail because we will try
+- * to resolve it again later as more regulators are added.
+- */
+- if (regulator_resolve_supply(rdev))
+- rdev_dbg(rdev, "unable to resolve supply\n");
+-
+ ret = set_machine_constraints(rdev, constraints);
++ if (ret == -EPROBE_DEFER) {
++ /* Regulator might be in bypass mode and so needs its supply
++ * to set the constraints */
++ /* FIXME: this currently triggers a chicken-and-egg problem
++ * when creating -SUPPLY symlink in sysfs to a regulator
++ * that is just being created */
++ ret = regulator_resolve_supply(rdev);
++ if (!ret)
++ ret = set_machine_constraints(rdev, constraints);
++ else
++ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
++ ERR_PTR(ret));
++ }
+ if (ret < 0)
+ goto wash;
+
+diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
+index 4abbeea782fa4..19903de6268db 100644
+--- a/drivers/rpmsg/qcom_smd.c
++++ b/drivers/rpmsg/qcom_smd.c
+@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
+ ret = of_property_read_u32(node, key, &edge->edge_id);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+- return -EINVAL;
++ goto put_node;
+ }
+
+ edge->remote_pid = QCOM_SMEM_HOST_ANY;
+@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
+ edge->mbox_client.knows_txdone = true;
+ edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
+ if (IS_ERR(edge->mbox_chan)) {
+- if (PTR_ERR(edge->mbox_chan) != -ENODEV)
+- return PTR_ERR(edge->mbox_chan);
++ if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
++ ret = PTR_ERR(edge->mbox_chan);
++ goto put_node;
++ }
+
+ edge->mbox_chan = NULL;
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(dev, "no qcom,ipc node\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto put_node;
+ }
+
+ edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+- if (IS_ERR(edge->ipc_regmap))
+- return PTR_ERR(edge->ipc_regmap);
++ if (IS_ERR(edge->ipc_regmap)) {
++ ret = PTR_ERR(edge->ipc_regmap);
++ goto put_node;
++ }
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+- return -EINVAL;
++ goto put_node;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+- return -EINVAL;
++ goto put_node;
+ }
+ }
+
+@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+- return -EINVAL;
++ ret = irq;
++ goto put_node;
+ }
+
+ ret = devm_request_irq(dev, irq,
+@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+- return ret;
++ goto put_node;
+ }
+
+ edge->irq = irq;
+
+ return 0;
++
++put_node:
++ of_node_put(node);
++ edge->of_node = NULL;
++
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 4ce28aa490cdb..8c4613617ef11 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1168,12 +1168,6 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
+ NULL
+ };
+
+- /* Role should not change by itself, but if it did, */
+- /* information from the hardware is authoritative. */
+- mutex_lock(&data->card->sbp_lock);
+- data->card->options.sbp.role = entry->role;
+- mutex_unlock(&data->card->sbp_lock);
+-
+ snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+ snprintf(env_role, sizeof(env_role), "ROLE=%s",
+ (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 0760d0bd8a10b..0fa455357594e 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ goto create_eq_error;
+ }
+
++ mem->dma = paddr;
+ mem->va = eq_vaddress;
+ ret = be_fill_queue(eq, phba->params.num_eq_entries,
+ sizeof(struct be_eq_entry), eq_vaddress);
+@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
+ goto create_eq_error;
+ }
+
+- mem->dma = paddr;
+ ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
+ BEISCSI_EQ_DELAY_DEF);
+ if (ret) {
+@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ goto create_cq_error;
+ }
+
++ mem->dma = paddr;
+ ret = be_fill_queue(cq, phba->params.num_cq_entries,
+ sizeof(struct sol_cqe), cq_vaddress);
+ if (ret) {
+@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
+ goto create_cq_error;
+ }
+
+- mem->dma = paddr;
+ ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
+ false, 0);
+ if (ret) {
+diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
+index 2f9213b257a4a..93e4011809919 100644
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
+
+ if (bfad->pci_bar0_kva == NULL) {
+ printk(KERN_ERR "Fail to map bar0\n");
++ rc = -ENODEV;
+ goto out_release_region;
+ }
+
+diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
+index e519238864758..1b6f9351b43f9 100644
+--- a/drivers/scsi/csiostor/csio_hw.c
++++ b/drivers/scsi/csiostor/csio_hw.c
+@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
+ FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
+ FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
+ FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
+- ret = EINVAL;
++ ret = -EINVAL;
+ goto bye;
+ }
+
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index df897df5cafee..8a76284b59b08 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4788,6 +4788,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ if (IS_ERR(vhost->work_thread)) {
+ dev_err(dev, "Couldn't create kernel thread: %ld\n",
+ PTR_ERR(vhost->work_thread));
++ rc = PTR_ERR(vhost->work_thread);
+ goto free_host_mem;
+ }
+
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index b7e44634d0dc2..3d58d24de6b61 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -1708,18 +1708,22 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
++ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ if (reply_q->irq_poll_scheduled) {
+ /* Calling irq_poll_disable will wait for any pending
+ * callbacks to have completed.
+ */
+ irq_poll_disable(&reply_q->irqpoll);
+ irq_poll_enable(&reply_q->irqpoll);
+- reply_q->irq_poll_scheduled = false;
+- reply_q->irq_line_enable = true;
+- enable_irq(reply_q->os_irq);
+- continue;
++ /* check how the scheduled poll has ended,
++ * clean up only if necessary
++ */
++ if (reply_q->irq_poll_scheduled) {
++ reply_q->irq_poll_scheduled = false;
++ reply_q->irq_line_enable = true;
++ enable_irq(reply_q->os_irq);
++ }
+ }
+- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ }
+ }
+
+diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
+index 8906aceda4c43..0354898d7cac1 100644
+--- a/drivers/scsi/mvumi.c
++++ b/drivers/scsi/mvumi.c
+@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
+ if (IS_ERR(mhba->dm_thread)) {
+ dev_err(&mhba->pdev->dev,
+ "failed to create device scan thread\n");
++ ret = PTR_ERR(mhba->dm_thread);
+ mutex_unlock(&mhba->sas_discovery_mutex);
+ goto fail_create_thread;
+ }
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index 3d0e345947c1f..9c0955c334e3e 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -668,7 +668,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
+ rdata = fcport->rdata;
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+ QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
+- rc = 1;
++ rc = SUCCESS;
+ goto out;
+ }
+
+diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
+index 946cebc4c9322..90aa64604ad78 100644
+--- a/drivers/scsi/qedi/qedi_fw.c
++++ b/drivers/scsi/qedi/qedi_fw.c
+@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
++ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
++ spin_unlock(&qedi_conn->list_lock);
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ "Freeing tid=0x%x for cid=0x%x\n",
+ cmd->task_id, qedi_conn->iscsi_conn_id);
+
++ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
+ cmd->task_id, qedi_conn->iscsi_conn_id,
+ &cmd->io_cmd);
+ }
++ spin_unlock(&qedi_conn->list_lock);
+
+ cmd->state = RESPONSE_RECEIVED;
+ qedi_clear_task_idx(qedi, cmd->task_id);
+@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+
+ tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
++ spin_lock(&qedi_conn->list_lock);
+ if (likely(qedi_cmd->io_cmd_in_list)) {
+ qedi_cmd->io_cmd_in_list = false;
+ list_del_init(&qedi_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
++ spin_unlock(&qedi_conn->list_lock);
+
+ if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+ ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
+ ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+ qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
++ spin_lock(&qedi_conn->list_lock);
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
++ spin_unlock(&qedi_conn->list_lock);
+
+ memset(task_ctx, '\0', sizeof(*task_ctx));
+
+@@ -817,8 +825,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+ qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+ spin_lock(&qedi_conn->list_lock);
+- list_del_init(&dbg_cmd->io_cmd);
+- qedi_conn->active_cmd_count--;
++ if (likely(dbg_cmd->io_cmd_in_list)) {
++ dbg_cmd->io_cmd_in_list = false;
++ list_del_init(&dbg_cmd->io_cmd);
++ qedi_conn->active_cmd_count--;
++ }
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+ wake_up_interruptible(&qedi_conn->wait_queue);
+@@ -1236,6 +1247,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+ qedi_conn->cmd_cleanup_req++;
+ qedi_iscsi_cleanup_task(ctask, true);
+
++ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ QEDI_WARN(&qedi->dbg_ctx,
+@@ -1447,8 +1459,11 @@ ldel_exit:
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+ spin_lock(&qedi_conn->list_lock);
+- list_del_init(&cmd->io_cmd);
+- qedi_conn->active_cmd_count--;
++ if (likely(cmd->io_cmd_in_list)) {
++ cmd->io_cmd_in_list = false;
++ list_del_init(&cmd->io_cmd);
++ qedi_conn->active_cmd_count--;
++ }
+ spin_unlock(&qedi_conn->list_lock);
+
+ clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 0f2622a48311c..755f66b1ff9c7 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -972,11 +972,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+ {
+ struct qedi_cmd *cmd, *cmd_tmp;
+
++ spin_lock(&qedi_conn->list_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+ io_cmd) {
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
++ spin_unlock(&qedi_conn->list_lock);
+ }
+
+ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 62d2ee825c97a..b300e11095828 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -71,6 +71,16 @@ void qla2x00_sp_free(srb_t *sp)
+ qla2x00_rel_sp(sp);
+ }
+
++void qla2xxx_rel_done_warning(srb_t *sp, int res)
++{
++ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
++}
++
++void qla2xxx_rel_free_warning(srb_t *sp)
++{
++ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
++}
++
+ /* Asynchronous Login/Logout Routines -------------------------------------- */
+
+ unsigned long
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 0c3d907af7692..6dfde42d799b5 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -183,10 +183,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+ return sp;
+ }
+
++void qla2xxx_rel_done_warning(srb_t *sp, int res);
++void qla2xxx_rel_free_warning(srb_t *sp);
++
+ static inline void
+ qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+ {
+ sp->qpair = NULL;
++ sp->done = qla2xxx_rel_done_warning;
++ sp->free = qla2xxx_rel_free_warning;
+ mempool_free(sp, qpair->srb_mempool);
+ QLA_QPAIR_MARK_NOT_BUSY(qpair);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index f4815a4084d8c..11656e864fca9 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -682,7 +682,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
+ struct nvme_fc_port_template *tmpl;
+ struct qla_hw_data *ha;
+ struct nvme_fc_port_info pinfo;
+- int ret = EINVAL;
++ int ret = -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return ret;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index a7acc266cec06..d84d95cac2a13 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -5677,7 +5677,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+ /* found existing exchange */
+ qpair->retry_term_cnt++;
+ if (qpair->retry_term_cnt >= 5) {
+- rc = EIO;
++ rc = -EIO;
+ qpair->retry_term_cnt = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to send ABTS Respond. Dumping firmware.\n");
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 5504ab11decc7..df43cf6405a8e 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -1220,7 +1220,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
+ le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
+ exit_host_stats:
+ if (ql_iscsi_stats)
+- dma_free_coherent(&ha->pdev->dev, host_stats_size,
++ dma_free_coherent(&ha->pdev->dev, stats_size,
+ ql_iscsi_stats, iscsi_stats_dma);
+
+ ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
+diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
+index 7a3a942b40df0..dd2175e9bfa17 100644
+--- a/drivers/scsi/smartpqi/smartpqi.h
++++ b/drivers/scsi/smartpqi/smartpqi.h
+@@ -357,7 +357,7 @@ struct pqi_event_response {
+ struct pqi_iu_header header;
+ u8 event_type;
+ u8 reserved2 : 7;
+- u8 request_acknowlege : 1;
++ u8 request_acknowledge : 1;
+ __le16 event_id;
+ __le32 additional_event_id;
+ union {
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 5ae074505386a..093ed5d1eef20 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -527,8 +527,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
+ put_unaligned_be16(cdb_length, &cdb[7]);
+ break;
+ default:
+- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
+- cmd);
++ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
+ break;
+ }
+
+@@ -2450,7 +2449,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ offload_to_mirror =
+ (offload_to_mirror >= layout_map_count - 1) ?
+ 0 : offload_to_mirror + 1;
+- WARN_ON(offload_to_mirror >= layout_map_count);
+ device->offload_to_mirror = offload_to_mirror;
+ /*
+ * Avoid direct use of device->offload_to_mirror within this
+@@ -2903,10 +2901,14 @@ static int pqi_interpret_task_management_response(
+ return rc;
+ }
+
+-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+- struct pqi_queue_group *queue_group)
++static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
++{
++ pqi_take_ctrl_offline(ctrl_info);
++}
++
++static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+ {
+- unsigned int num_responses;
++ int num_responses;
+ pqi_index_t oq_pi;
+ pqi_index_t oq_ci;
+ struct pqi_io_request *io_request;
+@@ -2918,6 +2920,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+
+ while (1) {
+ oq_pi = readl(queue_group->oq_pi);
++ if (oq_pi >= ctrl_info->num_elements_per_oq) {
++ pqi_invalid_response(ctrl_info);
++ dev_err(&ctrl_info->pci_dev->dev,
++ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
++ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
++ return -1;
++ }
+ if (oq_pi == oq_ci)
+ break;
+
+@@ -2926,10 +2935,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+ (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+
+ request_id = get_unaligned_le16(&response->request_id);
+- WARN_ON(request_id >= ctrl_info->max_io_slots);
++ if (request_id >= ctrl_info->max_io_slots) {
++ pqi_invalid_response(ctrl_info);
++ dev_err(&ctrl_info->pci_dev->dev,
++ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
++ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
++ return -1;
++ }
+
+ io_request = &ctrl_info->io_request_pool[request_id];
+- WARN_ON(atomic_read(&io_request->refcount) == 0);
++ if (atomic_read(&io_request->refcount) == 0) {
++ pqi_invalid_response(ctrl_info);
++ dev_err(&ctrl_info->pci_dev->dev,
++ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
++ request_id, oq_pi, oq_ci);
++ return -1;
++ }
+
+ switch (response->header.iu_type) {
+ case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
+@@ -2959,24 +2980,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+ io_request->error_info = ctrl_info->error_buffer +
+ (get_unaligned_le16(&response->error_index) *
+ PQI_ERROR_BUFFER_ELEMENT_LENGTH);
+- pqi_process_io_error(response->header.iu_type,
+- io_request);
++ pqi_process_io_error(response->header.iu_type, io_request);
+ break;
+ default:
++ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+- "unexpected IU type: 0x%x\n",
+- response->header.iu_type);
+- break;
++ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
++ response->header.iu_type, oq_pi, oq_ci);
++ return -1;
+ }
+
+- io_request->io_complete_callback(io_request,
+- io_request->context);
++ io_request->io_complete_callback(io_request, io_request->context);
+
+ /*
+ * Note that the I/O request structure CANNOT BE TOUCHED after
+ * returning from the I/O completion callback!
+ */
+-
+ oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
+ }
+
+@@ -3289,9 +3308,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
+ }
+ }
+
+-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
++static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+ {
+- unsigned int num_events;
++ int num_events;
+ pqi_index_t oq_pi;
+ pqi_index_t oq_ci;
+ struct pqi_event_queue *event_queue;
+@@ -3305,26 +3324,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+
+ while (1) {
+ oq_pi = readl(event_queue->oq_pi);
++ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
++ pqi_invalid_response(ctrl_info);
++ dev_err(&ctrl_info->pci_dev->dev,
++ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
++ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
++ return -1;
++ }
++
+ if (oq_pi == oq_ci)
+ break;
+
+ num_events++;
+- response = event_queue->oq_element_array +
+- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
++ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+
+ event_index =
+ pqi_event_type_to_event_index(response->event_type);
+
+- if (event_index >= 0) {
+- if (response->request_acknowlege) {
+- event = &ctrl_info->events[event_index];
+- event->pending = true;
+- event->event_type = response->event_type;
+- event->event_id = response->event_id;
+- event->additional_event_id =
+- response->additional_event_id;
++ if (event_index >= 0 && response->request_acknowledge) {
++ event = &ctrl_info->events[event_index];
++ event->pending = true;
++ event->event_type = response->event_type;
++ event->event_id = response->event_id;
++ event->additional_event_id = response->additional_event_id;
++ if (event->event_type == PQI_EVENT_TYPE_OFA)
+ pqi_ofa_capture_event_payload(event, response);
+- }
+ }
+
+ oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
+@@ -3439,7 +3463,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
+ {
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_queue_group *queue_group;
+- unsigned int num_responses_handled;
++ int num_io_responses_handled;
++ int num_events_handled;
+
+ queue_group = data;
+ ctrl_info = queue_group->ctrl_info;
+@@ -3447,17 +3472,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
+ if (!pqi_is_valid_irq(ctrl_info))
+ return IRQ_NONE;
+
+- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
++ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
++ if (num_io_responses_handled < 0)
++ goto out;
+
+- if (irq == ctrl_info->event_irq)
+- num_responses_handled += pqi_process_event_intr(ctrl_info);
++ if (irq == ctrl_info->event_irq) {
++ num_events_handled = pqi_process_event_intr(ctrl_info);
++ if (num_events_handled < 0)
++ goto out;
++ } else {
++ num_events_handled = 0;
++ }
+
+- if (num_responses_handled)
++ if (num_io_responses_handled + num_events_handled > 0)
+ atomic_inc(&ctrl_info->num_interrupts);
+
+ pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+
++out:
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
+index c49e9f6c46f87..4f066e3b19af1 100644
+--- a/drivers/scsi/ufs/ufs-qcom.c
++++ b/drivers/scsi/ufs/ufs-qcom.c
+@@ -1492,9 +1492,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
+ */
+ }
+ mask <<= offset;
+-
+- pm_runtime_get_sync(host->hba->dev);
+- ufshcd_hold(host->hba, false);
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << 19,
+ REG_UFS_CFG1);
+@@ -1507,8 +1504,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
+ * committed before returning.
+ */
+ mb();
+- ufshcd_release(host->hba);
+- pm_runtime_put_sync(host->hba->dev);
+
+ return 0;
+ }
+diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
+index 63ee96eb58c68..130c798921b5d 100644
+--- a/drivers/slimbus/core.c
++++ b/drivers/slimbus/core.c
+@@ -302,8 +302,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
+ {
+ /* Remove all clients */
+ device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
+- /* Enter Clock Pause */
+- slim_ctrl_clk_pause(ctrl, false, 0);
+ ida_simple_remove(&ctrl_ida, ctrl->id);
+
+ return 0;
+@@ -327,8 +325,8 @@ void slim_report_absent(struct slim_device *sbdev)
+ mutex_lock(&ctrl->lock);
+ sbdev->is_laddr_valid = false;
+ mutex_unlock(&ctrl->lock);
+-
+- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
++ if (!ctrl->get_laddr)
++ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
+ slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
+ }
+ EXPORT_SYMBOL_GPL(slim_report_absent);
+diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
+index 01a17d84b6064..ce265bf7de868 100644
+--- a/drivers/slimbus/qcom-ngd-ctrl.c
++++ b/drivers/slimbus/qcom-ngd-ctrl.c
+@@ -1273,9 +1273,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
+ {
+ struct qcom_slim_ngd_qmi *qmi =
+ container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
++ struct qcom_slim_ngd_ctrl *ctrl =
++ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
+
+ qmi->svc_info.sq_node = 0;
+ qmi->svc_info.sq_port = 0;
++
++ qcom_slim_ngd_enable(ctrl, false);
+ }
+
+ static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
+diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
+index f4fb527d83018..c5dd026fe889f 100644
+--- a/drivers/soc/fsl/qbman/bman.c
++++ b/drivers/soc/fsl/qbman/bman.c
+@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
+ }
+ done:
+ put_affine_portal();
+- return 0;
++ return err;
+ }
+
+ struct gen_pool *bm_bpalloc;
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 4433cb4de564e..7646b4b56bed9 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -24,7 +24,6 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/gcd.h>
+-#include <linux/iopoll.h>
+
+ #include <linux/spi/spi.h>
+ #include <linux/gpio.h>
+@@ -348,9 +347,19 @@ disable_fifo:
+
+ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+ {
+- u32 val;
+-
+- return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
++ unsigned long timeout;
++
++ timeout = jiffies + msecs_to_jiffies(1000);
++ while (!(readl_relaxed(reg) & bit)) {
++ if (time_after(jiffies, timeout)) {
++ if (!(readl_relaxed(reg) & bit))
++ return -ETIMEDOUT;
++ else
++ return 0;
++ }
++ cpu_relax();
++ }
++ return 0;
+ }
+
+ static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
+index 7b7151ec14c8a..1d948fee1a039 100644
+--- a/drivers/spi/spi-s3c64xx.c
++++ b/drivers/spi/spi-s3c64xx.c
+@@ -122,6 +122,7 @@
+
+ struct s3c64xx_spi_dma_data {
+ struct dma_chan *ch;
++ dma_cookie_t cookie;
+ enum dma_transfer_direction direction;
+ };
+
+@@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data)
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ }
+
+-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
++static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ struct sg_table *sgt)
+ {
+ struct s3c64xx_spi_driver_data *sdd;
+ struct dma_slave_config config;
+ struct dma_async_tx_descriptor *desc;
++ int ret;
+
+ memset(&config, 0, sizeof(config));
+
+@@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
++ if (!desc) {
++ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
++ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
++ return -ENOMEM;
++ }
+
+ desc->callback = s3c64xx_spi_dmacb;
+ desc->callback_param = dma;
+
+- dmaengine_submit(desc);
++ dma->cookie = dmaengine_submit(desc);
++ ret = dma_submit_error(dma->cookie);
++ if (ret) {
++ dev_err(&sdd->pdev->dev, "DMA submission failed");
++ return -EIO;
++ }
++
+ dma_async_issue_pending(dma->ch);
++ return 0;
+ }
+
+ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
+@@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ }
+
+-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
++static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer, int dma_mode)
+ {
+ void __iomem *regs = sdd->regs;
+ u32 modecfg, chcfg;
++ int ret = 0;
+
+ modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
+ modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+@@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ chcfg |= S3C64XX_SPI_CH_TXCH_ON;
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
++ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ } else {
+ switch (sdd->cur_bpw) {
+ case 32:
+@@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
++ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ }
+ }
+
++ if (ret)
++ return ret;
++
+ writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
+ writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
++
++ return 0;
+ }
+
+ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+@@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ return 0;
+ }
+
+-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
++static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+ {
+ void __iomem *regs = sdd->regs;
++ int ret;
+ u32 val;
+
+ /* Disable Clock */
+@@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+
+ if (sdd->port_conf->clk_from_cmu) {
+ /* The src_clk clock is divided internally by 2 */
+- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
++ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
++ if (ret)
++ return ret;
+ } else {
+ /* Configure Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+@@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+ val |= S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+ }
++
++ return 0;
+ }
+
+ #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
+@@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ sdd->cur_mode = spi->mode;
+- s3c64xx_spi_config(sdd);
++ status = s3c64xx_spi_config(sdd);
++ if (status)
++ return status;
+ }
+
+ if (!is_polling(sdd) && (xfer->len > fifo_len) &&
+@@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
+
+- s3c64xx_enable_datapath(sdd, xfer, use_dma);
+-
+ /* Start the signals */
+ s3c64xx_spi_set_cs(spi, true);
+
++ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
++
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
++ if (status) {
++ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
++ break;
++ }
++
+ if (use_dma)
+ status = s3c64xx_wait_for_dma(sdd, xfer);
+ else
+diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
+index 147481bf680c3..a6c893ddbf280 100644
+--- a/drivers/staging/emxx_udc/emxx_udc.c
++++ b/drivers/staging/emxx_udc/emxx_udc.c
+@@ -2594,7 +2594,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
+
+ if (req->unaligned) {
+ if (!ep->virt_buf)
+- ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
++ ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
+ &ep->phys_buf,
+ GFP_ATOMIC | GFP_DMA);
+ if (ep->epnum > 0) {
+@@ -3153,7 +3153,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ if (ep->virt_buf)
+- dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
++ dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
+ ep->phys_buf);
+ }
+
+diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
+index 4533dacad4beb..ef3b5d07137a1 100644
+--- a/drivers/staging/media/ipu3/ipu3-css-params.c
++++ b/drivers/staging/media/ipu3/ipu3-css-params.c
+@@ -161,7 +161,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
+
+ memset(&cfg->scaler_coeffs_chroma, 0,
+ sizeof(cfg->scaler_coeffs_chroma));
+- memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
++ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
+ do {
+ phase_step_correction++;
+
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+index 5c33bcb0db2ee..00e34c392a388 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+@@ -585,7 +585,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
+
+ prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
+ sizeof(struct ieee80211_rxb *),
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (!prxbIndicateArray)
+ return;
+
+diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/wilc_mon.c
+index d6f14f69ad64e..017e8e91334f1 100644
+--- a/drivers/staging/wilc1000/wilc_mon.c
++++ b/drivers/staging/wilc1000/wilc_mon.c
+@@ -236,11 +236,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
+
+ if (register_netdevice(wl->monitor_dev)) {
+ netdev_err(real_dev, "register_netdevice failed\n");
++ free_netdev(wl->monitor_dev);
+ return NULL;
+ }
+ priv = netdev_priv(wl->monitor_dev);
+- if (!priv)
+- return NULL;
+
+ priv->real_ndev = real_dev;
+
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index ea925b102b322..d6634baebb474 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -669,7 +669,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
+ void *from, *to = NULL;
+ size_t copy_bytes, to_offset, offset;
+ struct scatterlist *sg;
+- struct page *page;
++ struct page *page = NULL;
+
+ for_each_sg(data_sg, sg, data_nents, i) {
+ int sg_remaining = sg->length;
+diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
+index ee0604cd9c6b2..0c498b20d8cb5 100644
+--- a/drivers/tty/hvc/hvcs.c
++++ b/drivers/tty/hvc/hvcs.c
+@@ -1218,13 +1218,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
+
+ tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
+
+- /*
+- * This line is important because it tells hvcs_open that this
+- * device needs to be re-configured the next time hvcs_open is
+- * called.
+- */
+- tty->driver_data = NULL;
+-
+ free_irq(irq, hvcsd);
+ return;
+ } else if (hvcsd->port.count < 0) {
+@@ -1239,6 +1232,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
+ {
+ struct hvcs_struct *hvcsd = tty->driver_data;
+
++ /*
++ * This line is important because it tells hvcs_open that this
++ * device needs to be re-configured the next time hvcs_open is
++ * called.
++ */
++ tty->driver_data = NULL;
++
+ tty_port_put(&hvcsd->port);
+ }
+
+diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
+index cf20616340a1a..fe569f6294a24 100644
+--- a/drivers/tty/ipwireless/network.c
++++ b/drivers/tty/ipwireless/network.c
+@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
+ skb->len,
+ notify_packet_sent,
+ network);
+- if (ret == -1) {
++ if (ret < 0) {
+ skb_pull(skb, 2);
+ return 0;
+ }
+@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
+ notify_packet_sent,
+ network);
+ kfree(buf);
+- if (ret == -1)
++ if (ret < 0)
+ return 0;
+ }
+ kfree_skb(skb);
+diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
+index fad3401e604d9..23584769fc292 100644
+--- a/drivers/tty/ipwireless/tty.c
++++ b/drivers/tty/ipwireless/tty.c
+@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
+ ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
+ buf, count,
+ ipw_write_packet_sent_callback, tty);
+- if (ret == -1) {
++ if (ret < 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return 0;
+ }
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 00099a8439d21..c6a1d8c4e6894 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
+ spin_lock_irqsave(&to->port->lock, flags);
+ /* Stuff the data into the input queue of the other end */
+ c = tty_insert_flip_string(to->port, buf, c);
++ spin_unlock_irqrestore(&to->port->lock, flags);
+ /* And shovel */
+ if (c)
+ tty_flip_buffer_push(to->port);
+- spin_unlock_irqrestore(&to->port->lock, flags);
+ }
+ return c;
+ }
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 67a9eb3f94cec..a9751a83d5dbb 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -10,6 +10,7 @@ menu "Serial drivers"
+
+ config SERIAL_EARLYCON
+ bool
++ depends on SERIAL_CORE
+ help
+ Support for early consoles with the earlycon parameter. This enables
+ the console before standard serial driver is probed. The console is
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index d2fc050a34454..ec8e608b46baa 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -635,26 +635,24 @@ static int lpuart32_poll_init(struct uart_port *port)
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /* Disable Rx & Tx */
+- lpuart32_write(&sport->port, UARTCTRL, 0);
++ lpuart32_write(&sport->port, 0, UARTCTRL);
+
+ temp = lpuart32_read(&sport->port, UARTFIFO);
+
+ /* Enable Rx and Tx FIFO */
+- lpuart32_write(&sport->port, UARTFIFO,
+- temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
++ lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
+
+ /* flush Tx and Rx FIFO */
+- lpuart32_write(&sport->port, UARTFIFO,
+- UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
++ lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
+
+ /* explicitly clear RDRF */
+ if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
+ lpuart32_read(&sport->port, UARTDATA);
+- lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
++ lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
+ }
+
+ /* Enable Rx and Tx */
+- lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
++ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ return 0;
+@@ -663,12 +661,12 @@ static int lpuart32_poll_init(struct uart_port *port)
+ static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
+ {
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
+- lpuart32_write(port, UARTDATA, c);
++ lpuart32_write(port, c, UARTDATA);
+ }
+
+ static int lpuart32_poll_get_char(struct uart_port *port)
+ {
+- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
++ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
+ return NO_POLL_CHAR;
+
+ return lpuart32_read(port, UARTDATA);
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index 9900888afbcd8..c0c39cf303871 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -2545,12 +2545,12 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
+
+ priv_dev = cdns->gadget_dev;
+
+- devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
+
+ pm_runtime_mark_last_busy(cdns->dev);
+ pm_runtime_put_autosuspend(cdns->dev);
+
+ usb_del_gadget_udc(&priv_dev->gadget);
++ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
+
+ cdns3_free_all_eps(priv_dev);
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 7499ba118665a..808722b8294a4 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1243,9 +1243,21 @@ static int acm_probe(struct usb_interface *intf,
+ }
+ }
+ } else {
++ int class = -1;
++
+ data_intf_num = union_header->bSlaveInterface0;
+ control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
+ data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
++
++ if (control_interface)
++ class = control_interface->cur_altsetting->desc.bInterfaceClass;
++
++ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
++ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
++ combined_interfaces = 1;
++ control_interface = data_interface = intf;
++ goto look_for_collapsed_interface;
++ }
+ }
+
+ if (!control_interface || !data_interface) {
+@@ -1900,6 +1912,17 @@ static const struct usb_device_id acm_ids[] = {
+ .driver_info = IGNORE_DEVICE,
+ },
+
++ /* Exclude ETAS ES58x */
++ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
++ .driver_info = IGNORE_DEVICE,
++ },
++ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
++ .driver_info = IGNORE_DEVICE,
++ },
++ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
++ .driver_info = IGNORE_DEVICE,
++ },
++
+ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 70afb2ca1eabd..9875e2fe33db2 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+
+ #define WDM_MAX 16
+
++/* we cannot wait forever at flush() */
++#define WDM_FLUSH_TIMEOUT (30 * HZ)
++
+ /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
+ #define WDM_DEFAULT_BUFSIZE 256
+
+@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
+ kfree(desc->outbuf);
+ desc->outbuf = NULL;
+ clear_bit(WDM_IN_USE, &desc->flags);
+- wake_up(&desc->wait);
++ wake_up_all(&desc->wait);
+ }
+
+ static void wdm_in_callback(struct urb *urb)
+@@ -393,6 +396,9 @@ static ssize_t wdm_write
+ if (test_bit(WDM_RESETTING, &desc->flags))
+ r = -EIO;
+
++ if (test_bit(WDM_DISCONNECTING, &desc->flags))
++ r = -ENODEV;
++
+ if (r < 0) {
+ rv = r;
+ goto out_free_mem_pm;
+@@ -424,6 +430,7 @@ static ssize_t wdm_write
+ if (rv < 0) {
+ desc->outbuf = NULL;
+ clear_bit(WDM_IN_USE, &desc->flags);
++ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
+ dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
+ rv = usb_translate_errors(rv);
+ goto out_free_mem_pm;
+@@ -583,28 +590,58 @@ err:
+ return rv;
+ }
+
+-static int wdm_flush(struct file *file, fl_owner_t id)
++static int wdm_wait_for_response(struct file *file, long timeout)
+ {
+ struct wdm_device *desc = file->private_data;
++ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
++
++ /*
++ * Needs both flags. We cannot do with one because resetting it would
++ * cause a race with write() yet we need to signal a disconnect.
++ */
++ rv = wait_event_interruptible_timeout(desc->wait,
++ !test_bit(WDM_IN_USE, &desc->flags) ||
++ test_bit(WDM_DISCONNECTING, &desc->flags),
++ timeout);
+
+- wait_event(desc->wait,
+- /*
+- * needs both flags. We cannot do with one
+- * because resetting it would cause a race
+- * with write() yet we need to signal
+- * a disconnect
+- */
+- !test_bit(WDM_IN_USE, &desc->flags) ||
+- test_bit(WDM_DISCONNECTING, &desc->flags));
+-
+- /* cannot dereference desc->intf if WDM_DISCONNECTING */
++ /*
++ * To report the correct error. This is best effort.
++ * We are inevitably racing with the hardware.
++ */
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ return -ENODEV;
+- if (desc->werr < 0)
+- dev_err(&desc->intf->dev, "Error in flush path: %d\n",
+- desc->werr);
++ if (!rv)
++ return -EIO;
++ if (rv < 0)
++ return -EINTR;
++
++ spin_lock_irq(&desc->iuspin);
++ rv = desc->werr;
++ desc->werr = 0;
++ spin_unlock_irq(&desc->iuspin);
++
++ return usb_translate_errors(rv);
++
++}
++
++/*
++ * You need to send a signal when you react to malicious or defective hardware.
++ * Also, don't abort when fsync() returned -EINVAL, for older kernels which do
++ * not implement wdm_flush() will return -EINVAL.
++ */
++static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
++{
++ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
++}
+
+- return usb_translate_errors(desc->werr);
++/*
++ * Same with wdm_fsync(), except it uses finite timeout in order to react to
++ * malicious or defective hardware which ceased communication after close() was
++ * implicitly called due to process termination.
++ */
++static int wdm_flush(struct file *file, fl_owner_t id)
++{
++ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
+ }
+
+ static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
+@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
+ .owner = THIS_MODULE,
+ .read = wdm_read,
+ .write = wdm_write,
++ .fsync = wdm_fsync,
+ .open = wdm_open,
+ .flush = wdm_flush,
+ .release = wdm_release,
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index da923ec176122..31ca5abb4c12a 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -772,11 +772,12 @@ void usb_block_urb(struct urb *urb)
+ EXPORT_SYMBOL_GPL(usb_block_urb);
+
+ /**
+- * usb_kill_anchored_urbs - cancel transfer requests en masse
++ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
+ * @anchor: anchor the requests are bound to
+ *
+- * this allows all outstanding URBs to be killed starting
+- * from the back of the queue
++ * This kills all outstanding URBs starting from the back of the queue,
++ * with guarantee that no completer callbacks will take place from the
++ * anchor after this function returns.
+ *
+ * This routine should not be called by a driver after its disconnect
+ * method has returned.
+@@ -784,20 +785,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
+ void usb_kill_anchored_urbs(struct usb_anchor *anchor)
+ {
+ struct urb *victim;
++ int surely_empty;
+
+- spin_lock_irq(&anchor->lock);
+- while (!list_empty(&anchor->urb_list)) {
+- victim = list_entry(anchor->urb_list.prev, struct urb,
+- anchor_list);
+- /* we must make sure the URB isn't freed before we kill it*/
+- usb_get_urb(victim);
+- spin_unlock_irq(&anchor->lock);
+- /* this will unanchor the URB */
+- usb_kill_urb(victim);
+- usb_put_urb(victim);
++ do {
+ spin_lock_irq(&anchor->lock);
+- }
+- spin_unlock_irq(&anchor->lock);
++ while (!list_empty(&anchor->urb_list)) {
++ victim = list_entry(anchor->urb_list.prev,
++ struct urb, anchor_list);
++ /* make sure the URB isn't freed before we kill it */
++ usb_get_urb(victim);
++ spin_unlock_irq(&anchor->lock);
++ /* this will unanchor the URB */
++ usb_kill_urb(victim);
++ usb_put_urb(victim);
++ spin_lock_irq(&anchor->lock);
++ }
++ surely_empty = usb_anchor_check_wakeup(anchor);
++
++ spin_unlock_irq(&anchor->lock);
++ cpu_relax();
++ } while (!surely_empty);
+ }
+ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
+
+@@ -816,21 +823,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
+ void usb_poison_anchored_urbs(struct usb_anchor *anchor)
+ {
+ struct urb *victim;
++ int surely_empty;
+
+- spin_lock_irq(&anchor->lock);
+- anchor->poisoned = 1;
+- while (!list_empty(&anchor->urb_list)) {
+- victim = list_entry(anchor->urb_list.prev, struct urb,
+- anchor_list);
+- /* we must make sure the URB isn't freed before we kill it*/
+- usb_get_urb(victim);
+- spin_unlock_irq(&anchor->lock);
+- /* this will unanchor the URB */
+- usb_poison_urb(victim);
+- usb_put_urb(victim);
++ do {
+ spin_lock_irq(&anchor->lock);
+- }
+- spin_unlock_irq(&anchor->lock);
++ anchor->poisoned = 1;
++ while (!list_empty(&anchor->urb_list)) {
++ victim = list_entry(anchor->urb_list.prev,
++ struct urb, anchor_list);
++ /* make sure the URB isn't freed before we kill it */
++ usb_get_urb(victim);
++ spin_unlock_irq(&anchor->lock);
++ /* this will unanchor the URB */
++ usb_poison_urb(victim);
++ usb_put_urb(victim);
++ spin_lock_irq(&anchor->lock);
++ }
++ surely_empty = usb_anchor_check_wakeup(anchor);
++
++ spin_unlock_irq(&anchor->lock);
++ cpu_relax();
++ } while (!surely_empty);
+ }
+ EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
+
+@@ -970,14 +983,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
+ {
+ struct urb *victim;
+ unsigned long flags;
++ int surely_empty;
++
++ do {
++ spin_lock_irqsave(&anchor->lock, flags);
++ while (!list_empty(&anchor->urb_list)) {
++ victim = list_entry(anchor->urb_list.prev,
++ struct urb, anchor_list);
++ __usb_unanchor_urb(victim, anchor);
++ }
++ surely_empty = usb_anchor_check_wakeup(anchor);
+
+- spin_lock_irqsave(&anchor->lock, flags);
+- while (!list_empty(&anchor->urb_list)) {
+- victim = list_entry(anchor->urb_list.prev, struct urb,
+- anchor_list);
+- __usb_unanchor_urb(victim, anchor);
+- }
+- spin_unlock_irqrestore(&anchor->lock, flags);
++ spin_unlock_irqrestore(&anchor->lock, flags);
++ cpu_relax();
++ } while (!surely_empty);
+ }
+
+ EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index f7528f732b2aa..70ac47a341ac2 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -712,8 +712,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
+ */
+ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+ {
++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ int is_isoc = hs_ep->isochronous;
+ unsigned int maxsize;
++ u32 mps = hs_ep->ep.maxpacket;
++ int dir_in = hs_ep->dir_in;
+
+ if (is_isoc)
+ maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+@@ -722,6 +725,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+ else
+ maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
+
++ /* Interrupt OUT EP with mps not multiple of 4 */
++ if (hs_ep->index)
++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
++ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
++
+ return maxsize;
+ }
+
+@@ -737,11 +745,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+ * Isochronous - descriptor rx/tx bytes bitfield limit,
+ * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
+ * have concatenations from various descriptors within one packet.
++ * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
++ * to a single descriptor.
+ *
+ * Selects corresponding mask for RX/TX bytes as well.
+ */
+ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+ {
++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
+ u32 desc_size = 0;
+@@ -765,6 +776,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+ desc_size -= desc_size % mps;
+ }
+
++ /* Interrupt OUT EP with mps not multiple of 4 */
++ if (hs_ep->index)
++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
++ desc_size = mps;
++ *mask = DEV_DMA_NBYTES_MASK;
++ }
++
+ return desc_size;
+ }
+
+@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ length += (mps - (length % mps));
+ }
+
+- /*
+- * If more data to send, adjust DMA for EP0 out data stage.
+- * ureq->dma stays unchanged, hence increment it by already
+- * passed passed data count before starting new transaction.
+- */
+- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
+- continuing)
++ if (continuing)
+ offset = ureq->actual;
+
+ /* Fill DDMA chain entries */
+@@ -2319,22 +2331,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
+ */
+ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+ {
++ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned int bytes_rem = 0;
++ unsigned int bytes_rem_correction = 0;
+ struct dwc2_dma_desc *desc = hs_ep->desc_list;
+ int i;
+ u32 status;
++ u32 mps = hs_ep->ep.maxpacket;
++ int dir_in = hs_ep->dir_in;
+
+ if (!desc)
+ return -EINVAL;
+
++ /* Interrupt OUT EP with mps not multiple of 4 */
++ if (hs_ep->index)
++ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
++ bytes_rem_correction = 4 - (mps % 4);
++
+ for (i = 0; i < hs_ep->desc_count; ++i) {
+ status = desc->status;
+ bytes_rem += status & DEV_DMA_NBYTES_MASK;
++ bytes_rem -= bytes_rem_correction;
+
+ if (status & DEV_DMA_STS_MASK)
+ dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ i, status & DEV_DMA_STS_MASK);
++
++ if (status & DEV_DMA_L)
++ break;
++
+ desc++;
+ }
+
+diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
+index 31e090ac9f1ec..6d3812678b8c6 100644
+--- a/drivers/usb/dwc2/params.c
++++ b/drivers/usb/dwc2/params.c
+@@ -846,7 +846,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+ int dwc2_init_params(struct dwc2_hsotg *hsotg)
+ {
+ const struct of_device_id *match;
+- void (*set_params)(void *data);
++ void (*set_params)(struct dwc2_hsotg *data);
+
+ dwc2_set_default_params(hsotg);
+ dwc2_get_device_properties(hsotg);
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 526c275ad0bc5..4cbf295390062 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -117,6 +117,7 @@ static void __dwc3_set_mode(struct work_struct *work)
+ struct dwc3 *dwc = work_to_dwc(work);
+ unsigned long flags;
+ int ret;
++ u32 reg;
+
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return;
+@@ -168,6 +169,11 @@ static void __dwc3_set_mode(struct work_struct *work)
+ otg_set_vbus(dwc->usb2_phy->otg, true);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
++ if (dwc->dis_split_quirk) {
++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
++ reg |= DWC3_GUCTL3_SPLITDISABLE;
++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
++ }
+ }
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+@@ -1323,6 +1329,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
+ dwc->dis_metastability_quirk = device_property_read_bool(dev,
+ "snps,dis_metastability_quirk");
+
++ dwc->dis_split_quirk = device_property_read_bool(dev,
++ "snps,dis-split-quirk");
++
+ dwc->lpm_nyet_threshold = lpm_nyet_threshold;
+ dwc->tx_de_emphasis = tx_de_emphasis;
+
+@@ -1835,10 +1844,26 @@ static int dwc3_resume(struct device *dev)
+
+ return 0;
+ }
++
++static void dwc3_complete(struct device *dev)
++{
++ struct dwc3 *dwc = dev_get_drvdata(dev);
++ u32 reg;
++
++ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
++ dwc->dis_split_quirk) {
++ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
++ reg |= DWC3_GUCTL3_SPLITDISABLE;
++ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
++ }
++}
++#else
++#define dwc3_complete NULL
+ #endif /* CONFIG_PM_SLEEP */
+
+ static const struct dev_pm_ops dwc3_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
++ .complete = dwc3_complete,
+ SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+ dwc3_runtime_idle)
+ };
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index ce4acbf7fef90..4dfbffa944de1 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -136,6 +136,7 @@
+ #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
+
+ #define DWC3_GHWPARAMS8 0xc600
++#define DWC3_GUCTL3 0xc60c
+ #define DWC3_GFLADJ 0xc630
+
+ /* Device Registers */
+@@ -375,6 +376,9 @@
+ /* Global User Control Register 2 */
+ #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
+
++/* Global User Control Register 3 */
++#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
++
+ /* Device Configuration Register */
+ #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
+ #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
+@@ -1038,6 +1042,7 @@ struct dwc3_scratchpad_array {
+ * 2 - No de-emphasis
+ * 3 - Reserved
+ * @dis_metastability_quirk: set to disable metastability quirk.
++ * @dis_split_quirk: set to disable split boundary.
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
+ */
+@@ -1229,6 +1234,8 @@ struct dwc3 {
+
+ unsigned dis_metastability_quirk:1;
+
++ unsigned dis_split_quirk:1;
++
+ u16 imod_interval;
+ };
+
+diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
+index bdac3e7d7b184..d055e00f81808 100644
+--- a/drivers/usb/dwc3/dwc3-of-simple.c
++++ b/drivers/usb/dwc3/dwc3-of-simple.c
+@@ -183,6 +183,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
+ { .compatible = "amlogic,meson-axg-dwc3" },
+ { .compatible = "amlogic,meson-gxl-dwc3" },
+ { .compatible = "allwinner,sun50i-h6-dwc3" },
++ { .compatible = "hisilicon,hi3670-dwc3" },
+ { /* Sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 1f638759a9533..92a7c3a839454 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+ /* peak (theoretical) bulk transfer rate in bits-per-second */
+ static inline unsigned ncm_bitrate(struct usb_gadget *g)
+ {
+- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+- return 13 * 1024 * 8 * 1000 * 8;
++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
++ return 4250000000U;
++ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
++ return 3750000000U;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
+ fs_ncm_notify_desc.bEndpointAddress;
+
+ status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
+- ncm_ss_function, NULL);
++ ncm_ss_function, ncm_ss_function);
+ if (status)
+ goto fail;
+
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index 9c7ed2539ff77..8ed1295d7e350 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -31,6 +31,7 @@
+ #include <linux/types.h>
+ #include <linux/ctype.h>
+ #include <linux/cdev.h>
++#include <linux/kref.h>
+
+ #include <asm/byteorder.h>
+ #include <linux/io.h>
+@@ -64,7 +65,7 @@ struct printer_dev {
+ struct usb_gadget *gadget;
+ s8 interface;
+ struct usb_ep *in_ep, *out_ep;
+-
++ struct kref kref;
+ struct list_head rx_reqs; /* List of free RX structs */
+ struct list_head rx_reqs_active; /* List of Active RX xfers */
+ struct list_head rx_buffers; /* List of completed xfers */
+@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
+
+ /*-------------------------------------------------------------------------*/
+
++static void printer_dev_free(struct kref *kref)
++{
++ struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
++
++ kfree(dev);
++}
++
+ static struct usb_request *
+ printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
+ {
+@@ -348,6 +356,7 @@ printer_open(struct inode *inode, struct file *fd)
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
++ kref_get(&dev->kref);
+ DBG(dev, "printer_open returned %x\n", ret);
+ return ret;
+ }
+@@ -365,6 +374,7 @@ printer_close(struct inode *inode, struct file *fd)
+ dev->printer_status &= ~PRINTER_SELECTED;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
++ kref_put(&dev->kref, printer_dev_free);
+ DBG(dev, "printer_close\n");
+
+ return 0;
+@@ -1350,7 +1360,8 @@ static void gprinter_free(struct usb_function *f)
+ struct f_printer_opts *opts;
+
+ opts = container_of(f->fi, struct f_printer_opts, func_inst);
+- kfree(dev);
++
++ kref_put(&dev->kref, printer_dev_free);
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+@@ -1419,6 +1430,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
+ return ERR_PTR(-ENOMEM);
+ }
+
++ kref_init(&dev->kref);
+ ++opts->refcnt;
+ dev->minor = opts->minor;
+ dev->pnp_string = opts->pnp_string;
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index fbe96ef1ac7a4..891e9f7f40d59 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -93,7 +93,7 @@ struct eth_dev {
+ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
+ {
+ if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+- gadget->speed == USB_SPEED_SUPER))
++ gadget->speed >= USB_SPEED_SUPER))
+ return qmult * DEFAULT_QLEN;
+ else
+ return DEFAULT_QLEN;
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 4de91653a2c7b..5eb62240c7f87 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -673,20 +673,24 @@ retry:
+
+ /* handle root hub init quirks ... */
+ val = roothub_a (ohci);
+- val &= ~(RH_A_PSM | RH_A_OCPM);
++ /* Configure for per-port over-current protection by default */
++ val &= ~RH_A_NOCP;
++ val |= RH_A_OCPM;
+ if (ohci->flags & OHCI_QUIRK_SUPERIO) {
+- /* NSC 87560 and maybe others */
++ /* NSC 87560 and maybe others.
++ * Ganged power switching, no over-current protection.
++ */
+ val |= RH_A_NOCP;
+- val &= ~(RH_A_POTPGT | RH_A_NPS);
+- ohci_writel (ohci, val, &ohci->regs->roothub.a);
++ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
+ } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+ (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
+ /* hub power always on; required for AMD-756 and some
+- * Mac platforms. ganged overcurrent reporting, if any.
++ * Mac platforms.
+ */
+ val |= RH_A_NPS;
+- ohci_writel (ohci, val, &ohci->regs->roothub.a);
+ }
++ ohci_writel(ohci, val, &ohci->regs->roothub.a);
++
+ ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
+ ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
+ &ohci->regs->roothub.b);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index bad154f446f8d..0d10ede581cbd 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1915,8 +1915,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ trace_xhci_add_endpoint(ep_ctx);
+
+- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
+-
+ xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
+ (unsigned int) ep->desc.bEndpointAddress,
+ udev->slot_id,
+@@ -2949,6 +2947,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
+ virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+ virt_dev->eps[i].new_ring = NULL;
++ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
+ }
+ command_cleanup:
+ kfree(command->completion);
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index 927b608461c82..bf32997c557ff 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+ * PF SR-IOV capability, there's therefore no need to trigger
+ * faults based on the virtual value.
+ */
+- return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
++ return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
+ }
+
+ /*
+@@ -518,8 +518,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
+
+ count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
+
+- /* Mask in virtual memory enable for SR-IOV devices */
+- if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
++ /* Mask in virtual memory enable */
++ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+ u32 tmp_val = le32_to_cpu(*val);
+
+@@ -587,9 +587,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+ * shows it disabled (phys_mem/io, then the device has
+ * undergone some kind of backdoor reset and needs to be
+ * restored before we allow it to enable the bars.
+- * SR-IOV devices will trigger this, but we catch them later
++ * SR-IOV devices will trigger this - for mem enable let's
++ * catch this now and for io enable it will be caught later
+ */
+- if ((new_mem && virt_mem && !phys_mem) ||
++ if ((new_mem && virt_mem && !phys_mem &&
++ !pdev->no_command_memory) ||
+ (new_io && virt_io && !phys_io) ||
+ vfio_need_bar_restore(vdev))
+ vfio_bar_restore(vdev);
+@@ -1732,12 +1734,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
+ vconfig[PCI_INTERRUPT_PIN]);
+
+ vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+-
++ }
++ if (pdev->no_command_memory) {
+ /*
+- * VFs do no implement the memory enable bit of the COMMAND
+- * register therefore we'll not have it set in our initial
+- * copy of config space after pci_enable_device(). For
+- * consistency with PFs, set the virtual enable bit here.
++ * VFs and devices that set pdev->no_command_memory do not
++ * implement the memory enable bit of the COMMAND register
++ * therefore we'll not have it set in our initial copy of
++ * config space after pci_enable_device(). For consistency
++ * with PFs, set the virtual enable bit here.
+ */
+ *(__le16 *)&vconfig[PCI_COMMAND] |=
+ cpu_to_le16(PCI_COMMAND_MEMORY);
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 1d9fb25929459..869dce5f134dd 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ vdev->ctx[vector].producer.token = trigger;
+ vdev->ctx[vector].producer.irq = irq;
+ ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
+- if (unlikely(ret))
++ if (unlikely(ret)) {
+ dev_info(&pdev->dev,
+ "irq bypass producer (token %p) registration fails: %d\n",
+ vdev->ctx[vector].producer.token, ret);
+
++ vdev->ctx[vector].producer.token = NULL;
++ }
+ vdev->ctx[vector].trigger = trigger;
+
+ return 0;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index c6220f57fdf3e..3b31e83a92155 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -631,7 +631,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
+
+ ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+ if (ret) {
+- vfio_unpin_page_external(dma, iova, do_accounting);
++ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
++ vfio_lock_acct(dma, -1, true);
+ goto pin_unwind;
+ }
+ }
+diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
+index 2355f00f57732..1f6301375fd33 100644
+--- a/drivers/video/backlight/sky81452-backlight.c
++++ b/drivers/video/backlight/sky81452-backlight.c
+@@ -196,6 +196,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
+ num_entry);
+ if (ret < 0) {
+ dev_err(dev, "led-sources node is invalid.\n");
++ of_node_put(np);
+ return ERR_PTR(-EINVAL);
+ }
+
+diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
+index 4ca07866f2f66..5dda824d0da3f 100644
+--- a/drivers/video/fbdev/aty/radeon_base.c
++++ b/drivers/video/fbdev/aty/radeon_base.c
+@@ -2323,7 +2323,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
+
+ ret = radeon_kick_out_firmware_fb(pdev);
+ if (ret)
+- return ret;
++ goto err_release_fb;
+
+ /* request the mem regions */
+ ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 97abcd497c7e0..bf76dadbed87f 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1001,6 +1001,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ return 0;
+ }
+
++ /* bitfill_aligned() assumes that it's at least 8x8 */
++ if (var->xres < 8 || var->yres < 8)
++ return -EINVAL;
++
+ ret = info->fbops->fb_check_var(var, info);
+
+ if (ret)
+diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
+index dfe3eb769638b..fde27feae5d0c 100644
+--- a/drivers/video/fbdev/sis/init.c
++++ b/drivers/video/fbdev/sis/init.c
+@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
+
+ i = 0;
+
++ if (SiS_Pr->ChipType == SIS_730)
++ queuedata = &FQBQData730[0];
++ else
++ queuedata = &FQBQData[0];
++
+ if(ModeNo > 0x13) {
+
+ /* Get VCLK */
+@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
+ /* Get half colordepth */
+ colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
+
+- if(SiS_Pr->ChipType == SIS_730) {
+- queuedata = &FQBQData730[0];
+- } else {
+- queuedata = &FQBQData[0];
+- }
+-
+ do {
+ templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
+
+diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
+index 4b83109202b1c..3c4d20618de4c 100644
+--- a/drivers/video/fbdev/vga16fb.c
++++ b/drivers/video/fbdev/vga16fb.c
+@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
+ }
+
+ static void vga16fb_clock_chip(struct vga16fb_par *par,
+- unsigned int pixclock,
++ unsigned int *pixclock,
+ const struct fb_info *info,
+ int mul, int div)
+ {
+@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
+ { 0 /* bad */, 0x00, 0x00}};
+ int err;
+
+- pixclock = (pixclock * mul) / div;
++ *pixclock = (*pixclock * mul) / div;
+ best = vgaclocks;
+- err = pixclock - best->pixclock;
++ err = *pixclock - best->pixclock;
+ if (err < 0) err = -err;
+ for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
+ int tmp;
+
+- tmp = pixclock - ptr->pixclock;
++ tmp = *pixclock - ptr->pixclock;
+ if (tmp < 0) tmp = -tmp;
+ if (tmp < err) {
+ err = tmp;
+@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
+ }
+ par->misc |= best->misc;
+ par->clkdiv = best->seq_clock_mode;
+- pixclock = (best->pixclock * div) / mul;
++ *pixclock = (best->pixclock * div) / mul;
+ }
+
+ #define FAIL(X) return -EINVAL
+@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
+
+ if (mode & MODE_8BPP)
+ /* pixel clock == vga clock / 2 */
+- vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
+ else
+ /* pixel clock == vga clock */
+- vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
+
+ var->red.offset = var->green.offset = var->blue.offset =
+ var->transp.offset = 0;
+diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
+index 93d5bebf9572a..fb292f9cf29df 100644
+--- a/drivers/virt/fsl_hypervisor.c
++++ b/drivers/virt/fsl_hypervisor.c
+@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+
+ unsigned int i;
+ long ret = 0;
+- int num_pinned; /* return value from get_user_pages() */
++ int num_pinned = 0; /* return value from get_user_pages_fast() */
+ phys_addr_t remote_paddr; /* The next address in the remote buffer */
+ uint32_t count; /* The number of bytes left to copy */
+
+@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ return -EINVAL;
+
+ /*
+- * The array of pages returned by get_user_pages() covers only
++ * The array of pages returned by get_user_pages_fast() covers only
+ * page-aligned memory. Since the user buffer is probably not
+ * page-aligned, we need to handle the discrepancy.
+ *
+@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+
+ /*
+ * 'pages' is an array of struct page pointers that's initialized by
+- * get_user_pages().
++ * get_user_pages_fast().
+ */
+ pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ if (!sg_list_unaligned) {
+ pr_debug("fsl-hv: could not allocate S/G list\n");
+ ret = -ENOMEM;
+- goto exit;
++ goto free_pages;
+ }
+ sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
+
+@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
+
+ if (num_pinned != num_pages) {
+- /* get_user_pages() failed */
+ pr_debug("fsl-hv: could not lock source buffer\n");
+ ret = (num_pinned < 0) ? num_pinned : -EFAULT;
+ goto exit;
+@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+ virt_to_phys(sg_list), num_pages);
+
+ exit:
+- if (pages) {
+- for (i = 0; i < num_pages; i++)
+- if (pages[i])
+- put_page(pages[i]);
++ if (pages && (num_pinned > 0)) {
++ for (i = 0; i < num_pinned; i++)
++ put_page(pages[i]);
+ }
+
+ kfree(sg_list_unaligned);
++free_pages:
+ kfree(pages);
+
+ if (!ret)
+diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
+index 87eaf357ae01f..adf015aa4126f 100644
+--- a/drivers/watchdog/sp5100_tco.h
++++ b/drivers/watchdog/sp5100_tco.h
+@@ -70,7 +70,7 @@
+ #define EFCH_PM_DECODEEN_WDT_TMREN BIT(7)
+
+
+-#define EFCH_PM_DECODEEN3 0x00
++#define EFCH_PM_DECODEEN3 0x03
+ #define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0)
+ #define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2))
+
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 3729f99fd8eca..8494846ccdc5f 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -971,8 +971,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ wd_data->wdd = wdd;
+ wdd->wd_data = wd_data;
+
+- if (IS_ERR_OR_NULL(watchdog_kworker))
++ if (IS_ERR_OR_NULL(watchdog_kworker)) {
++ kfree(wd_data);
+ return -ENODEV;
++ }
+
+ device_initialize(&wd_data->dev);
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+@@ -998,7 +1000,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ pr_err("%s: a legacy watchdog module is probably present.\n",
+ wdd->info->identity);
+ old_wd_data = NULL;
+- kfree(wd_data);
++ put_device(&wd_data->dev);
+ return err;
+ }
+ }
+diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
+index 689162e2e1755..3150c19cdc2fb 100644
+--- a/fs/cifs/asn1.c
++++ b/fs/cifs/asn1.c
+@@ -530,8 +530,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ return 0;
+ } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
+ || (tag != ASN1_EOC)) {
+- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
+- cls, con, tag, end, *end);
++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
++ cls, con, tag, end);
+ return 0;
+ }
+
+@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ return 0;
+ } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+ || (tag != ASN1_SEQ)) {
+- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
+- cls, con, tag, end, *end);
++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n",
++ cls, con, tag, end);
+ return 0;
+ }
+
+@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ return 0;
+ } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
+ || (tag != ASN1_EOC)) {
+- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n",
+- cls, con, tag, end, *end);
++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n",
++ cls, con, tag, end);
+ return 0;
+ }
+
+@@ -564,8 +564,8 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ return 0;
+ } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+ || (tag != ASN1_SEQ)) {
+- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n",
+- cls, con, tag, end, *end);
++ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n",
++ cls, con, tag, sequence_end);
+ return 0;
+ }
+
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 9a89e5f7c4da3..776029a57e717 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -3707,7 +3707,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ if (rc) {
+ cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
+ enc ? "en" : "de");
+- return 0;
++ return rc;
+ }
+
+ rc = smb3_crypto_aead_allocate(server);
+@@ -3886,7 +3886,8 @@ smb3_is_transform_hdr(void *buf)
+ static int
+ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+ unsigned int buf_data_size, struct page **pages,
+- unsigned int npages, unsigned int page_data_size)
++ unsigned int npages, unsigned int page_data_size,
++ bool is_offloaded)
+ {
+ struct kvec iov[2];
+ struct smb_rqst rqst = {NULL};
+@@ -3912,7 +3913,8 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
+
+ memmove(buf, iov[1].iov_base, buf_data_size);
+
+- server->total_read = buf_data_size + page_data_size;
++ if (!is_offloaded)
++ server->total_read = buf_data_size + page_data_size;
+
+ return rc;
+ }
+@@ -4126,7 +4128,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
+ struct mid_q_entry *mid;
+
+ rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
+- dw->ppages, dw->npages, dw->len);
++ dw->ppages, dw->npages, dw->len, true);
+ if (rc) {
+ cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
+ goto free_pages;
+@@ -4232,7 +4234,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
+
+ non_offloaded_decrypt:
+ rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
+- pages, npages, len);
++ pages, npages, len, false);
+ if (rc)
+ goto free_pages;
+
+@@ -4288,7 +4290,7 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ server->total_read += length;
+
+ buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
+- length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0);
++ length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
+ if (length)
+ return length;
+
+diff --git a/fs/d_path.c b/fs/d_path.c
+index 0f1fc1743302f..a69e2cd36e6e3 100644
+--- a/fs/d_path.c
++++ b/fs/d_path.c
+@@ -102,6 +102,8 @@ restart:
+
+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ struct mount *parent = READ_ONCE(mnt->mnt_parent);
++ struct mnt_namespace *mnt_ns;
++
+ /* Escaped? */
+ if (dentry != vfsmnt->mnt_root) {
+ bptr = *buffer;
+@@ -116,7 +118,9 @@ restart:
+ vfsmnt = &mnt->mnt;
+ continue;
+ }
+- if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
++ mnt_ns = READ_ONCE(mnt->mnt_ns);
++ /* open-coded is_mounted() to use local mnt_ns */
++ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns))
+ error = 1; // absolute root
+ else
+ error = 2; // detached or not attached yet
+diff --git a/fs/dlm/config.c b/fs/dlm/config.c
+index 3b21082e1b550..3b1012a3c4396 100644
+--- a/fs/dlm/config.c
++++ b/fs/dlm/config.c
+@@ -216,6 +216,7 @@ struct dlm_space {
+ struct list_head members;
+ struct mutex members_lock;
+ int members_count;
++ struct dlm_nodes *nds;
+ };
+
+ struct dlm_comms {
+@@ -424,6 +425,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
+ INIT_LIST_HEAD(&sp->members);
+ mutex_init(&sp->members_lock);
+ sp->members_count = 0;
++ sp->nds = nds;
+ return &sp->group;
+
+ fail:
+@@ -445,6 +447,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
+ static void release_space(struct config_item *i)
+ {
+ struct dlm_space *sp = config_item_to_space(i);
++ kfree(sp->nds);
+ kfree(sp);
+ }
+
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index dbccf46f17709..37347ba868b70 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -108,6 +108,9 @@ static int ext4_getfsmap_helper(struct super_block *sb,
+
+ /* Are we just counting mappings? */
+ if (info->gfi_head->fmh_count == 0) {
++ if (info->gfi_head->fmh_entries == UINT_MAX)
++ return EXT4_QUERY_RANGE_ABORT;
++
+ if (rec_fsblk > info->gfi_next_fsblk)
+ info->gfi_head->fmh_entries++;
+
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 170934430d7d7..029e693e201cf 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -788,4 +788,5 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
+ }
+ kobject_del(&sbi->s_kobj);
+ kobject_put(&sbi->s_kobj);
++ wait_for_completion(&sbi->s_kobj_unregister);
+ }
+diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
+index a30ea7ecb790a..80867a1a94f26 100644
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -559,6 +559,7 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
+
+ if (PageUptodate(page))
+ return 0;
++ ClearPageError(page);
+
+ do {
+ iomap_adjust_read_range(inode, iop, &block_start,
+diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
+index 7b5f76efef02d..8c98fd92bf665 100644
+--- a/fs/iomap/direct-io.c
++++ b/fs/iomap/direct-io.c
+@@ -377,6 +377,16 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+ return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
+ case IOMAP_INLINE:
+ return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
++ case IOMAP_DELALLOC:
++ /*
++ * DIO is not serialised against mmap() access at all, and so
++ * if the page_mkwrite occurs between the writeback and the
++ * iomap_apply() call in the DIO path, then it will see the
++ * DELALLOC block that the page-mkwrite allocated.
++ */
++ pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
++ dio->iocb->ki_filp, current->comm);
++ return -EIO;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
+index d4359a1df3d5e..84933a0af49b6 100644
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -1809,6 +1809,12 @@ int ntfs_read_inode_mount(struct inode *vi)
+ brelse(bh);
+ }
+
++ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
++ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
++ le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
++ goto err_out;
++ }
++
+ /* Apply the mst fixups. */
+ if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
+ /* FIXME: Try to use the $MFTMirr now. */
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 529d0c6ec6f9c..b690074e65ffa 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1036,7 +1036,6 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
+
+ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
+ {
+- static DEFINE_MUTEX(oom_adj_mutex);
+ struct mm_struct *mm = NULL;
+ struct task_struct *task;
+ int err = 0;
+@@ -1076,7 +1075,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
+ struct task_struct *p = find_lock_task_mm(task);
+
+ if (p) {
+- if (atomic_read(&p->mm->mm_users) > 1) {
++ if (test_bit(MMF_MULTIPROCESS, &p->mm->flags)) {
+ mm = p->mm;
+ mmgrab(mm);
+ }
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index 53429c29c7842..276c27fb99280 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -284,6 +284,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
+ d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+ d->dqb_btime = cpu_to_le64(m->dqb_btime);
+ d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
++ d->dqb_pad = 0;
+ if (qtree_entry_unused(info, dp))
+ d->dqb_itime = cpu_to_le64(1);
+ }
+diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
+index 4146954549560..355523f4a4bf3 100644
+--- a/fs/ramfs/file-nommu.c
++++ b/fs/ramfs/file-nommu.c
+@@ -224,7 +224,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
+ if (!pages)
+ goto out_free;
+
+- nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
++ nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
+ if (nr != lpages)
+ goto out_free_pages; /* leave if some pages were missing */
+
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 70387650436cf..ac35ddf0dd603 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -2161,7 +2161,8 @@ out_end_trans:
+ out_inserted_sd:
+ clear_nlink(inode);
+ th->t_trans_id = 0; /* so the caller can't use this handle later */
+- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */
++ if (inode->i_state & I_NEW)
++ unlock_new_inode(inode);
+ iput(inode);
+ return err;
+ }
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index a6bce5b1fb1dc..1b9c7a387dc71 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1258,6 +1258,10 @@ static int reiserfs_parse_options(struct super_block *s,
+ "turned on.");
+ return 0;
+ }
++ if (qf_names[qtype] !=
++ REISERFS_SB(s)->s_qf_names[qtype])
++ kfree(qf_names[qtype]);
++ qf_names[qtype] = NULL;
+ if (*arg) { /* Some filename specified? */
+ if (REISERFS_SB(s)->s_qf_names[qtype]
+ && strcmp(REISERFS_SB(s)->s_qf_names[qtype],
+@@ -1287,10 +1291,6 @@ static int reiserfs_parse_options(struct super_block *s,
+ else
+ *mount_options |= 1 << REISERFS_GRPQUOTA;
+ } else {
+- if (qf_names[qtype] !=
+- REISERFS_SB(s)->s_qf_names[qtype])
+- kfree(qf_names[qtype]);
+- qf_names[qtype] = NULL;
+ if (qtype == USRQUOTA)
+ *mount_options &= ~(1 << REISERFS_USRQUOTA);
+ else
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index ea80036d7897b..97a192eb9949c 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -139,21 +139,24 @@ void udf_evict_inode(struct inode *inode)
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int want_delete = 0;
+
+- if (!inode->i_nlink && !is_bad_inode(inode)) {
+- want_delete = 1;
+- udf_setsize(inode, 0);
+- udf_update_inode(inode, IS_SYNC(inode));
++ if (!is_bad_inode(inode)) {
++ if (!inode->i_nlink) {
++ want_delete = 1;
++ udf_setsize(inode, 0);
++ udf_update_inode(inode, IS_SYNC(inode));
++ }
++ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
++ inode->i_size != iinfo->i_lenExtents) {
++ udf_warn(inode->i_sb,
++ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
++ inode->i_ino, inode->i_mode,
++ (unsigned long long)inode->i_size,
++ (unsigned long long)iinfo->i_lenExtents);
++ }
+ }
+ truncate_inode_pages_final(&inode->i_data);
+ invalidate_inode_buffers(inode);
+ clear_inode(inode);
+- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
+- inode->i_size != iinfo->i_lenExtents) {
+- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
+- inode->i_ino, inode->i_mode,
+- (unsigned long long)inode->i_size,
+- (unsigned long long)iinfo->i_lenExtents);
+- }
+ kfree(iinfo->i_ext.i_data);
+ iinfo->i_ext.i_data = NULL;
+ udf_clear_extent_cache(inode);
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 4baa1ca91e9be..a0cd766b41cdb 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1352,6 +1352,12 @@ static int udf_load_sparable_map(struct super_block *sb,
+ (int)spm->numSparingTables);
+ return -EIO;
+ }
++ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Too big sparing table size (%u)\n",
++ le32_to_cpu(spm->sizeSparingTable));
++ return -EIO;
++ }
+
+ for (i = 0; i < spm->numSparingTables; i++) {
+ loc = le32_to_cpu(spm->locSparingTable[i]);
+diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
+index 8ea1efc97b41d..42085e70c01ac 100644
+--- a/fs/xfs/libxfs/xfs_rtbitmap.c
++++ b/fs/xfs/libxfs/xfs_rtbitmap.c
+@@ -1018,7 +1018,6 @@ xfs_rtalloc_query_range(
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_rtblock_t rtstart;
+ xfs_rtblock_t rtend;
+- xfs_rtblock_t rem;
+ int is_free;
+ int error = 0;
+
+@@ -1027,13 +1026,12 @@ xfs_rtalloc_query_range(
+ if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
+ low_rec->ar_startext == high_rec->ar_startext)
+ return 0;
+- if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+- high_rec->ar_startext = mp->m_sb.sb_rextents;
++ high_rec->ar_startext = min(high_rec->ar_startext,
++ mp->m_sb.sb_rextents - 1);
+
+ /* Iterate the bitmap, looking for discrepancies. */
+ rtstart = low_rec->ar_startext;
+- rem = high_rec->ar_startext - rtstart;
+- while (rem) {
++ while (rtstart <= high_rec->ar_startext) {
+ /* Is the first block free? */
+ error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
+ &is_free);
+@@ -1042,7 +1040,7 @@ xfs_rtalloc_query_range(
+
+ /* How long does the extent go for? */
+ error = xfs_rtfind_forw(mp, tp, rtstart,
+- high_rec->ar_startext - 1, &rtend);
++ high_rec->ar_startext, &rtend);
+ if (error)
+ break;
+
+@@ -1055,7 +1053,6 @@ xfs_rtalloc_query_range(
+ break;
+ }
+
+- rem -= rtend - rtstart + 1;
+ rtstart = rtend + 1;
+ }
+
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
+index c13754e119be1..01c0933a4d10d 100644
+--- a/fs/xfs/xfs_fsmap.c
++++ b/fs/xfs/xfs_fsmap.c
+@@ -26,7 +26,7 @@
+ #include "xfs_rtalloc.h"
+
+ /* Convert an xfs_fsmap to an fsmap. */
+-void
++static void
+ xfs_fsmap_from_internal(
+ struct fsmap *dest,
+ struct xfs_fsmap *src)
+@@ -154,8 +154,7 @@ xfs_fsmap_owner_from_rmap(
+ /* getfsmap query state */
+ struct xfs_getfsmap_info {
+ struct xfs_fsmap_head *head;
+- xfs_fsmap_format_t formatter; /* formatting fn */
+- void *format_arg; /* format buffer */
++ struct fsmap *fsmap_recs; /* mapping records */
+ struct xfs_buf *agf_bp; /* AGF, for refcount queries */
+ xfs_daddr_t next_daddr; /* next daddr we expect */
+ u64 missing_owner; /* owner of holes */
+@@ -223,6 +222,20 @@ xfs_getfsmap_is_shared(
+ return 0;
+ }
+
++static inline void
++xfs_getfsmap_format(
++ struct xfs_mount *mp,
++ struct xfs_fsmap *xfm,
++ struct xfs_getfsmap_info *info)
++{
++ struct fsmap *rec;
++
++ trace_xfs_getfsmap_mapping(mp, xfm);
++
++ rec = &info->fsmap_recs[info->head->fmh_entries++];
++ xfs_fsmap_from_internal(rec, xfm);
++}
++
+ /*
+ * Format a reverse mapping for getfsmap, having translated rm_startblock
+ * into the appropriate daddr units.
+@@ -255,6 +268,9 @@ xfs_getfsmap_helper(
+
+ /* Are we just counting mappings? */
+ if (info->head->fmh_count == 0) {
++ if (info->head->fmh_entries == UINT_MAX)
++ return -ECANCELED;
++
+ if (rec_daddr > info->next_daddr)
+ info->head->fmh_entries++;
+
+@@ -284,10 +300,7 @@ xfs_getfsmap_helper(
+ fmr.fmr_offset = 0;
+ fmr.fmr_length = rec_daddr - info->next_daddr;
+ fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
+- error = info->formatter(&fmr, info->format_arg);
+- if (error)
+- return error;
+- info->head->fmh_entries++;
++ xfs_getfsmap_format(mp, &fmr, info);
+ }
+
+ if (info->last)
+@@ -319,11 +332,8 @@ xfs_getfsmap_helper(
+ if (shared)
+ fmr.fmr_flags |= FMR_OF_SHARED;
+ }
+- error = info->formatter(&fmr, info->format_arg);
+- if (error)
+- return error;
+- info->head->fmh_entries++;
+
++ xfs_getfsmap_format(mp, &fmr, info);
+ out:
+ rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ if (info->next_daddr < rec_daddr)
+@@ -791,11 +801,11 @@ xfs_getfsmap_check_keys(
+ #endif /* CONFIG_XFS_RT */
+
+ /*
+- * Get filesystem's extents as described in head, and format for
+- * output. Calls formatter to fill the user's buffer until all
+- * extents are mapped, until the passed-in head->fmh_count slots have
+- * been filled, or until the formatter short-circuits the loop, if it
+- * is tracking filled-in extents on its own.
++ * Get filesystem's extents as described in head, and format for output. Fills
++ * in the supplied records array until there are no more reverse mappings to
++ * return or head.fmh_entries == head.fmh_count. In the second case, this
++ * function returns -ECANCELED to indicate that more records would have been
++ * returned.
+ *
+ * Key to Confusion
+ * ----------------
+@@ -815,8 +825,7 @@ int
+ xfs_getfsmap(
+ struct xfs_mount *mp,
+ struct xfs_fsmap_head *head,
+- xfs_fsmap_format_t formatter,
+- void *arg)
++ struct fsmap *fsmap_recs)
+ {
+ struct xfs_trans *tp = NULL;
+ struct xfs_fsmap dkeys[2]; /* per-dev keys */
+@@ -891,8 +900,7 @@ xfs_getfsmap(
+
+ info.next_daddr = head->fmh_keys[0].fmr_physical +
+ head->fmh_keys[0].fmr_length;
+- info.formatter = formatter;
+- info.format_arg = arg;
++ info.fsmap_recs = fsmap_recs;
+ info.head = head;
+
+ /*
+diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
+index c6c57739b8626..a0775788e7b13 100644
+--- a/fs/xfs/xfs_fsmap.h
++++ b/fs/xfs/xfs_fsmap.h
+@@ -27,13 +27,9 @@ struct xfs_fsmap_head {
+ struct xfs_fsmap fmh_keys[2]; /* low and high keys */
+ };
+
+-void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
+ void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
+
+-/* fsmap to userspace formatter - copy to user & advance pointer */
+-typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
+-
+ int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
+- xfs_fsmap_format_t formatter, void *arg);
++ struct fsmap *out_recs);
+
+ #endif /* __XFS_FSMAP_H__ */
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 60c4526312771..bf0435dbec436 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1832,39 +1832,17 @@ out_free_buf:
+ return error;
+ }
+
+-struct getfsmap_info {
+- struct xfs_mount *mp;
+- struct fsmap_head __user *data;
+- unsigned int idx;
+- __u32 last_flags;
+-};
+-
+-STATIC int
+-xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
+-{
+- struct getfsmap_info *info = priv;
+- struct fsmap fm;
+-
+- trace_xfs_getfsmap_mapping(info->mp, xfm);
+-
+- info->last_flags = xfm->fmr_flags;
+- xfs_fsmap_from_internal(&fm, xfm);
+- if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
+- sizeof(struct fsmap)))
+- return -EFAULT;
+-
+- return 0;
+-}
+-
+ STATIC int
+ xfs_ioc_getfsmap(
+ struct xfs_inode *ip,
+ struct fsmap_head __user *arg)
+ {
+- struct getfsmap_info info = { NULL };
+ struct xfs_fsmap_head xhead = {0};
+ struct fsmap_head head;
+- bool aborted = false;
++ struct fsmap *recs;
++ unsigned int count;
++ __u32 last_flags = 0;
++ bool done = false;
+ int error;
+
+ if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
+@@ -1876,38 +1854,112 @@ xfs_ioc_getfsmap(
+ sizeof(head.fmh_keys[1].fmr_reserved)))
+ return -EINVAL;
+
++ /*
++ * Use an internal memory buffer so that we don't have to copy fsmap
++ * data to userspace while holding locks. Start by trying to allocate
++ * up to 128k for the buffer, but fall back to a single page if needed.
++ */
++ count = min_t(unsigned int, head.fmh_count,
++ 131072 / sizeof(struct fsmap));
++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
++ if (!recs) {
++ count = min_t(unsigned int, head.fmh_count,
++ PAGE_SIZE / sizeof(struct fsmap));
++ recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
++ if (!recs)
++ return -ENOMEM;
++ }
++
+ xhead.fmh_iflags = head.fmh_iflags;
+- xhead.fmh_count = head.fmh_count;
+ xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
+ xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
+
+ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
+ trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
+
+- info.mp = ip->i_mount;
+- info.data = arg;
+- error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
+- if (error == -ECANCELED) {
+- error = 0;
+- aborted = true;
+- } else if (error)
+- return error;
++ head.fmh_entries = 0;
++ do {
++ struct fsmap __user *user_recs;
++ struct fsmap *last_rec;
++
++ user_recs = &arg->fmh_recs[head.fmh_entries];
++ xhead.fmh_entries = 0;
++ xhead.fmh_count = min_t(unsigned int, count,
++ head.fmh_count - head.fmh_entries);
++
++ /* Run query, record how many entries we got. */
++ error = xfs_getfsmap(ip->i_mount, &xhead, recs);
++ switch (error) {
++ case 0:
++ /*
++ * There are no more records in the result set. Copy
++ * whatever we got to userspace and break out.
++ */
++ done = true;
++ break;
++ case -ECANCELED:
++ /*
++ * The internal memory buffer is full. Copy whatever
++ * records we got to userspace and go again if we have
++ * not yet filled the userspace buffer.
++ */
++ error = 0;
++ break;
++ default:
++ goto out_free;
++ }
++ head.fmh_entries += xhead.fmh_entries;
++ head.fmh_oflags = xhead.fmh_oflags;
+
+- /* If we didn't abort, set the "last" flag in the last fmx */
+- if (!aborted && info.idx) {
+- info.last_flags |= FMR_OF_LAST;
+- if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
+- &info.last_flags, sizeof(info.last_flags)))
+- return -EFAULT;
++ /*
++ * If the caller wanted a record count or there aren't any
++ * new records to return, we're done.
++ */
++ if (head.fmh_count == 0 || xhead.fmh_entries == 0)
++ break;
++
++ /* Copy all the records we got out to userspace. */
++ if (copy_to_user(user_recs, recs,
++ xhead.fmh_entries * sizeof(struct fsmap))) {
++ error = -EFAULT;
++ goto out_free;
++ }
++
++ /* Remember the last record flags we copied to userspace. */
++ last_rec = &recs[xhead.fmh_entries - 1];
++ last_flags = last_rec->fmr_flags;
++
++ /* Set up the low key for the next iteration. */
++ xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
++ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
++ } while (!done && head.fmh_entries < head.fmh_count);
++
++ /*
++ * If there are no more records in the query result set and we're not
++ * in counting mode, mark the last record returned with the LAST flag.
++ */
++ if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
++ struct fsmap __user *user_rec;
++
++ last_flags |= FMR_OF_LAST;
++ user_rec = &arg->fmh_recs[head.fmh_entries - 1];
++
++ if (copy_to_user(&user_rec->fmr_flags, &last_flags,
++ sizeof(last_flags))) {
++ error = -EFAULT;
++ goto out_free;
++ }
+ }
+
+ /* copy back header */
+- head.fmh_entries = xhead.fmh_entries;
+- head.fmh_oflags = xhead.fmh_oflags;
+- if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
+- return -EFAULT;
++ if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
++ error = -EFAULT;
++ goto out_free;
++ }
+
+- return 0;
++out_free:
++ kmem_free(recs);
++ return error;
+ }
+
+ STATIC int
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 4a48a8c75b4f7..b583669370825 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -247,6 +247,9 @@ xfs_rtallocate_extent_block(
+ end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+ i <= end;
+ i++) {
++ /* Make sure we don't scan off the end of the rt volume. */
++ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
++
+ /*
+ * See if there's a free extent of maxlen starting at i.
+ * If it's not so then next will contain the first non-free.
+@@ -442,6 +445,14 @@ xfs_rtallocate_extent_near(
+ */
+ if (bno >= mp->m_sb.sb_rextents)
+ bno = mp->m_sb.sb_rextents - 1;
++
++ /* Make sure we don't run off the end of the rt volume. */
++ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
++ if (maxlen < minlen) {
++ *rtblock = NULLRTBLOCK;
++ return 0;
++ }
++
+ /*
+ * Try the exact allocation first.
+ */
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 26a6d58ca78cc..81c7ea83e8079 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -342,6 +342,7 @@ struct bpf_subprog_info {
+ u32 start; /* insn idx of function entry point */
+ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ u16 stack_depth; /* max. stack depth used by this function */
++ bool has_tail_call;
+ };
+
+ /* single container for all structs
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index c696c265f0193..b9df34326772c 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -55,6 +55,7 @@ struct oom_control {
+ };
+
+ extern struct mutex oom_lock;
++extern struct mutex oom_adj_mutex;
+
+ static inline void set_current_oom_origin(void)
+ {
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index 659045046468f..50c93ca0c3d6f 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -3,6 +3,7 @@
+ #define __LINUX_OVERFLOW_H
+
+ #include <linux/compiler.h>
++#include <linux/limits.h>
+
+ /*
+ * In the fallback code below, we need to compute the minimum and
+diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
+index 8679ccd722e89..3468794f83d23 100644
+--- a/include/linux/page_owner.h
++++ b/include/linux/page_owner.h
+@@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops;
+ extern void __reset_page_owner(struct page *page, unsigned int order);
+ extern void __set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask);
+-extern void __split_page_owner(struct page *page, unsigned int order);
++extern void __split_page_owner(struct page *page, unsigned int nr);
+ extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+ extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+ extern void __dump_page_owner(struct page *page);
+@@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page,
+ __set_page_owner(page, order, gfp_mask);
+ }
+
+-static inline void split_page_owner(struct page *page, unsigned int order)
++static inline void split_page_owner(struct page *page, unsigned int nr)
+ {
+ if (static_branch_unlikely(&page_owner_inited))
+- __split_page_owner(page, order);
++ __split_page_owner(page, nr);
+ }
+ static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+ {
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index e92bd9b32f369..6a6a819c5b49b 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -423,6 +423,7 @@ struct pci_dev {
+ unsigned int is_probed:1; /* Device probing in progress */
+ unsigned int link_active_reporting:1;/* Device capable of reporting link active */
+ unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
++ unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
+ pci_dev_flags_t dev_flags;
+ atomic_t enable_cnt; /* pci_enable_device has been called */
+
+diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
+index f3eaf9ec00a1b..70078be166e3c 100644
+--- a/include/linux/platform_data/dma-dw.h
++++ b/include/linux/platform_data/dma-dw.h
+@@ -21,6 +21,7 @@
+ * @dst_id: dst request line
+ * @m_master: memory master for transfers on allocated channel
+ * @p_master: peripheral master for transfers on allocated channel
++ * @channels: mask of the channels permitted for allocation (zero value means any)
+ * @hs_polarity:set active low polarity of handshake interface
+ */
+ struct dw_dma_slave {
+@@ -29,6 +30,7 @@ struct dw_dma_slave {
+ u8 dst_id;
+ u8 m_master;
+ u8 p_master;
++ u8 channels;
+ bool hs_polarity;
+ };
+
+diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
+index ecdc6542070f1..dfd82eab29025 100644
+--- a/include/linux/sched/coredump.h
++++ b/include/linux/sched/coredump.h
+@@ -72,6 +72,7 @@ static inline int get_dumpable(struct mm_struct *mm)
+ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
+ #define MMF_OOM_VICTIM 25 /* mm is the oom victim */
+ #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
++#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
+ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
+
+ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 4b15cc1c224c6..0278d63c15274 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -439,12 +439,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+ bool forwarding)
+ {
+ struct net *net = dev_net(dst->dev);
++ unsigned int mtu;
+
+ if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
+ ip_mtu_locked(dst) ||
+ !forwarding)
+ return dst_mtu(dst);
+
++ /* 'forwarding = true' case should always honour route mtu */
++ mtu = dst_metric_raw(dst, RTAX_MTU);
++ if (mtu)
++ return mtu;
++
+ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
+
+diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
+index 0d3920896d502..716db4a0fed89 100644
+--- a/include/net/netfilter/nf_log.h
++++ b/include/net/netfilter/nf_log.h
+@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+ unsigned int logflags);
+ void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+ struct sock *sk);
++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
+ void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+ unsigned int hooknum, const struct sk_buff *skb,
+ const struct net_device *in,
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index a91b2af64ec47..8e94279af47df 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -95,10 +95,11 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
+ size_t length) {
+ return -EINVAL;
+ }
+-static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
+- unsigned long pgsz_bitmap,
+- unsigned long virt) {
+- return -EINVAL;
++static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
++ unsigned long pgsz_bitmap,
++ unsigned long virt)
++{
++ return 0;
+ }
+
+ #endif /* CONFIG_INFINIBAND_USER_MEM */
+diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
+index 731ac09ed2313..5b567b43e1b16 100644
+--- a/include/scsi/scsi_common.h
++++ b/include/scsi/scsi_common.h
+@@ -25,6 +25,13 @@ scsi_command_size(const unsigned char *cmnd)
+ scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]);
+ }
+
++static inline unsigned char
++scsi_command_control(const unsigned char *cmnd)
++{
++ return (cmnd[0] == VARIABLE_LENGTH_CMD) ?
++ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1];
++}
++
+ /* Returns a human-readable name for the device */
+ extern const char *scsi_device_type(unsigned type);
+
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index 65c056ce91128..8341e2c489824 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -254,6 +254,7 @@ struct hda_codec {
+ unsigned int force_pin_prefix:1; /* Add location prefix */
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
++ unsigned int forced_resume:1; /* forced resume for jack */
+
+ #ifdef CONFIG_PM
+ unsigned long power_on_acct;
+diff --git a/include/trace/events/target.h b/include/trace/events/target.h
+index 914a872dd3435..e87a3716b0ac9 100644
+--- a/include/trace/events/target.h
++++ b/include/trace/events/target.h
+@@ -140,6 +140,7 @@ TRACE_EVENT(target_sequencer_start,
+ __field( unsigned int, opcode )
+ __field( unsigned int, data_length )
+ __field( unsigned int, task_attribute )
++ __field( unsigned char, control )
+ __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
+ __string( initiator, cmd->se_sess->se_node_acl->initiatorname )
+ ),
+@@ -149,6 +150,7 @@ TRACE_EVENT(target_sequencer_start,
+ __entry->opcode = cmd->t_task_cdb[0];
+ __entry->data_length = cmd->data_length;
+ __entry->task_attribute = cmd->sam_task_attr;
++ __entry->control = scsi_command_control(cmd->t_task_cdb);
+ memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE);
+ __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname);
+ ),
+@@ -158,9 +160,7 @@ TRACE_EVENT(target_sequencer_start,
+ show_opcode_name(__entry->opcode),
+ __entry->data_length, __print_hex(__entry->cdb, 16),
+ show_task_attribute_name(__entry->task_attribute),
+- scsi_command_size(__entry->cdb) <= 16 ?
+- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
+- __entry->cdb[1]
++ __entry->control
+ )
+ );
+
+@@ -175,6 +175,7 @@ TRACE_EVENT(target_cmd_complete,
+ __field( unsigned int, opcode )
+ __field( unsigned int, data_length )
+ __field( unsigned int, task_attribute )
++ __field( unsigned char, control )
+ __field( unsigned char, scsi_status )
+ __field( unsigned char, sense_length )
+ __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE )
+@@ -187,6 +188,7 @@ TRACE_EVENT(target_cmd_complete,
+ __entry->opcode = cmd->t_task_cdb[0];
+ __entry->data_length = cmd->data_length;
+ __entry->task_attribute = cmd->sam_task_attr;
++ __entry->control = scsi_command_control(cmd->t_task_cdb);
+ __entry->scsi_status = cmd->scsi_status;
+ __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ?
+ min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0;
+@@ -203,9 +205,7 @@ TRACE_EVENT(target_cmd_complete,
+ show_opcode_name(__entry->opcode),
+ __entry->data_length, __print_hex(__entry->cdb, 16),
+ show_task_attribute_name(__entry->task_attribute),
+- scsi_command_size(__entry->cdb) <= 16 ?
+- __entry->cdb[scsi_command_size(__entry->cdb) - 1] :
+- __entry->cdb[1]
++ __entry->control
+ )
+ );
+
+diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
+index bb7b271397a66..ceccd980ffcfe 100644
+--- a/include/uapi/linux/perf_event.h
++++ b/include/uapi/linux/perf_event.h
+@@ -1131,7 +1131,7 @@ union perf_mem_data_src {
+
+ #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
+ /* 1 free */
+-#define PERF_MEM_SNOOPX_SHIFT 37
++#define PERF_MEM_SNOOPX_SHIFT 38
+
+ /* locked instruction */
+ #define PERF_MEM_LOCK_NA 0x01 /* not available */
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index ae27dd77a73cb..507474f79195f 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1160,6 +1160,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
+ for (i = 0; i < insn_cnt; i++) {
+ u8 code = insn[i].code;
+
++ if (code == (BPF_JMP | BPF_CALL) &&
++ insn[i].imm == BPF_FUNC_tail_call &&
++ insn[i].src_reg != BPF_PSEUDO_CALL)
++ subprog[cur_subprog].has_tail_call = true;
+ if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
+ goto next;
+ if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+@@ -2612,6 +2616,31 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
+ int ret_prog[MAX_CALL_FRAMES];
+
+ process_func:
++ /* protect against potential stack overflow that might happen when
++ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
++ * depth for such case down to 256 so that the worst case scenario
++ * would result in 8k stack size (32 which is tailcall limit * 256 =
++ * 8k).
++ *
++ * To get the idea what might happen, see an example:
++ * func1 -> sub rsp, 128
++ * subfunc1 -> sub rsp, 256
++ * tailcall1 -> add rsp, 256
++ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
++ * subfunc2 -> sub rsp, 64
++ * subfunc22 -> sub rsp, 128
++ * tailcall2 -> add rsp, 128
++ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
++ *
++ * tailcall will unwind the current stack frame but it will not get rid
++ * of caller's stack as shown on the example above.
++ */
++ if (idx && subprog[idx].has_tail_call && depth >= 256) {
++ verbose(env,
++ "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
++ depth);
++ return -EACCES;
++ }
+ /* round up to 32-bytes, since this is granularity
+ * of interpreter stack size
+ */
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 3a5184eb6977d..46821793637a1 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -679,12 +679,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
+ size_avail = sizeof(kdb_buffer) - len;
+ goto kdb_print_out;
+ }
+- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
++ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) {
+ /*
+ * This was a interactive search (using '/' at more
+- * prompt) and it has completed. Clear the flag.
++ * prompt) and it has completed. Replace the \0 with
++ * its original value to ensure multi-line strings
++ * are handled properly, and return to normal mode.
+ */
++ *cphold = replaced_byte;
+ kdb_grepping_flag = 0;
++ }
+ /*
+ * at this point the string is a full line and
+ * should be printed, up to the null.
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 594272569a80f..e3d5963d8c6f5 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1750,6 +1750,25 @@ static __always_inline void delayed_free_task(struct task_struct *tsk)
+ free_task(tsk);
+ }
+
++static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
++{
++ /* Skip if kernel thread */
++ if (!tsk->mm)
++ return;
++
++ /* Skip if spawning a thread or using vfork */
++ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
++ return;
++
++ /* We need to synchronize with __set_oom_adj */
++ mutex_lock(&oom_adj_mutex);
++ set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
++ /* Update the values in case they were changed after copy_signal */
++ tsk->signal->oom_score_adj = current->signal->oom_score_adj;
++ tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
++ mutex_unlock(&oom_adj_mutex);
++}
++
+ /*
+ * This creates a new process as a copy of the old one,
+ * but does not actually start it yet.
+@@ -2222,6 +2241,8 @@ static __latent_entropy struct task_struct *copy_process(
+ trace_task_newtask(p, clone_flags);
+ uprobe_copy_process(p, clone_flags);
+
++ copy_oom_score_adj(clone_flags, p);
++
+ return p;
+
+ bad_fork_cancel_cgroup:
+diff --git a/kernel/module.c b/kernel/module.c
+index 819c5d3b4c295..45513909b01d5 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -88,8 +88,9 @@ EXPORT_SYMBOL_GPL(module_mutex);
+ static LIST_HEAD(modules);
+
+ /* Work queue for freeing init sections in success case */
+-static struct work_struct init_free_wq;
+-static struct llist_head init_free_list;
++static void do_free_init(struct work_struct *w);
++static DECLARE_WORK(init_free_wq, do_free_init);
++static LLIST_HEAD(init_free_list);
+
+ #ifdef CONFIG_MODULES_TREE_LOOKUP
+
+@@ -3563,14 +3564,6 @@ static void do_free_init(struct work_struct *w)
+ }
+ }
+
+-static int __init modules_wq_init(void)
+-{
+- INIT_WORK(&init_free_wq, do_free_init);
+- init_llist_head(&init_free_list);
+- return 0;
+-}
+-module_init(modules_wq_init);
+-
+ /*
+ * This is where the real work happens.
+ *
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index b6c5895ced36b..69c4cd472def3 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -839,17 +839,6 @@ static int software_resume(void)
+
+ /* Check if the device is there */
+ swsusp_resume_device = name_to_dev_t(resume_file);
+-
+- /*
+- * name_to_dev_t is ineffective to verify parition if resume_file is in
+- * integer format. (e.g. major:minor)
+- */
+- if (isdigit(resume_file[0]) && resume_wait) {
+- int partno;
+- while (!get_gendisk(swsusp_resume_device, &partno))
+- msleep(10);
+- }
+-
+ if (!swsusp_resume_device) {
+ /*
+ * Some device discovery might still be in progress; we need
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 79ce22de44095..4511532b08b84 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -36,7 +36,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
+
+ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
++#ifdef CONFIG_SCHED_DEBUG
+ /*
+ * Debugging: various feature bits
+ *
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b02a83ff40687..dddaf61378f62 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5936,7 +5936,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
+ /*
+ * Scan the local SMT mask for idle CPUs.
+ */
+-static int select_idle_smt(struct task_struct *p, int target)
++static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ int cpu, si_cpu = -1;
+
+@@ -5944,7 +5944,8 @@ static int select_idle_smt(struct task_struct *p, int target)
+ return -1;
+
+ for_each_cpu(cpu, cpu_smt_mask(target)) {
+- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++ if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
++ !cpumask_test_cpu(cpu, sched_domain_span(sd)))
+ continue;
+ if (available_idle_cpu(cpu))
+ return cpu;
+@@ -5962,7 +5963,7 @@ static inline int select_idle_core(struct task_struct *p, struct sched_domain *s
+ return -1;
+ }
+
+-static inline int select_idle_smt(struct task_struct *p, int target)
++static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ return -1;
+ }
+@@ -6072,7 +6073,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ if ((unsigned)i < nr_cpumask_bits)
+ return i;
+
+- i = select_idle_smt(p, target);
++ i = select_idle_smt(p, sd, target);
+ if ((unsigned)i < nr_cpumask_bits)
+ return i;
+
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 9f2a9e34a78d5..3e7590813844f 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1568,7 +1568,7 @@ enum {
+
+ #undef SCHED_FEAT
+
+-#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
++#ifdef CONFIG_SCHED_DEBUG
+
+ /*
+ * To support run-time toggling of sched features, all the translation units
+@@ -1576,6 +1576,7 @@ enum {
+ */
+ extern const_debug unsigned int sysctl_sched_features;
+
++#ifdef CONFIG_JUMP_LABEL
+ #define SCHED_FEAT(name, enabled) \
+ static __always_inline bool static_branch_##name(struct static_key *key) \
+ { \
+@@ -1588,7 +1589,13 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
+ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+ #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+
+-#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
++#else /* !CONFIG_JUMP_LABEL */
++
++#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
++
++#endif /* CONFIG_JUMP_LABEL */
++
++#else /* !SCHED_DEBUG */
+
+ /*
+ * Each translation unit has its own copy of sysctl_sched_features to allow
+@@ -1604,7 +1611,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
+
+ #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+
+-#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
++#endif /* SCHED_DEBUG */
+
+ extern struct static_key_false sched_numa_balancing;
+ extern struct static_key_false sched_schedstats;
+diff --git a/lib/crc32.c b/lib/crc32.c
+index 4a20455d1f61e..bf60ef26a45c2 100644
+--- a/lib/crc32.c
++++ b/lib/crc32.c
+@@ -331,7 +331,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
+ return crc;
+ }
+
+-#if CRC_LE_BITS == 1
++#if CRC_BE_BITS == 1
+ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+ {
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
+diff --git a/lib/idr.c b/lib/idr.c
+index c2cf2c52bbde5..4d2eef0259d2c 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -470,6 +470,7 @@ alloc:
+ goto retry;
+ nospc:
+ xas_unlock_irqrestore(&xas, flags);
++ kfree(alloc);
+ return -ENOSPC;
+ }
+ EXPORT_SYMBOL(ida_alloc_range);
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 51b2cb5aa5030..db542b4948838 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -847,10 +847,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ }
+ EXPORT_SYMBOL_GPL(replace_page_cache_page);
+
+-static int __add_to_page_cache_locked(struct page *page,
+- struct address_space *mapping,
+- pgoff_t offset, gfp_t gfp_mask,
+- void **shadowp)
++noinline int __add_to_page_cache_locked(struct page *page,
++ struct address_space *mapping,
++ pgoff_t offset, gfp_t gfp_mask,
++ void **shadowp)
+ {
+ XA_STATE(xas, &mapping->i_pages, offset);
+ int huge = PageHuge(page);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 873de55d93fb2..9295d9d70681e 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2569,7 +2569,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+
+ ClearPageCompound(head);
+
+- split_page_owner(head, HPAGE_PMD_ORDER);
++ split_page_owner(head, HPAGE_PMD_NR);
+
+ /* See comment in __split_huge_page_tail() */
+ if (PageAnon(head)) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index ca1632850fb76..edf98f8588eeb 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5398,7 +5398,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ struct page *page = NULL;
+ swp_entry_t ent = pte_to_swp_entry(ptent);
+
+- if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
++ if (!(mc.flags & MOVE_ANON))
+ return NULL;
+
+ /*
+@@ -5417,6 +5417,9 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ return page;
+ }
+
++ if (non_swap_entry(ent))
++ return NULL;
++
+ /*
+ * Because lookup_swap_cache() updates some statistics counter,
+ * we call find_get_page() with swapper_space directly.
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index d58c481b3df83..212e718743018 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -63,6 +63,8 @@ int sysctl_oom_dump_tasks = 1;
+ * and mark_oom_victim
+ */
+ DEFINE_MUTEX(oom_lock);
++/* Serializes oom_score_adj and oom_score_adj_min updates */
++DEFINE_MUTEX(oom_adj_mutex);
+
+ static inline bool is_memcg_oom(struct oom_control *oc)
+ {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index aff0bb4629bdf..c20e664866c33 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3130,7 +3130,7 @@ void split_page(struct page *page, unsigned int order)
+
+ for (i = 1; i < (1 << order); i++)
+ set_page_refcounted(page + i);
+- split_page_owner(page, order);
++ split_page_owner(page, 1 << order);
+ }
+ EXPORT_SYMBOL_GPL(split_page);
+
+@@ -3385,7 +3385,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+
+ #endif /* CONFIG_FAIL_PAGE_ALLOC */
+
+-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
++noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+ {
+ return __should_fail_alloc_page(gfp_mask, order);
+ }
+diff --git a/mm/page_owner.c b/mm/page_owner.c
+index 18ecde9f45b24..83d08943bcdee 100644
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -204,7 +204,7 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
+ page_owner->last_migrate_reason = reason;
+ }
+
+-void __split_page_owner(struct page *page, unsigned int order)
++void __split_page_owner(struct page *page, unsigned int nr)
+ {
+ int i;
+ struct page_ext *page_ext = lookup_page_ext(page);
+@@ -213,7 +213,7 @@ void __split_page_owner(struct page *page, unsigned int order)
+ if (unlikely(!page_ext))
+ return;
+
+- for (i = 0; i < (1 << order); i++) {
++ for (i = 0; i < nr; i++) {
+ page_owner = get_page_owner(page_ext);
+ page_owner->order = 0;
+ page_ext = page_ext_next(page_ext);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index cf62bdb7b3045..ff83ffe7a9108 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -3284,7 +3284,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ error = inode_drain_writes(inode);
+ if (error) {
+ inode->i_flags &= ~S_SWAPFILE;
+- goto bad_swap_unlock_inode;
++ goto free_swap_address_space;
+ }
+
+ mutex_lock(&swapon_mutex);
+@@ -3309,6 +3309,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+
+ error = 0;
+ goto out;
++free_swap_address_space:
++ exit_swap_address_space(p->type);
+ bad_swap_unlock_inode:
+ inode_unlock(inode);
+ bad_swap:
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 40e96a610e2b4..8648c5211ebe6 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1344,8 +1344,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+
+ parent = bt_sk(sk)->parent;
+
+- sock_set_flag(sk, SOCK_ZAPPED);
+-
+ switch (chan->state) {
+ case BT_OPEN:
+ case BT_BOUND:
+@@ -1372,8 +1370,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+
+ break;
+ }
+-
+ release_sock(sk);
++
++ /* Only zap after cleanup to avoid use after free race */
++ sock_set_flag(sk, SOCK_ZAPPED);
++
+ }
+
+ static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
+diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
+index 12a4f4d936810..3fda71a8579d1 100644
+--- a/net/bridge/netfilter/ebt_dnat.c
++++ b/net/bridge/netfilter/ebt_dnat.c
+@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ const struct ebt_nat_info *info = par->targinfo;
+
+- if (skb_ensure_writable(skb, ETH_ALEN))
++ if (skb_ensure_writable(skb, 0))
+ return EBT_DROP;
+
+ ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
+diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
+index 0cad62a4052b9..307790562b492 100644
+--- a/net/bridge/netfilter/ebt_redirect.c
++++ b/net/bridge/netfilter/ebt_redirect.c
+@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ const struct ebt_redirect_info *info = par->targinfo;
+
+- if (skb_ensure_writable(skb, ETH_ALEN))
++ if (skb_ensure_writable(skb, 0))
+ return EBT_DROP;
+
+ if (xt_hooknum(par) != NF_BR_BROUTING)
+diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
+index 27443bf229a3b..7dfbcdfc30e5d 100644
+--- a/net/bridge/netfilter/ebt_snat.c
++++ b/net/bridge/netfilter/ebt_snat.c
+@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ {
+ const struct ebt_nat_info *info = par->targinfo;
+
+- if (skb_ensure_writable(skb, ETH_ALEN * 2))
++ if (skb_ensure_writable(skb, 0))
+ return EBT_DROP;
+
+ ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index a8dd956b5e8e1..916fdf2464bc2 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
+ skb->dev = priv->ndev;
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+ /* reserve CAN header */
+ skb_reserve(skb, offsetof(struct can_frame, data));
+
+@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
+ skb->dev = priv->ndev;
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
++ can_skb_prv(skb)->skbcnt = 0;
+ skcb = j1939_skb_to_cb(skb);
+ memcpy(skcb, rel_skcb, sizeof(*skcb));
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index c441f9961e917..b040b7bf28582 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4270,7 +4270,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+ cmpxchg(&sk->sk_pacing_status,
+ SK_PACING_NONE,
+ SK_PACING_NEEDED);
+- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
++ sk->sk_max_pacing_rate = (val == ~0U) ?
++ ~0UL : (unsigned int)val;
+ sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+ sk->sk_max_pacing_rate);
+ break;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 919f1a1739e90..1b765e62658f0 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -928,8 +928,6 @@ set_rcvbuf:
+ break;
+
+ case SO_TIMESTAMPING_NEW:
+- sock_set_flag(sk, SOCK_TSTAMP_NEW);
+- /* fall through */
+ case SO_TIMESTAMPING_OLD:
+ if (val & ~SOF_TIMESTAMPING_MASK) {
+ ret = -EINVAL;
+@@ -958,16 +956,14 @@ set_rcvbuf:
+ }
+
+ sk->sk_tsflags = val;
++ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
++
+ if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
+ sock_enable_timestamp(sk,
+ SOCK_TIMESTAMPING_RX_SOFTWARE);
+- else {
+- if (optname == SO_TIMESTAMPING_NEW)
+- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
+-
++ else
+ sock_disable_timestamp(sk,
+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
+- }
+ break;
+
+ case SO_RCVLOWAT:
+@@ -1110,7 +1106,7 @@ set_rcvbuf:
+
+ case SO_MAX_PACING_RATE:
+ {
+- unsigned long ulval = (val == ~0U) ? ~0UL : val;
++ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
+
+ if (sizeof(ulval) != sizeof(val) &&
+ optlen >= sizeof(ulval) &&
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 73f46cb5e51da..d00533aea1f05 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -239,7 +239,7 @@ static struct {
+ /**
+ * icmp_global_allow - Are we allowed to send one more ICMP message ?
+ *
+- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
++ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
+ * Returns false if we reached the limit and can not send another packet.
+ * Note: called with BH disabled
+ */
+@@ -267,7 +267,10 @@ bool icmp_global_allow(void)
+ }
+ credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
+ if (credit) {
+- credit--;
++ /* We want to use a credit of one in average, but need to randomize
++ * it for security reasons.
++ */
++ credit = max_t(int, credit - prandom_u32_max(3), 0);
+ rc = true;
+ }
+ WRITE_ONCE(icmp_global.credit, credit);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 85ba1453ba5ca..fedad3a3e61b8 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -603,9 +603,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
+ }
+
+ if (dev->header_ops) {
+- /* Need space for new headers */
+- if (skb_cow_head(skb, dev->needed_headroom -
+- (tunnel->hlen + sizeof(struct iphdr))))
++ if (skb_cow_head(skb, 0))
+ goto free_skb;
+
+ tnl_params = (const struct iphdr *)skb->data;
+@@ -723,7 +721,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
+ len = tunnel->tun_hlen - len;
+ tunnel->hlen = tunnel->hlen + len;
+
+- dev->needed_headroom = dev->needed_headroom + len;
++ if (dev->header_ops)
++ dev->hard_header_len += len;
++ else
++ dev->needed_headroom += len;
++
+ if (set_mtu)
+ dev->mtu = max_t(int, dev->mtu - len, 68);
+
+@@ -926,6 +928,7 @@ static void __gre_tunnel_init(struct net_device *dev)
+ tunnel->parms.iph.protocol = IPPROTO_GRE;
+
+ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
++ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
+
+ dev->features |= GRE_FEATURES;
+ dev->hw_features |= GRE_FEATURES;
+@@ -969,10 +972,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
+ return -EINVAL;
+ dev->flags = IFF_BROADCAST;
+ dev->header_ops = &ipgre_header_ops;
++ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
++ dev->needed_headroom = 0;
+ }
+ #endif
+ } else if (!tunnel->collect_md) {
+ dev->header_ops = &ipgre_header_ops;
++ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
++ dev->needed_headroom = 0;
+ }
+
+ return ip_tunnel_init(dev);
+diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
+index 7a83f881efa9e..136030ad2e546 100644
+--- a/net/ipv4/netfilter/nf_log_arp.c
++++ b/net/ipv4/netfilter/nf_log_arp.c
+@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb, unsigned int nhoff)
+ {
+- const struct arphdr *ah;
+- struct arphdr _arph;
+ const struct arppayload *ap;
+ struct arppayload _arpp;
++ const struct arphdr *ah;
++ unsigned int logflags;
++ struct arphdr _arph;
+
+ ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
+ if (ah == NULL) {
+ nf_log_buf_add(m, "TRUNCATED");
+ return;
+ }
++
++ if (info->type == NF_LOG_TYPE_LOG)
++ logflags = info->u.log.logflags;
++ else
++ logflags = NF_LOG_DEFAULT_MASK;
++
++ if (logflags & NF_LOG_MACDECODE) {
++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
++ nf_log_dump_vlan(m, skb);
++ nf_log_buf_add(m, "MACPROTO=%04x ",
++ ntohs(eth_hdr(skb)->h_proto));
++ }
++
+ nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
+ ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
+
+diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
+index 4b2d49cc9f1a1..cb288ffbcfde2 100644
+--- a/net/ipv4/netfilter/nf_log_ipv4.c
++++ b/net/ipv4/netfilter/nf_log_ipv4.c
+@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
+
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
++ nf_log_dump_vlan(m, skb);
++ nf_log_buf_add(m, "MACPROTO=%04x ",
+ ntohs(eth_hdr(skb)->h_proto));
+ return;
+ default:
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index afa2c5049845f..ea32b113089d3 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -763,7 +763,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
+ remove_nh_grp_entry(net, nhge, nlinfo);
+
+ /* make sure all see the newly published array before releasing rtnl */
+- synchronize_rcu();
++ synchronize_net();
+ }
+
+ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 7a5f64cf1fdd2..a293d4968d1eb 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2728,10 +2728,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
+ if (IS_ERR(rt))
+ return rt;
+
+- if (flp4->flowi4_proto)
++ if (flp4->flowi4_proto) {
++ flp4->flowi4_oif = rt->dst.dev->ifindex;
+ rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
+ flowi4_to_flowi(flp4),
+ sk, 0);
++ }
+
+ return rt;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index ab5358281000e..62f8ba4048180 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5696,6 +5696,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
+ tcp_data_snd_check(sk);
+ if (!inet_csk_ack_scheduled(sk))
+ goto no_ack;
++ } else {
++ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
+ }
+
+ __tcp_ack_snd_check(sk, 0);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 9ca6c32065ec6..0646fce31b67a 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2519,8 +2519,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+ iter->skip = *pos;
+
+ if (iter->tbl) {
++ loff_t p = 0;
++
+ ipv6_route_seq_setup_walk(iter, net);
+- return ipv6_route_seq_next(seq, NULL, pos);
++ return ipv6_route_seq_next(seq, NULL, &p);
+ } else {
+ return NULL;
+ }
+diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
+index 22b80db6d8826..5b40258d3a5e9 100644
+--- a/net/ipv6/netfilter/nf_log_ipv6.c
++++ b/net/ipv6/netfilter/nf_log_ipv6.c
+@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
+
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+- ntohs(eth_hdr(skb)->h_proto));
++ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
++ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
++ nf_log_dump_vlan(m, skb);
++ nf_log_buf_add(m, "MACPROTO=%04x ",
++ ntohs(eth_hdr(skb)->h_proto));
+ return;
+ default:
+ break;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index a9dda5c228f60..fa293feef935d 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -698,7 +698,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
+ u16 brate;
+
+ sband = ieee80211_get_sband(sta->sdata);
+- if (sband) {
++ WARN_ON_ONCE(sband && !sband->bitrates);
++ if (sband && sband->bitrates) {
+ brate = sband->bitrates[rate->idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+ }
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index f5d96107af6de..4f14d8a06915a 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2083,6 +2083,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
+ int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
+
+ sband = local->hw.wiphy->bands[band];
++
++ if (WARN_ON_ONCE(!sband->bitrates))
++ break;
++
+ brate = sband->bitrates[rate_idx].bitrate;
+ if (rinfo->bw == RATE_INFO_BW_5)
+ shift = 2;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 3cccc88ef817b..99168af0c28d9 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2465,6 +2465,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+ /* Set timeout values for (tcp tcpfin udp) */
+ ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
+ goto out_unlock;
++ } else if (!len) {
++ /* No more commands with len == 0 below */
++ ret = -EINVAL;
++ goto out_unlock;
+ }
+
+ usvc_compat = (struct ip_vs_service_user *)arg;
+@@ -2541,9 +2545,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+ break;
+ case IP_VS_SO_SET_DELDEST:
+ ret = ip_vs_del_dest(svc, &udest);
+- break;
+- default:
+- ret = -EINVAL;
+ }
+
+ out_unlock:
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index c62a131a60948..cefc39878b1a4 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -615,6 +615,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
+ if (ret == NF_ACCEPT) {
+ nf_reset_ct(skb);
+ skb_forward_csum(skb);
++ if (skb->dev)
++ skb->tstamp = 0;
+ }
+ return ret;
+ }
+@@ -655,6 +657,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
+
+ if (!local) {
+ skb_forward_csum(skb);
++ if (skb->dev)
++ skb->tstamp = 0;
+ NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+ NULL, skb_dst(skb)->dev, dst_output);
+ } else
+@@ -675,6 +679,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
+ if (!local) {
+ ip_vs_drop_early_demux_sk(skb);
+ skb_forward_csum(skb);
++ if (skb->dev)
++ skb->tstamp = 0;
+ NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+ NULL, skb_dst(skb)->dev, dst_output);
+ } else
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 1926fd56df56a..848b137151c26 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ swin = win << sender->td_scale;
+ sender->td_maxwin = (swin == 0 ? 1 : swin);
+ sender->td_maxend = end + sender->td_maxwin;
+- /*
+- * We haven't seen traffic in the other direction yet
+- * but we have to tweak window tracking to pass III
+- * and IV until that happens.
+- */
+- if (receiver->td_maxwin == 0)
++ if (receiver->td_maxwin == 0) {
++ /* We haven't seen traffic in the other
++ * direction yet but we have to tweak window
++ * tracking to pass III and IV until that
++ * happens.
++ */
+ receiver->td_end = receiver->td_maxend = sack;
++ } else if (sack == receiver->td_end + 1) {
++ /* Likely a reply to a keepalive.
++ * Needed for III.
++ */
++ receiver->td_end++;
++ }
++
+ }
+ } else if (((state->state == TCP_CONNTRACK_SYN_SENT
+ && dir == IP_CT_DIR_ORIGINAL)
+diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
+index f108a76925dd8..ec6e7d6860163 100644
+--- a/net/netfilter/nf_dup_netdev.c
++++ b/net/netfilter/nf_dup_netdev.c
+@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
+ skb_push(skb, skb->mac_len);
+
+ skb->dev = dev;
++ skb->tstamp = 0;
+ dev_queue_xmit(skb);
+ }
+
+diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
+index ae5628ddbe6d7..fd7c5f0f5c25b 100644
+--- a/net/netfilter/nf_log_common.c
++++ b/net/netfilter/nf_log_common.c
+@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+ }
+ EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+
++void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
++{
++ u16 vid;
++
++ if (!skb_vlan_tag_present(skb))
++ return;
++
++ vid = skb_vlan_tag_get(skb);
++ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
++}
++EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
++
+ /* bridge and netdev logging families share this code. */
+ void nf_log_l2packet(struct net *net, u_int8_t pf,
+ __be16 protocol,
+diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
+index 3087e23297dbf..b77985986b24e 100644
+--- a/net/netfilter/nft_fwd_netdev.c
++++ b/net/netfilter/nft_fwd_netdev.c
+@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
+ return;
+
+ skb->dev = dev;
++ skb->tstamp = 0;
+ neigh_xmit(neigh_table, dev, addr, skb);
+ out:
+ regs->verdict.code = verdict;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 1b261375722e3..4170acc2dc282 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1225,7 +1225,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
+ u32 idx;
+ char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 4a5ef2adb2e57..1dc642b11443c 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -706,13 +706,6 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
+ return ret;
+ }
+
+-static int tcf_action_destroy_1(struct tc_action *a, int bind)
+-{
+- struct tc_action *actions[] = { a, NULL };
+-
+- return tcf_action_destroy(actions, bind);
+-}
+-
+ static int tcf_action_put(struct tc_action *p)
+ {
+ return __tcf_action_put(p, false);
+@@ -932,13 +925,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ if (err < 0)
+ goto err_mod;
+
+- if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+- !rcu_access_pointer(a->goto_chain)) {
+- tcf_action_destroy_1(a, bind);
+- NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
+- return ERR_PTR(-EINVAL);
+- }
+-
+ if (!name && tb[TCA_ACT_COOKIE])
+ tcf_set_action_cookie(&a->act_cookie, cookie);
+
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index bdaa04a9a7fa4..a5a2bf01eb9bc 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -315,7 +315,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+
+ metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
+ 0, flags,
+- key_id, 0);
++ key_id, opts_len);
+ } else {
+ NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
+ ret = -EINVAL;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index 0c5fcb8ed404d..aeea67f908415 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -795,7 +795,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
+ return buf_desc;
+ }
+
+-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
++#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+
+ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
+ bool is_dmb, int bufsize)
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 3645cd241d3ea..cf4d6d7e72822 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1095,9 +1095,9 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ struct gssp_in_token *in_token)
+ {
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+- unsigned int page_base, length;
+- int pages, i, res;
+- size_t inlen;
++ unsigned int length, pgto_offs, pgfrom_offs;
++ int pages, i, res, pgto, pgfrom;
++ size_t inlen, to_offs, from_offs;
+
+ res = gss_read_common_verf(gc, argv, authp, in_handle);
+ if (res)
+@@ -1125,17 +1125,24 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
+ inlen -= length;
+
+- i = 1;
+- page_base = rqstp->rq_arg.page_base;
++ to_offs = length;
++ from_offs = rqstp->rq_arg.page_base;
+ while (inlen) {
+- length = min_t(unsigned int, inlen, PAGE_SIZE);
+- memcpy(page_address(in_token->pages[i]),
+- page_address(rqstp->rq_arg.pages[i]) + page_base,
++ pgto = to_offs >> PAGE_SHIFT;
++ pgfrom = from_offs >> PAGE_SHIFT;
++ pgto_offs = to_offs & ~PAGE_MASK;
++ pgfrom_offs = from_offs & ~PAGE_MASK;
++
++ length = min_t(unsigned int, inlen,
++ min_t(unsigned int, PAGE_SIZE - pgto_offs,
++ PAGE_SIZE - pgfrom_offs));
++ memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
++ page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
+ length);
+
++ to_offs += length;
++ from_offs += length;
+ inlen -= length;
+- page_base = 0;
+- i++;
+ }
+ return 0;
+ }
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 217106c66a13c..25e8922c10b28 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -609,10 +609,11 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+ while (remaining) {
+ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+- memcpy(dst, page_address(*ppages), len);
++ memcpy(dst, page_address(*ppages) + pageoff, len);
+ remaining -= len;
+ dst += len;
+ pageoff = 0;
++ ppages++;
+ }
+ }
+
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index ee4b2261e7957..b0ed3c944b2d1 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ if (fragid == FIRST_FRAGMENT) {
+ if (unlikely(head))
+ goto err;
+- frag = skb_unshare(frag, GFP_ATOMIC);
++ if (skb_cloned(frag))
++ frag = skb_copy(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 25fca390cdcf5..933a3187d3bf2 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -405,14 +405,14 @@ static int tls_push_data(struct sock *sk,
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+ struct tls_record_info *record = ctx->open_record;
+ int tls_push_record_flags;
+ struct page_frag *pfrag;
+ size_t orig_size = size;
+ u32 max_open_record_len;
+- int copy, rc = 0;
++ bool more = false;
+ bool done = false;
++ int copy, rc = 0;
+ long timeo;
+
+ if (flags &
+@@ -480,9 +480,8 @@ handle_error:
+ if (!size) {
+ last_record:
+ tls_push_record_flags = flags;
+- if (more) {
+- tls_ctx->pending_open_record_frags =
+- !!record->num_frags;
++ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
++ more = true;
+ break;
+ }
+
+@@ -514,6 +513,8 @@ last_record:
+ }
+ } while (!done);
+
++ tls_ctx->pending_open_record_frags = more;
++
+ if (orig_size - size > 0)
+ rc = orig_size - size;
+
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 7bc4f37655237..672b70730e898 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2227,7 +2227,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
+ * case we'll continue with more data in the next round,
+ * but break unconditionally so unsplit data stops here.
+ */
+- state->split_start++;
++ if (state->split)
++ state->split_start++;
++ else
++ state->split_start = 0;
+ break;
+ case 9:
+ if (rdev->wiphy.extended_capabilities &&
+@@ -4496,16 +4499,14 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
+ if (err)
+ return err;
+
+- if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
+- !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
+- return -EINVAL;
+-
+- he_obss_pd->min_offset =
+- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
+- he_obss_pd->max_offset =
+- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
++ if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
++ he_obss_pd->min_offset =
++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
++ if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
++ he_obss_pd->max_offset =
++ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
+
+- if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
++ if (he_obss_pd->min_offset > he_obss_pd->max_offset)
+ return -EINVAL;
+
+ he_obss_pd->enable = true;
+diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
+index a11bf6c5b53b4..cd3f16a6f5caf 100644
+--- a/samples/mic/mpssd/mpssd.c
++++ b/samples/mic/mpssd/mpssd.c
+@@ -403,9 +403,9 @@ mic_virtio_copy(struct mic_info *mic, int fd,
+
+ static inline unsigned _vring_size(unsigned int num, unsigned long align)
+ {
+- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
++ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num)
+ + align - 1) & ~(align - 1))
+- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
++ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4);
+ }
+
+ /*
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index d5ad7b2539c75..d86825261b515 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -688,6 +688,8 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ /* now accumulate with current aggregate */
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
++ if (rc != 0)
++ return rc;
+ }
+ if (!rc)
+ crypto_shash_final(shash, digest);
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index c8b9c0b315d8f..250a92b187265 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -174,9 +174,12 @@ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (snd_BUG_ON(!dp))
+ return -ENXIO;
+
+- mutex_lock(&register_mutex);
++ if (cmd != SNDCTL_SEQ_SYNC &&
++ mutex_lock_interruptible(&register_mutex))
++ return -ERESTARTSYS;
+ rc = snd_seq_oss_ioctl(dp, cmd, arg);
+- mutex_unlock(&register_mutex);
++ if (cmd != SNDCTL_SEQ_SYNC)
++ mutex_unlock(&register_mutex);
+ return rc;
+ }
+
+diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
+index 45b740f44c459..c362eb38ab906 100644
+--- a/sound/firewire/bebob/bebob_hwdep.c
++++ b/sound/firewire/bebob/bebob_hwdep.c
+@@ -36,12 +36,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ }
+
+ memset(&event, 0, sizeof(event));
++ count = min_t(long, count, sizeof(event.lock_status));
+ if (bebob->dev_lock_changed) {
+ event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ event.lock_status.status = (bebob->dev_lock_count > 0);
+ bebob->dev_lock_changed = false;
+-
+- count = min_t(long, count, sizeof(event.lock_status));
+ }
+
+ spin_unlock_irq(&bebob->lock);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 590ea262f2e20..9a1968932b783 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1001,12 +1001,14 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
+ azx_init_pci(chip);
+ hda_intel_init_chip(chip, true);
+
+- if (status && from_rt) {
+- list_for_each_codec(codec, &chip->bus)
+- if (!codec->relaxed_resume &&
+- (status & (1 << codec->addr)))
+- schedule_delayed_work(&codec->jackpoll_work,
+- codec->jackpoll_interval);
++ if (from_rt) {
++ list_for_each_codec(codec, &chip->bus) {
++ if (codec->relaxed_resume)
++ continue;
++
++ if (codec->forced_resume || (status & (1 << codec->addr)))
++ pm_request_resume(hda_codec_dev(codec));
++ }
+ }
+
+ /* power down again for link-controlled chips */
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 6aa39339db0ab..459aff6c10bc5 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1065,6 +1065,7 @@ enum {
+ QUIRK_R3DI,
+ QUIRK_R3D,
+ QUIRK_AE5,
++ QUIRK_AE7,
+ };
+
+ #ifdef CONFIG_PCI
+@@ -1184,6 +1185,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
++ SND_PCI_QUIRK(0x1102, 0x0081, "Sound Blaster AE-7", QUIRK_AE7),
+ {}
+ };
+
+@@ -4674,6 +4676,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ tmp = FLOAT_THREE;
+ break;
++ case QUIRK_AE7:
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
++ tmp = FLOAT_THREE;
++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
++ SR_96_000);
++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
++ SR_96_000);
++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
++ break;
+ default:
+ tmp = FLOAT_ONE;
+ break;
+@@ -4719,6 +4730,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ break;
++ case QUIRK_AE7:
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
++ chipio_set_conn_rate(codec, MEM_CONNID_MICIN2,
++ SR_96_000);
++ chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2,
++ SR_96_000);
++ dspio_set_uint_param(codec, 0x80, 0x01, FLOAT_ZERO);
++ break;
+ default:
+ break;
+ }
+@@ -4728,7 +4747,10 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
+ chipio_set_conn_rate(codec, 0x0F, SR_96_000);
+
+- tmp = FLOAT_ZERO;
++ if (ca0132_quirk(spec) == QUIRK_AE7)
++ tmp = FLOAT_THREE;
++ else
++ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x80, 0x00, tmp);
+
+ switch (ca0132_quirk(spec)) {
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 51798632d334c..df4771b9eff24 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2001,22 +2001,25 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ int pinctl;
+ int err = 0;
+
++ mutex_lock(&spec->pcm_lock);
+ if (hinfo->nid) {
+ pcm_idx = hinfo_to_pcm_index(codec, hinfo);
+- if (snd_BUG_ON(pcm_idx < 0))
+- return -EINVAL;
++ if (snd_BUG_ON(pcm_idx < 0)) {
++ err = -EINVAL;
++ goto unlock;
++ }
+ cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
+- if (snd_BUG_ON(cvt_idx < 0))
+- return -EINVAL;
++ if (snd_BUG_ON(cvt_idx < 0)) {
++ err = -EINVAL;
++ goto unlock;
++ }
+ per_cvt = get_cvt(spec, cvt_idx);
+-
+ snd_BUG_ON(!per_cvt->assigned);
+ per_cvt->assigned = 0;
+ hinfo->nid = 0;
+
+ azx_stream(get_azx_dev(substream))->stripe = 0;
+
+- mutex_lock(&spec->pcm_lock);
+ snd_hda_spdif_ctls_unassign(codec, pcm_idx);
+ clear_bit(pcm_idx, &spec->pcm_in_use);
+ pin_idx = hinfo_to_pin_index(codec, hinfo);
+@@ -2044,10 +2047,11 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ per_pin->setup = false;
+ per_pin->channels = 0;
+ mutex_unlock(&per_pin->lock);
+- unlock:
+- mutex_unlock(&spec->pcm_lock);
+ }
+
++unlock:
++ mutex_unlock(&spec->pcm_lock);
++
+ return err;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4dfd714f718b8..7a24e9f0d2fe7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1141,6 +1141,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
+ codec->single_adc_amp = 1;
+ /* FIXME: do we need this for all Realtek codec models? */
+ codec->spdif_status_reset = 1;
++ codec->forced_resume = 1;
+ codec->patch_ops = alc_patch_ops;
+
+ err = alc_codec_rename_from_preset(codec);
+@@ -1920,6 +1921,8 @@ enum {
+ ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_CLEVO_PB51ED,
+ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
++ ALC887_FIXUP_ASUS_AUDIO,
++ ALC887_FIXUP_ASUS_HMIC,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2132,6 +2135,31 @@ static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+ }
+
++static void alc887_asus_hp_automute_hook(struct hda_codec *codec,
++ struct hda_jack_callback *jack)
++{
++ struct alc_spec *spec = codec->spec;
++ unsigned int vref;
++
++ snd_hda_gen_hp_automute(codec, jack);
++
++ if (spec->gen.hp_jack_present)
++ vref = AC_PINCTL_VREF_80;
++ else
++ vref = AC_PINCTL_VREF_HIZ;
++ snd_hda_set_pin_ctl(codec, 0x19, PIN_HP | vref);
++}
++
++static void alc887_fixup_asus_jack(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++ if (action != HDA_FIXUP_ACT_PROBE)
++ return;
++ snd_hda_set_pin_ctl_cache(codec, 0x1b, PIN_HP);
++ spec->gen.hp_automute_hook = alc887_asus_hp_automute_hook;
++}
++
+ static const struct hda_fixup alc882_fixups[] = {
+ [ALC882_FIXUP_ABIT_AW9D_MAX] = {
+ .type = HDA_FIXUP_PINS,
+@@ -2389,6 +2417,20 @@ static const struct hda_fixup alc882_fixups[] = {
+ .chained = true,
+ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
+ },
++ [ALC887_FIXUP_ASUS_AUDIO] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x15, 0x02a14150 }, /* use as headset mic, without its own jack detect */
++ { 0x19, 0x22219420 },
++ {}
++ },
++ },
++ [ALC887_FIXUP_ASUS_HMIC] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc887_fixup_asus_jack,
++ .chained = true,
++ .chain_id = ALC887_FIXUP_ASUS_AUDIO,
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2422,6 +2464,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
+ SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
++ SND_PCI_QUIRK(0x1043, 0x2390, "Asus D700SA", ALC887_FIXUP_ASUS_HMIC),
+ SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+@@ -6215,6 +6258,7 @@ enum {
+ ALC269_FIXUP_LEMOTE_A190X,
+ ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ ALC255_FIXUP_XIAOMI_HEADSET_MIC,
++ ALC274_FIXUP_HP_MIC,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7594,6 +7638,14 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC289_FIXUP_ASUS_GA401
+ },
++ [ALC274_FIXUP_HP_MIC] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7745,6 +7797,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
++ SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC),
++ SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -8070,6 +8124,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
++ {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+@@ -9633,6 +9688,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+ SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
+index 68165de1c8dea..7a1ffbaf48be5 100644
+--- a/sound/soc/codecs/tlv320aic32x4.c
++++ b/sound/soc/codecs/tlv320aic32x4.c
+@@ -662,7 +662,7 @@ static int aic32x4_set_processing_blocks(struct snd_soc_component *component,
+ }
+
+ static int aic32x4_setup_clocks(struct snd_soc_component *component,
+- unsigned int sample_rate)
++ unsigned int sample_rate, unsigned int channels)
+ {
+ u8 aosr;
+ u16 dosr;
+@@ -750,7 +750,9 @@ static int aic32x4_setup_clocks(struct snd_soc_component *component,
+ dosr);
+
+ clk_set_rate(clocks[5].clk,
+- sample_rate * 32);
++ sample_rate * 32 *
++ channels);
++
+ return 0;
+ }
+ }
+@@ -772,7 +774,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
+ u8 iface1_reg = 0;
+ u8 dacsetup_reg = 0;
+
+- aic32x4_setup_clocks(component, params_rate(params));
++ aic32x4_setup_clocks(component, params_rate(params),
++ params_channels(params));
+
+ switch (params_width(params)) {
+ case 16:
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 914b75c23d1bf..027259695551c 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -694,7 +694,7 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
+ return 0;
+ }
+
+-static struct snd_soc_dai_driver fsl_sai_dai = {
++static struct snd_soc_dai_driver fsl_sai_dai_template = {
+ .probe = fsl_sai_dai_probe,
+ .playback = {
+ .stream_name = "CPU-Playback",
+@@ -965,12 +965,15 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ memcpy(&sai->cpu_dai_drv, &fsl_sai_dai_template,
++ sizeof(fsl_sai_dai_template));
++
+ /* Sync Tx with Rx as default by following old DT binding */
+ sai->synchronous[RX] = true;
+ sai->synchronous[TX] = false;
+- fsl_sai_dai.symmetric_rates = 1;
+- fsl_sai_dai.symmetric_channels = 1;
+- fsl_sai_dai.symmetric_samplebits = 1;
++ sai->cpu_dai_drv.symmetric_rates = 1;
++ sai->cpu_dai_drv.symmetric_channels = 1;
++ sai->cpu_dai_drv.symmetric_samplebits = 1;
+
+ if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
+ of_find_property(np, "fsl,sai-asynchronous", NULL)) {
+@@ -987,9 +990,9 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ /* Discard all settings for asynchronous mode */
+ sai->synchronous[RX] = false;
+ sai->synchronous[TX] = false;
+- fsl_sai_dai.symmetric_rates = 0;
+- fsl_sai_dai.symmetric_channels = 0;
+- fsl_sai_dai.symmetric_samplebits = 0;
++ sai->cpu_dai_drv.symmetric_rates = 0;
++ sai->cpu_dai_drv.symmetric_channels = 0;
++ sai->cpu_dai_drv.symmetric_samplebits = 0;
+ }
+
+ if (of_find_property(np, "fsl,sai-mclk-direction-output", NULL) &&
+@@ -1018,7 +1021,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+- &fsl_sai_dai, 1);
++ &sai->cpu_dai_drv, 1);
+ if (ret)
+ goto err_pm_disable;
+
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 6aba7d28f5f34..677ecfc1ec68f 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -180,6 +180,7 @@ struct fsl_sai {
+ unsigned int bclk_ratio;
+
+ const struct fsl_sai_soc_data *soc_data;
++ struct snd_soc_dai_driver cpu_dai_drv;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ };
+diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
+index 15a27a2cd0cae..fad1eb6253d53 100644
+--- a/sound/soc/fsl/imx-es8328.c
++++ b/sound/soc/fsl/imx-es8328.c
+@@ -145,13 +145,13 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+- goto fail;
++ goto put_device;
+ }
+
+ comp = devm_kzalloc(dev, 3 * sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
+- goto fail;
++ goto put_device;
+ }
+
+ data->dev = dev;
+@@ -182,12 +182,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ ret = snd_soc_of_parse_card_name(&data->card, "model");
+ if (ret) {
+ dev_err(dev, "Unable to parse card name\n");
+- goto fail;
++ goto put_device;
+ }
+ ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
+ if (ret) {
+ dev_err(dev, "Unable to parse routing: %d\n", ret);
+- goto fail;
++ goto put_device;
+ }
+ data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
+@@ -196,10 +196,12 @@ static int imx_es8328_probe(struct platform_device *pdev)
+ ret = snd_soc_register_card(&data->card);
+ if (ret) {
+ dev_err(dev, "Unable to register: %d\n", ret);
+- goto fail;
++ goto put_device;
+ }
+
+ platform_set_drvdata(pdev, data);
++put_device:
++ put_device(&ssi_pdev->dev);
+ fail:
+ of_node_put(ssi_np);
+ of_node_put(codec_np);
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index dbce7e92baf3c..c5d6952a4a33f 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -174,21 +174,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
+-static int lpass_cpu_daiops_hw_free(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
+-{
+- struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
+- int ret;
+-
+- ret = regmap_write(drvdata->lpaif_map,
+- LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id),
+- 0);
+- if (ret)
+- dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
+-
+- return ret;
+-}
+-
+ static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
+@@ -269,7 +254,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+ .startup = lpass_cpu_daiops_startup,
+ .shutdown = lpass_cpu_daiops_shutdown,
+ .hw_params = lpass_cpu_daiops_hw_params,
+- .hw_free = lpass_cpu_daiops_hw_free,
+ .prepare = lpass_cpu_daiops_prepare,
+ .trigger = lpass_cpu_daiops_trigger,
+ };
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index 4c745baa39f73..9acaef81dd74c 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -61,7 +61,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
+ int ret, dma_ch, dir = substream->stream;
+ struct lpass_pcm_data *data;
+
+- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL);
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+@@ -119,6 +119,7 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
+ if (v->free_dma_channel)
+ v->free_dma_channel(drvdata, data->dma_ch);
+
++ kfree(data);
+ return 0;
+ }
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index ac2feddc75fdd..ea183922c4ef1 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1671,8 +1671,10 @@ static void setup_system_wide(int forks)
+ struct evsel *counter;
+
+ evlist__for_each_entry(evsel_list, counter) {
+- if (!counter->core.system_wide)
++ if (!counter->core.system_wide &&
++ strcmp(counter->name, "duration_time")) {
+ return;
++ }
+ }
+
+ if (evsel_list->core.nr_entries)
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index c5cce3a60476b..8aeaeba48a41f 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -974,6 +974,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
+
+ if (queue->tid == -1 || pt->have_sched_switch) {
+ ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
++ if (ptq->tid == -1)
++ ptq->pid = -1;
+ thread__zput(ptq->thread);
+ }
+
+@@ -2488,10 +2490,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
+ tid = sample->tid;
+ }
+
+- if (tid == -1) {
+- pr_err("context_switch event has no tid\n");
+- return -EINVAL;
+- }
++ if (tid == -1)
++ intel_pt_log("context_switch event has no tid\n");
+
+ intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
+ cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
+diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
+index 8995092d541ec..3b796dd5e5772 100644
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -523,8 +523,27 @@ static void *ida_random_fn(void *arg)
+ return NULL;
+ }
+
++static void *ida_leak_fn(void *arg)
++{
++ struct ida *ida = arg;
++ time_t s = time(NULL);
++ int i, ret;
++
++ rcu_register_thread();
++
++ do for (i = 0; i < 1000; i++) {
++ ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
++ if (ret >= 0)
++ ida_free(ida, 128);
++ } while (time(NULL) < s + 2);
++
++ rcu_unregister_thread();
++ return NULL;
++}
++
+ void ida_thread_tests(void)
+ {
++ DEFINE_IDA(ida);
+ pthread_t threads[20];
+ int i;
+
+@@ -536,6 +555,16 @@ void ida_thread_tests(void)
+
+ while (i--)
+ pthread_join(threads[i], NULL);
++
++ for (i = 0; i < ARRAY_SIZE(threads); i++)
++ if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
++ perror("creating ida thread");
++ exit(1);
++ }
++
++ while (i--)
++ pthread_join(threads[i], NULL);
++ assert(ida_is_empty(&ida));
+ }
+
+ void ida_tests(void)
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+index d22e438198cf7..9af8822ece477 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+@@ -18,11 +18,11 @@
+ #define MAX_ULONG_STR_LEN 7
+ #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+
++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
+ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+ {
+- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
+ unsigned char i;
+- char name[64];
++ char name[sizeof(tcp_mem_name)];
+ int ret;
+
+ memset(name, 0, sizeof(name));
+diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+index cb201cbe11e77..55251046c9b73 100644
+--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
++++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+@@ -18,11 +18,11 @@
+ #define MAX_ULONG_STR_LEN 7
+ #define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+
++const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
+ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
+ {
+- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
+ unsigned char i;
+- char name[64];
++ char name[sizeof(tcp_mem_name)];
+ int ret;
+
+ memset(name, 0, sizeof(name));
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+index f3eb8aacec0e7..a2b0e4eb1fe4c 100644
+--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+@@ -34,12 +34,12 @@ echo 'wakeup_latency u64 lat pid_t pid' >> synthetic_events
+ echo 'hist:keys=pid:ts1=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger
+ echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts1:onmatch(sched.sched_wakeup).wakeup_latency($wakeup_lat,next_pid) if next_comm=="ping"' > events/sched/sched_switch/trigger
+
+-echo 'waking+wakeup_latency u64 lat; pid_t pid' >> synthetic_events
+-echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking+wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
+-echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking+wakeup_latency/trigger
++echo 'waking_plus_wakeup_latency u64 lat; pid_t pid' >> synthetic_events
++echo 'hist:keys=pid,lat:sort=pid,lat:ww_lat=$waking_lat+$wakeup_lat:onmatch(synthetic.wakeup_latency).waking_plus_wakeup_latency($ww_lat,pid)' >> events/synthetic/wakeup_latency/trigger
++echo 'hist:keys=pid,lat:sort=pid,lat' >> events/synthetic/waking_plus_wakeup_latency/trigger
+
+ ping $LOCALHOST -c 3
+-if ! grep -q "pid:" events/synthetic/waking+wakeup_latency/hist; then
++if ! grep -q "pid:" events/synthetic/waking_plus_wakeup_latency/hist; then
+ fail "Failed to create combined histogram"
+ fi
+
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index b8503a8119b07..81fcc25a54db6 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -29,3 +29,4 @@ CONFIG_NET_SCH_FQ=m
+ CONFIG_NET_SCH_ETF=m
+ CONFIG_TEST_BLACKHOLE_DEV=m
+ CONFIG_KALLSYMS=y
++CONFIG_NET_FOU=m
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
+index a0b5f57d6bd31..0727e2012b685 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
+@@ -215,10 +215,16 @@ switch_create()
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++ sysctl_set net.ipv4.conf.all.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+
+ switch_destroy()
+ {
++ sysctl_restore net.ipv4.conf.all.rp_filter
++
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+@@ -359,6 +365,10 @@ ns_switch_create()
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++ sysctl_set net.ipv4.conf.all.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+ export -f ns_switch_create
+
+diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
+index 1209031bc794d..5d97fa347d75a 100755
+--- a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
++++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
+@@ -237,10 +237,16 @@ switch_create()
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++ sysctl_set net.ipv4.conf.all.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+
+ switch_destroy()
+ {
++ sysctl_restore net.ipv4.conf.all.rp_filter
++
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+@@ -402,6 +408,10 @@ ns_switch_create()
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
++
++ sysctl_set net.ipv4.conf.all.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan10-v.rp_filter 0
++ sysctl_set net.ipv4.conf.vlan20-v.rp_filter 0
+ }
+ export -f ns_switch_create
+
+diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
+index bdbf4b3125b6a..28ea3753da207 100755
+--- a/tools/testing/selftests/net/rtnetlink.sh
++++ b/tools/testing/selftests/net/rtnetlink.sh
+@@ -521,6 +521,11 @@ kci_test_encap_fou()
+ return $ksft_skip
+ fi
+
++ if ! /sbin/modprobe -q -n fou; then
++ echo "SKIP: module fou is not found"
++ return $ksft_skip
++ fi
++ /sbin/modprobe -q fou
+ ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
+ if [ $? -ne 0 ];then
+ echo "FAIL: can't add fou port 7777, skipping test"
+diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+index f988d2f42e8f2..cf001a2c69420 100755
+--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+@@ -1,17 +1,19 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0-only
+
++KSELFTESTS_SKIP=4
++
+ . ./eeh-functions.sh
+
+ if ! eeh_supported ; then
+ echo "EEH not supported on this system, skipping"
+- exit 0;
++ exit $KSELFTESTS_SKIP;
+ fi
+
+ if [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_check" ] && \
+ [ ! -e "/sys/kernel/debug/powerpc/eeh_dev_break" ] ; then
+ echo "debugfs EEH testing files are missing. Is debugfs mounted?"
+- exit 1;
++ exit $KSELFTESTS_SKIP;
+ fi
+
+ pre_lspci=`mktemp`
+@@ -79,4 +81,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
+ lspci | diff -u $pre_lspci -
+ rm -f $pre_lspci
+
+-exit $failed
++test "$failed" == 0
++exit $?