author      Mike Pagano <mpagano@gentoo.org>    2023-01-18 06:09:53 -0500
committer   Mike Pagano <mpagano@gentoo.org>    2023-01-18 06:09:53 -0500
commit      a862d7334573a3ac93c3b465eaac469aab04ec4b (patch)
tree        3ad82677c33c760b0b34f59c2bb3de559a8dd76e
parent      Linux patch 5.4.228 (diff)
download    linux-patches-a862d7334573a3ac93c3b465eaac469aab04ec4b.tar.gz
            linux-patches-a862d7334573a3ac93c3b465eaac469aab04ec4b.tar.bz2
            linux-patches-a862d7334573a3ac93c3b465eaac469aab04ec4b.zip
Linux patch 5.4.229
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--   0000_README                |     4
-rw-r--r--   1228_linux-5.4.229.patch   | 21195
2 files changed, 21199 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 63872f92..bec9c24b 100644
--- a/0000_README
+++ b/0000_README
@@ -955,6 +955,10 @@ Patch: 1227_linux-5.4.228.patch
From: http://www.kernel.org
Desc: Linux 5.4.228
+Patch: 1228_linux-5.4.229.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.229
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1228_linux-5.4.229.patch b/1228_linux-5.4.229.patch
new file mode 100644
index 00000000..102e76a7
--- /dev/null
+++ b/1228_linux-5.4.229.patch
@@ -0,0 +1,21195 @@
+diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+index 5d6ea66a863fe..1f75feec3dec6 100644
+--- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
++++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt
+@@ -109,7 +109,7 @@ audio-codec@1{
+ reg = <1 0>;
+ interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "intr2"
+- reset-gpios = <&msmgpio 64 0>;
++ reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>;
+ slim-ifc-dev = <&wc9335_ifd>;
+ clock-names = "mclk", "native";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>,
+diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst
+index f64cb666498aa..f28887045049d 100644
+--- a/Documentation/driver-api/spi.rst
++++ b/Documentation/driver-api/spi.rst
+@@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as
+ a pair of FIFOs connected to dual DMA engines on the other side of the
+ SPI shift register (maximizing throughput). Such drivers bridge between
+ whatever bus they sit on (often the platform bus) and SPI, and expose
+-the SPI side of their device as a :c:type:`struct spi_master
+-<spi_master>`. SPI devices are children of that master,
++the SPI side of their device as a :c:type:`struct spi_controller
++<spi_controller>`. SPI devices are children of that master,
+ represented as a :c:type:`struct spi_device <spi_device>` and
+ manufactured from :c:type:`struct spi_board_info
+ <spi_board_info>` descriptors which are usually provided by
+diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst
+index f51bb21d20e44..49b577307385c 100644
+--- a/Documentation/fault-injection/fault-injection.rst
++++ b/Documentation/fault-injection/fault-injection.rst
+@@ -74,8 +74,8 @@ configuration of fault-injection capabilities.
+
+ - /sys/kernel/debug/fail*/times:
+
+- specifies how many times failures may happen at most.
+- A value of -1 means "no limit".
++ specifies how many times failures may happen at most. A value of -1
++ means "no limit".
+
+ - /sys/kernel/debug/fail*/space:
+
+@@ -163,11 +163,13 @@ configuration of fault-injection capabilities.
+ - ERRNO: retval must be -1 to -MAX_ERRNO (-4096).
+ - ERR_NULL: retval must be 0 or -1 to -MAX_ERRNO (-4096).
+
+-- /sys/kernel/debug/fail_function/<functiuon-name>/retval:
++- /sys/kernel/debug/fail_function/<function-name>/retval:
+
+- specifies the "error" return value to inject to the given
+- function for given function. This will be created when
+- user specifies new injection entry.
++ specifies the "error" return value to inject to the given function.
++ This will be created when the user specifies a new injection entry.
++ Note that this file only accepts unsigned values. So, if you want to
++ use a negative errno, you better use 'printf' instead of 'echo', e.g.:
++ $ printf %#x -12 > retval
+
+ Boot option
+ ^^^^^^^^^^^
+@@ -331,7 +333,7 @@ Application Examples
+ FAILTYPE=fail_function
+ FAILFUNC=open_ctree
+ echo $FAILFUNC > /sys/kernel/debug/$FAILTYPE/inject
+- echo -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
++ printf %#x -12 > /sys/kernel/debug/$FAILTYPE/$FAILFUNC/retval
+ echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+ echo 100 > /sys/kernel/debug/$FAILTYPE/probability
+ echo 0 > /sys/kernel/debug/$FAILTYPE/interval
+diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
+index eeb394b39e2cc..8b416bfd75ac1 100644
+--- a/Documentation/sphinx/load_config.py
++++ b/Documentation/sphinx/load_config.py
+@@ -3,7 +3,7 @@
+
+ import os
+ import sys
+-from sphinx.util.pycompat import execfile_
++from sphinx.util.osutil import fs_encoding
+
+ # ------------------------------------------------------------------------------
+ def loadConfig(namespace):
+@@ -48,7 +48,9 @@ def loadConfig(namespace):
+ sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
+ config = namespace.copy()
+ config['__file__'] = config_file
+- execfile_(config_file, config)
++ with open(config_file, 'rb') as f:
++ code = compile(f.read(), fs_encoding, 'exec')
++ exec(code, config)
+ del config['__file__']
+ namespace.update(config)
+ else:
+diff --git a/Makefile b/Makefile
+index d70676d900f5b..abe29e47198b9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 228
++SUBLEVEL = 229
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
+index 2e09248f83242..c27d012327993 100644
+--- a/arch/alpha/kernel/entry.S
++++ b/arch/alpha/kernel/entry.S
+@@ -469,8 +469,10 @@ entSys:
+ #ifdef CONFIG_AUDITSYSCALL
+ lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ and $3, $6, $3
+-#endif
+ bne $3, strace
++#else
++ blbs $3, strace /* check for SYSCALL_TRACE in disguise */
++#endif
+ beq $4, 1f
+ ldq $27, 0($5)
+ 1: jsr $26, ($27), sys_ni_syscall
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
+index 46e6d3ed8f35a..c042c416a94a3 100644
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -74,7 +74,7 @@
+
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 2932a29ae272e..230f6dd876a2a 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -584,7 +584,7 @@
+
+ pcie1: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
+index cff1269f3fbfd..7146cc8f082af 100644
+--- a/arch/arm/boot/dts/armada-380.dtsi
++++ b/arch/arm/boot/dts/armada-380.dtsi
+@@ -79,7 +79,7 @@
+ /* x1 port */
+ pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -98,7 +98,7 @@
+ /* x1 port */
+ pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+index 92e08486ec81f..320c759b4090f 100644
+--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
++++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
+@@ -22,6 +22,12 @@
+ stdout-path = &uart0;
+ };
+
++ aliases {
++ ethernet0 = &eth0;
++ ethernet1 = &eth1;
++ ethernet2 = &eth2;
++ };
++
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>; /* 1024 MB */
+@@ -291,7 +297,17 @@
+ };
+ };
+
+- /* port 6 is connected to eth0 */
++ ports@6 {
++ reg = <6>;
++ label = "cpu";
++ ethernet = <&eth0>;
++ phy-mode = "rgmii-id";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ };
++ };
+ };
+ };
+ };
+diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
+index f0022d10c7159..f081f7cb66e5f 100644
+--- a/arch/arm/boot/dts/armada-385.dtsi
++++ b/arch/arm/boot/dts/armada-385.dtsi
+@@ -84,7 +84,7 @@
+ /* x1 port */
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -103,7 +103,7 @@
+ /* x1 port */
+ pcie3: pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -125,7 +125,7 @@
+ */
+ pcie4: pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
+index b1b86934c688a..cd7a46c48d192 100644
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -457,7 +457,7 @@
+ /* x1 port */
+ pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x40000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -476,7 +476,7 @@
+ /* x1 port */
+ pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x44000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -498,7 +498,7 @@
+ */
+ pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x48000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+index 8558bf6bb54c6..d55fe162fc7f0 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+@@ -97,7 +97,7 @@
+
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -115,7 +115,7 @@
+
+ pcie3: pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -133,7 +133,7 @@
+
+ pcie4: pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -151,7 +151,7 @@
+
+ pcie5: pcie@5,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+index 2d85fe8ac3272..fdcc818199401 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -112,7 +112,7 @@
+
+ pcie2: pcie@2,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -130,7 +130,7 @@
+
+ pcie3: pcie@3,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
++ assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+ reg = <0x1800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -148,7 +148,7 @@
+
+ pcie4: pcie@4,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
++ assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+ reg = <0x2000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -166,7 +166,7 @@
+
+ pcie5: pcie@5,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+ reg = <0x2800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -184,7 +184,7 @@
+
+ pcie6: pcie@6,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
++ assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
+ reg = <0x3000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -202,7 +202,7 @@
+
+ pcie7: pcie@7,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
++ assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
+ reg = <0x3800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -220,7 +220,7 @@
+
+ pcie8: pcie@8,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
++ assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
+ reg = <0x4000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -238,7 +238,7 @@
+
+ pcie9: pcie@9,0 {
+ device_type = "pci";
+- assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
++ assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
+ reg = <0x4800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index 2e8a3977219f1..347624ea96cd2 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -129,7 +129,7 @@
+ pcie1: pcie@2 {
+ device_type = "pci";
+ status = "disabled";
+- assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
++ assigned-addresses = <0x82001000 0 0x80000 0 0x2000>;
+ reg = <0x1000 0 0 0 0>;
+ clocks = <&gate_clk 5>;
+ marvell,pcie-port = <1>;
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 764984c95c686..8c8a576ab9c01 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1570,7 +1570,7 @@
+ };
+
+ etb@1a01000 {
+- compatible = "coresight-etb10", "arm,primecell";
++ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0x1a01000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
+index fd41243a0b2c0..9d5a04a46b14e 100644
+--- a/arch/arm/boot/dts/spear600.dtsi
++++ b/arch/arm/boot/dts/spear600.dtsi
+@@ -47,7 +47,7 @@
+ compatible = "arm,pl110", "arm,primecell";
+ reg = <0xfc200000 0x1000>;
+ interrupt-parent = <&vic1>;
+- interrupts = <12>;
++ interrupts = <13>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
+index 483df32583be6..0bdb872f5018b 100644
+--- a/arch/arm/mach-mmp/time.c
++++ b/arch/arm/mach-mmp/time.c
+@@ -44,18 +44,21 @@
+ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE;
+
+ /*
+- * FIXME: the timer needs some delay to stablize the counter capture
++ * Read the timer through the CVWR register. Delay is required after requesting
++ * a read. The CR register cannot be directly read due to metastability issues
++ * documented in the PXA168 software manual.
+ */
+ static inline uint32_t timer_read(void)
+ {
+- int delay = 100;
++ uint32_t val;
++ int delay = 3;
+
+ __raw_writel(1, mmp_timer_base + TMR_CVWR(1));
+
+ while (delay--)
+- cpu_relax();
++ val = __raw_readl(mmp_timer_base + TMR_CVWR(1));
+
+- return __raw_readl(mmp_timer_base + TMR_CVWR(1));
++ return val;
+ }
+
+ static u64 notrace mmp_read_sched_clock(void)
+diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile
+index 303400fa2cdf7..2aec85ab1e8b9 100644
+--- a/arch/arm/nwfpe/Makefile
++++ b/arch/arm/nwfpe/Makefile
+@@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \
+ entry.o
+
+ nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o
++
++# Try really hard to avoid generating calls to __aeabi_uldivmod() from
++# float64_rem() due to loop elision.
++ifdef CONFIG_CC_IS_CLANG
++CFLAGS_softfloat.o += -mllvm -replexitval=never
++endif
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index 2e8239d489f82..351e211afcf5a 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -122,9 +122,12 @@
+ /delete-property/ mrvl,i2c-fast-mode;
+ status = "okay";
+
++ /* MCP7940MT-I/MNY RTC */
+ rtc@6f {
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
++ interrupt-parent = <&gpiosb>;
++ interrupts = <5 0>; /* GPIO2_5 */
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+index 2b91daf5c1a64..45e37aa67ce73 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+@@ -26,14 +26,14 @@
+ stdout-path = "serial0:921600n8";
+ };
+
+- cpus_fixed_vproc0: fixedregulator@0 {
++ cpus_fixed_vproc0: regulator-vproc-buck0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vproc_buck0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+- cpus_fixed_vproc1: fixedregulator@1 {
++ cpus_fixed_vproc1: regulator-vproc-buck1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vproc_buck1";
+ regulator-min-microvolt = <1000000>;
+@@ -50,7 +50,7 @@
+ id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>;
+ };
+
+- usb_p0_vbus: regulator@2 {
++ usb_p0_vbus: regulator-usb-p0-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p0_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -59,7 +59,7 @@
+ enable-active-high;
+ };
+
+- usb_p1_vbus: regulator@3 {
++ usb_p1_vbus: regulator-usb-p1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p1_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -68,7 +68,7 @@
+ enable-active-high;
+ };
+
+- usb_p2_vbus: regulator@4 {
++ usb_p2_vbus: regulator-usb-p2-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p2_vbus";
+ regulator-min-microvolt = <5000000>;
+@@ -77,7 +77,7 @@
+ enable-active-high;
+ };
+
+- usb_p3_vbus: regulator@5 {
++ usb_p3_vbus: regulator-usb-p3-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "p3_vbus";
+ regulator-min-microvolt = <5000000>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+index 43307bad3f0d6..3b12bb313dcdf 100644
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -160,70 +160,70 @@
+ #clock-cells = <0>;
+ };
+
+- clk26m: oscillator@0 {
++ clk26m: oscillator-26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ clock-output-names = "clk26m";
+ };
+
+- clk32k: oscillator@1 {
++ clk32k: oscillator-32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "clk32k";
+ };
+
+- clkfpc: oscillator@2 {
++ clkfpc: oscillator-50m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "clkfpc";
+ };
+
+- clkaud_ext_i_0: oscillator@3 {
++ clkaud_ext_i_0: oscillator-aud0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <6500000>;
+ clock-output-names = "clkaud_ext_i_0";
+ };
+
+- clkaud_ext_i_1: oscillator@4 {
++ clkaud_ext_i_1: oscillator-aud1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <196608000>;
+ clock-output-names = "clkaud_ext_i_1";
+ };
+
+- clkaud_ext_i_2: oscillator@5 {
++ clkaud_ext_i_2: oscillator-aud2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <180633600>;
+ clock-output-names = "clkaud_ext_i_2";
+ };
+
+- clki2si0_mck_i: oscillator@6 {
++ clki2si0_mck_i: oscillator-i2s0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+ clock-output-names = "clki2si0_mck_i";
+ };
+
+- clki2si1_mck_i: oscillator@7 {
++ clki2si1_mck_i: oscillator-i2s1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+ clock-output-names = "clki2si1_mck_i";
+ };
+
+- clki2si2_mck_i: oscillator@8 {
++ clki2si2_mck_i: oscillator-i2s2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+ clock-output-names = "clki2si2_mck_i";
+ };
+
+- clktdmin_mclk_i: oscillator@9 {
++ clktdmin_mclk_i: oscillator-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <30000000>;
+@@ -266,7 +266,7 @@
+ reg = <0 0x10005000 0 0x1000>;
+ };
+
+- pio: pinctrl@10005000 {
++ pio: pinctrl@1000b000 {
+ compatible = "mediatek,mt2712-pinctrl";
+ reg = <0 0x1000b000 0 0x1000>;
+ mediatek,pctl-regmap = <&syscfg_pctl_a>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+index 2b2a69c7567fd..d4c78c9672ffe 100644
+--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+@@ -102,7 +102,7 @@
+ };
+ };
+
+- clk26m: oscillator@0 {
++ clk26m: oscillator-26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+index 99a28d64ee627..2b7923f1f0ecc 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+@@ -1310,7 +1310,7 @@ ap_ts_i2c: &i2c14 {
+ config {
+ pins = "gpio126";
+ function = "gpio";
+- bias-no-pull;
++ bias-disable;
+ drive-strength = <2>;
+ output-low;
+ };
+@@ -1320,7 +1320,7 @@ ap_ts_i2c: &i2c14 {
+ config {
+ pins = "gpio126";
+ function = "gpio";
+- bias-no-pull;
++ bias-disable;
+ drive-strength = <2>;
+ output-high;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index 840d6b9bbb598..5eb8f014c3f50 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -298,8 +298,10 @@
+ };
+
+ &qup_i2c12_default {
+- drive-strength = <2>;
+- bias-disable;
++ pinmux {
++ drive-strength = <2>;
++ bias-disable;
++ };
+ };
+
+ &qup_uart6_default {
+diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
+index 7b012148bfd6c..abd302e521c06 100644
+--- a/arch/arm64/include/asm/atomic_ll_sc.h
++++ b/arch/arm64/include/asm/atomic_ll_sc.h
+@@ -12,19 +12,6 @@
+
+ #include <linux/stringify.h>
+
+-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+-#define __LL_SC_FALLBACK(asm_ops) \
+-" b 3f\n" \
+-" .subsection 1\n" \
+-"3:\n" \
+-asm_ops "\n" \
+-" b 4f\n" \
+-" .previous\n" \
+-"4:\n"
+-#else
+-#define __LL_SC_FALLBACK(asm_ops) asm_ops
+-#endif
+-
+ #ifndef CONFIG_CC_HAS_K_CONSTRAINT
+ #define K
+ #endif
+@@ -43,12 +30,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+- __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ldxr %w0, %2\n" \
+-" " #asm_op " %w0, %w0, %w3\n" \
+-" stxr %w1, %w0, %2\n" \
+-" cbnz %w1, 1b\n") \
++ " prfm pstl1strm, %2\n" \
++ "1: ldxr %w0, %2\n" \
++ " " #asm_op " %w0, %w0, %w3\n" \
++ " stxr %w1, %w0, %2\n" \
++ " cbnz %w1, 1b\n" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i)); \
+ }
+@@ -61,13 +47,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return" #name "\n" \
+- __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ld" #acq "xr %w0, %2\n" \
+-" " #asm_op " %w0, %w0, %w3\n" \
+-" st" #rel "xr %w1, %w0, %2\n" \
+-" cbnz %w1, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %2\n" \
++ "1: ld" #acq "xr %w0, %2\n" \
++ " " #asm_op " %w0, %w0, %w3\n" \
++ " st" #rel "xr %w1, %w0, %2\n" \
++ " cbnz %w1, 1b\n" \
++ " " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -83,13 +68,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
+ int val, result; \
+ \
+ asm volatile("// atomic_fetch_" #op #name "\n" \
+- __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %3\n" \
+-"1: ld" #acq "xr %w0, %3\n" \
+-" " #asm_op " %w1, %w0, %w4\n" \
+-" st" #rel "xr %w2, %w1, %3\n" \
+-" cbnz %w2, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %3\n" \
++ "1: ld" #acq "xr %w0, %3\n" \
++ " " #asm_op " %w1, %w0, %w4\n" \
++ " st" #rel "xr %w2, %w1, %3\n" \
++ " cbnz %w2, 1b\n" \
++ " " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -142,12 +126,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+- __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ldxr %0, %2\n" \
+-" " #asm_op " %0, %0, %3\n" \
+-" stxr %w1, %0, %2\n" \
+-" cbnz %w1, 1b") \
++ " prfm pstl1strm, %2\n" \
++ "1: ldxr %0, %2\n" \
++ " " #asm_op " %0, %0, %3\n" \
++ " stxr %w1, %0, %2\n" \
++ " cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i)); \
+ }
+@@ -160,13 +143,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "_return" #name "\n" \
+- __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %2\n" \
+-"1: ld" #acq "xr %0, %2\n" \
+-" " #asm_op " %0, %0, %3\n" \
+-" st" #rel "xr %w1, %0, %2\n" \
+-" cbnz %w1, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %2\n" \
++ "1: ld" #acq "xr %0, %2\n" \
++ " " #asm_op " %0, %0, %3\n" \
++ " st" #rel "xr %w1, %0, %2\n" \
++ " cbnz %w1, 1b\n" \
++ " " #mb \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -176,19 +158,18 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
+
+ #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
+ static inline long \
+-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
++__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
+ { \
+ s64 result, val; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_fetch_" #op #name "\n" \
+- __LL_SC_FALLBACK( \
+-" prfm pstl1strm, %3\n" \
+-"1: ld" #acq "xr %0, %3\n" \
+-" " #asm_op " %1, %0, %4\n" \
+-" st" #rel "xr %w2, %1, %3\n" \
+-" cbnz %w2, 1b\n" \
+-" " #mb ) \
++ " prfm pstl1strm, %3\n" \
++ "1: ld" #acq "xr %0, %3\n" \
++ " " #asm_op " %1, %0, %4\n" \
++ " st" #rel "xr %w2, %1, %3\n" \
++ " cbnz %w2, 1b\n" \
++ " " #mb \
+ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
+ : __stringify(constraint) "r" (i) \
+ : cl); \
+@@ -240,15 +221,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
+ unsigned long tmp;
+
+ asm volatile("// atomic64_dec_if_positive\n"
+- __LL_SC_FALLBACK(
+-" prfm pstl1strm, %2\n"
+-"1: ldxr %0, %2\n"
+-" subs %0, %0, #1\n"
+-" b.lt 2f\n"
+-" stlxr %w1, %0, %2\n"
+-" cbnz %w1, 1b\n"
+-" dmb ish\n"
+-"2:")
++ " prfm pstl1strm, %2\n"
++ "1: ldxr %0, %2\n"
++ " subs %0, %0, #1\n"
++ " b.lt 2f\n"
++ " stlxr %w1, %0, %2\n"
++ " cbnz %w1, 1b\n"
++ " dmb ish\n"
++ "2:"
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ :
+ : "cc", "memory");
+@@ -274,7 +254,6 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
+ old = (u##sz)old; \
+ \
+ asm volatile( \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %[v]\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
+@@ -282,7 +261,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
+ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " cbnz %w[tmp], 1b\n" \
+ " " #mb "\n" \
+- "2:") \
++ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+ [v] "+Q" (*(u##sz *)ptr) \
+ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
+@@ -326,7 +305,6 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
+ unsigned long tmp, ret; \
+ \
+ asm volatile("// __cmpxchg_double" #name "\n" \
+- __LL_SC_FALLBACK( \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxp %0, %1, %2\n" \
+ " eor %0, %0, %3\n" \
+@@ -336,8 +314,8 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
+ " st" #rel "xp %w0, %5, %6, %2\n" \
+ " cbnz %w0, 1b\n" \
+ " " #mb "\n" \
+- "2:") \
+- : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
++ "2:" \
++ : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
+ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
+ : cl); \
+ \
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index da3280f639cd7..28e96118c1e5a 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -11,11 +11,11 @@
+ #define __ASM_ATOMIC_LSE_H
+
+ #define ATOMIC_OP(op, asm_op) \
+-static inline void __lse_atomic_##op(int i, atomic_t *v) \
++static inline void __lse_atomic_##op(int i, atomic_t *v) \
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op " %w[i], %[v]\n" \
++ " " #asm_op " %w[i], %[v]\n" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v)); \
+ }
+@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op #mb " %w[i], %w[i], %[v]" \
++ " " #asm_op #mb " %w[i], %w[i], %[v]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
+@@ -130,7 +130,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
+ " add %w[i], %w[i], %w[tmp]" \
+ : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
+ : "r" (v) \
+- : cl); \
++ : cl); \
+ \
+ return i; \
+ }
+@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op " %[i], %[v]\n" \
++ " " #asm_op " %[i], %[v]\n" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v)); \
+ }
+@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+ { \
+ asm volatile( \
+ __LSE_PREAMBLE \
+-" " #asm_op #mb " %[i], %[i], %[v]" \
++ " " #asm_op #mb " %[i], %[i], %[v]" \
+ : [i] "+r" (i), [v] "+Q" (v->counter) \
+ : "r" (v) \
+ : cl); \
+@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+ }
+
+ #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
+-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
++static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+ { \
+ unsigned long tmp; \
+ \
+@@ -403,7 +403,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
+ " eor %[old2], %[old2], %[oldval2]\n" \
+ " orr %[old1], %[old1], %[old2]" \
+ : [old1] "+&r" (x0), [old2] "+&r" (x1), \
+- [v] "+Q" (*(unsigned long *)ptr) \
++ [v] "+Q" (*(__uint128_t *)ptr) \
+ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
+ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
+ : cl); \
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index f65ff6b90f4a9..4a4c20a1bf265 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -378,8 +378,26 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+
+ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
+- if (kvm_vcpu_abt_iss1tw(vcpu))
+- return true;
++ if (kvm_vcpu_abt_iss1tw(vcpu)) {
++ /*
++ * Only a permission fault on a S1PTW should be
++ * considered as a write. Otherwise, page tables baked
++ * in a read-only memslot will result in an exception
++ * being delivered in the guest.
++ *
++ * The drawback is that we end-up faulting twice if the
++ * guest is using any of HW AF/DB: a translation fault
++ * to map the page containing the PT (read only at
++ * first), then a permission fault to allow the flags
++ * to be set.
++ */
++ switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
++ case ESR_ELx_FSC_PERM:
++ return true;
++ default:
++ return false;
++ }
++ }
+
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ return false;
+diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
+index dcfa0ea912fe1..f183c45503ce1 100644
+--- a/arch/mips/bcm63xx/clk.c
++++ b/arch/mips/bcm63xx/clk.c
+@@ -361,6 +361,8 @@ static struct clk clk_periph = {
+ */
+ int clk_enable(struct clk *clk)
+ {
++ if (!clk)
++ return 0;
+ mutex_lock(&clocks_mutex);
+ clk_enable_unlocked(clk);
+ mutex_unlock(&clocks_mutex);
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+index 2e2d45bc850d8..601afad60bfe9 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+@@ -211,7 +211,7 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
+ {
+ cvmx_helper_link_info_t result;
+
+- WARN(!octeon_is_simulation(),
++ WARN_ONCE(!octeon_is_simulation(),
+ "Using deprecated link status - please update your DT");
+
+ /* Unless we fix it later, all links are defaulted to down */
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+index de391541d6f7e..89a397c73aa64 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+@@ -1100,7 +1100,7 @@ cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+ if (index == 0)
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ else {
+- WARN(1, "Using deprecated link status - please update your DT");
++ WARN_ONCE(1, "Using deprecated link status - please update your DT");
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
+index 9268ebc0f61e6..903c07bdc92d9 100644
+--- a/arch/mips/kernel/vpe-cmp.c
++++ b/arch/mips/kernel/vpe-cmp.c
+@@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe);
+
+ static void vpe_device_release(struct device *cd)
+ {
+- kfree(cd);
+ }
+
+ static struct class vpe_class = {
+@@ -157,6 +156,7 @@ out_dev:
+ device_del(&vpe_device);
+
+ out_class:
++ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+ out_chrdev:
+@@ -169,7 +169,7 @@ void __exit vpe_module_exit(void)
+ {
+ struct vpe *v, *n;
+
+- device_del(&vpe_device);
++ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
+index 2e003b11a098f..9fd7cd48ea1d2 100644
+--- a/arch/mips/kernel/vpe-mt.c
++++ b/arch/mips/kernel/vpe-mt.c
+@@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe);
+
+ static void vpe_device_release(struct device *cd)
+ {
+- kfree(cd);
+ }
+
+ static struct class vpe_class = {
+@@ -497,6 +496,7 @@ out_dev:
+ device_del(&vpe_device);
+
+ out_class:
++ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+ out_chrdev:
+@@ -509,7 +509,7 @@ void __exit vpe_module_exit(void)
+ {
+ struct vpe *v, *n;
+
+- device_del(&vpe_device);
++ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
+index 6fd8871e4081e..6b5da7b06d43f 100644
+--- a/arch/parisc/include/uapi/asm/mman.h
++++ b/arch/parisc/include/uapi/asm/mman.h
+@@ -48,28 +48,27 @@
+ #define MADV_DONTFORK 10 /* don't inherit across fork */
+ #define MADV_DOFORK 11 /* do inherit across fork */
+
+-#define MADV_COLD 20 /* deactivate these pages */
+-#define MADV_PAGEOUT 21 /* reclaim these pages */
+-
+-#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
+-#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
++#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
++#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
+-#define MADV_HUGEPAGE 67 /* Worth backing with hugepages */
+-#define MADV_NOHUGEPAGE 68 /* Not worth backing with hugepages */
++#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
++#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+
+-#define MADV_DONTDUMP 69 /* Explicity exclude from the core dump,
++#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
+ overrides the coredump filter bits */
+-#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */
++#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+
+-#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */
+-#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */
++#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
++#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
++
++#define MADV_COLD 20 /* deactivate these pages */
++#define MADV_PAGEOUT 21 /* reclaim these pages */
+
+ #define MADV_HWPOISON 100 /* poison a page for testing */
+ #define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
+
+ /* compatibility flags */
+ #define MAP_FILE 0
+-#define MAP_VARIABLE 0
+
+ #define PKEY_DISABLE_ACCESS 0x1
+ #define PKEY_DISABLE_WRITE 0x2
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 5d458a44b09c6..a795ce76bafe9 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -373,3 +373,30 @@ long parisc_personality(unsigned long personality)
+
+ return err;
+ }
++
++/*
++ * madvise() wrapper
++ *
++ * Up to kernel v6.1 parisc has different values than all other
++ * platforms for the MADV_xxx flags listed below.
++ * To keep binary compatibility with existing userspace programs
++ * translate the former values to the new values.
++ *
++ * XXX: Remove this wrapper in year 2025 (or later)
++ */
++
++asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior)
++{
++ switch (behavior) {
++ case 65: behavior = MADV_MERGEABLE; break;
++ case 66: behavior = MADV_UNMERGEABLE; break;
++ case 67: behavior = MADV_HUGEPAGE; break;
++ case 68: behavior = MADV_NOHUGEPAGE; break;
++ case 69: behavior = MADV_DONTDUMP; break;
++ case 70: behavior = MADV_DODUMP; break;
++ case 71: behavior = MADV_WIPEONFORK; break;
++ case 72: behavior = MADV_KEEPONFORK; break;
++ }
++
++ return sys_madvise(start, len_in, behavior);
++}
+diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
+index 51f15a414e29a..e016bf6fae1e6 100644
+--- a/arch/parisc/kernel/syscalls/syscall.tbl
++++ b/arch/parisc/kernel/syscalls/syscall.tbl
+@@ -131,7 +131,7 @@
+ 116 common sysinfo sys_sysinfo compat_sys_sysinfo
+ 117 common shutdown sys_shutdown
+ 118 common fsync sys_fsync
+-119 common madvise sys_madvise
++119 common madvise parisc_madvise
+ 120 common clone sys_clone_wrapper
+ 121 common setdomainname sys_setdomainname
+ 122 common sendfile sys_sendfile compat_sys_sendfile
+diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
+index 4da4fcba0684b..11d2da692fe85 100644
+--- a/arch/powerpc/include/asm/imc-pmu.h
++++ b/arch/powerpc/include/asm/imc-pmu.h
+@@ -132,7 +132,7 @@ struct imc_pmu {
+ * are inited.
+ */
+ struct imc_pmu_ref {
+- struct mutex lock;
++ spinlock_t lock;
+ unsigned int id;
+ int refc;
+ };
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 35e246e39705b..139377f37b748 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -714,6 +714,7 @@ void __noreturn rtas_halt(void)
+
+ /* Must be in the RMO region, so we place it here */
+ static char rtas_os_term_buf[2048];
++static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;
+
+ void rtas_os_term(char *str)
+ {
+@@ -725,16 +726,20 @@ void rtas_os_term(char *str)
+ * this property may terminate the partition which we want to avoid
+ * since it interferes with panic_timeout.
+ */
+- if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
+- RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
++ if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
+ return;
+
+ snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
+
++ /*
++ * Keep calling as long as RTAS returns a "try again" status,
++ * but don't use rtas_busy_delay(), which potentially
++ * schedules.
++ */
+ do {
+- status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
++ status = rtas_call(ibm_os_term_token, 1, 1, NULL,
+ __pa(rtas_os_term_buf));
+- } while (rtas_busy_delay(status));
++ } while (rtas_busy_delay_time(status));
+
+ if (status != 0)
+ printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
+@@ -1215,6 +1220,13 @@ void __init rtas_initialize(void)
+ no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
+ rtas.entry = no_entry ? rtas.base : entry;
+
++ /*
++ * Discover these now to avoid device tree lookups in the
++ * panic path.
++ */
++ if (of_property_read_bool(rtas.dev, "ibm,extended-os-term"))
++ ibm_os_term_token = rtas_token("ibm,os-term");
++
+ /* If RTAS was found, allocate the RMO buffer for it and look for
+ * the stop-self token if any
+ */
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index c84bbd4298a04..4c9aaedd2b1bb 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -64,6 +64,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
+ next_sp = fp[0];
+
+ if (next_sp == sp + STACK_INT_FRAME_SIZE &&
++ validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
+ fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
+ /*
+ * This looks like an interrupt frame for an
+diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
+index 8965b4463d433..5e86371a20c78 100644
+--- a/arch/powerpc/perf/hv-gpci-requests.h
++++ b/arch/powerpc/perf/hv-gpci-requests.h
+@@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id)
+ )
+ #include I(REQUEST_END)
+
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6
+ /*
+ * Not available for counter_info_version >= 0x8, use
+ * run_instruction_cycles_by_partition(0x100) instead.
+@@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id)
+ __count(0x10, 8, cycles)
+ )
+ #include I(REQUEST_END)
++#endif
+
+ #define REQUEST_NAME system_performance_capabilities
+ #define REQUEST_NUM 0x40
+@@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged)
+ )
+ #include I(REQUEST_END)
+
++#ifdef ENABLE_EVENTS_COUNTERINFO_V6
+ #define REQUEST_NAME processor_bus_utilization_abc_links
+ #define REQUEST_NUM 0x50
+ #define REQUEST_IDX_KIND "hw_chip_id=?"
+@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx)
+ __count(0x28, 8, instructions_completed)
+ )
+ #include I(REQUEST_END)
++#endif
+
+ /* Processor_core_power_mode (0x95) skipped, no counters */
+ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped,
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 732cfc53e260d..ac3f3df57fe39 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -70,7 +70,7 @@ static struct attribute_group format_group = {
+
+ static struct attribute_group event_group = {
+ .name = "events",
+- .attrs = hv_gpci_event_attrs,
++ /* .attrs is set in init */
+ };
+
+ #define HV_CAPS_ATTR(_name, _format) \
+@@ -280,6 +280,7 @@ static int hv_gpci_init(void)
+ int r;
+ unsigned long hret;
+ struct hv_perf_caps caps;
++ struct hv_gpci_request_buffer *arg;
+
+ hv_gpci_assert_offsets_correct();
+
+@@ -298,6 +299,36 @@ static int hv_gpci_init(void)
+ /* sampling not supported */
+ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
++ arg = (void *)get_cpu_var(hv_gpci_reqb);
++ memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);
++
++ /*
++ * hcall H_GET_PERF_COUNTER_INFO populates the output
++ * counter_info_version value based on the system hypervisor.
++ * Pass the counter request 0x10 corresponds to request type
++ * 'Dispatch_timebase_by_processor', to get the supported
++ * counter_info_version.
++ */
++ arg->params.counter_request = cpu_to_be32(0x10);
++
++ r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
++ virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
++ if (r) {
++ pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r);
++ arg->params.counter_info_version_out = 0x8;
++ }
++
++ /*
++ * Use counter_info_version_out value to assign
++ * required hv-gpci event list.
++ */
++ if (arg->params.counter_info_version_out >= 0x8)
++ event_group.attrs = hv_gpci_event_attrs;
++ else
++ event_group.attrs = hv_gpci_event_attrs_v6;
++
++ put_cpu_var(hv_gpci_reqb);
++
+ r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
+ if (r)
+ return r;
+diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h
+index a3053eda5dcc3..060e464d35c6e 100644
+--- a/arch/powerpc/perf/hv-gpci.h
++++ b/arch/powerpc/perf/hv-gpci.h
+@@ -53,6 +53,7 @@ enum {
+ #define REQUEST_FILE "../hv-gpci-requests.h"
+ #define NAME_LOWER hv_gpci
+ #define NAME_UPPER HV_GPCI
++#define ENABLE_EVENTS_COUNTERINFO_V6
+ #include "req-gen/perf.h"
+ #undef REQUEST_FILE
+ #undef NAME_LOWER
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index d76e800a1f337..b0e4241e7609d 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -13,6 +13,7 @@
+ #include <asm/cputhreads.h>
+ #include <asm/smp.h>
+ #include <linux/string.h>
++#include <linux/spinlock.h>
+
+ /* Nest IMC data structures and variables */
+
+@@ -20,7 +21,7 @@
+ * Used to avoid races in counting the nest-pmu units during hotplug
+ * register and unregister
+ */
+-static DEFINE_MUTEX(nest_init_lock);
++static DEFINE_SPINLOCK(nest_init_lock);
+ static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
+ static struct imc_pmu **per_nest_pmu_arr;
+ static cpumask_t nest_imc_cpumask;
+@@ -49,7 +50,7 @@ static int trace_imc_mem_size;
+ * core and trace-imc
+ */
+ static struct imc_pmu_ref imc_global_refc = {
+- .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
++ .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
+ .id = 0,
+ .refc = 0,
+ };
+@@ -393,7 +394,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
+ get_hard_smp_processor_id(cpu));
+ /*
+ * If this is the last cpu in this chip then, skip the reference
+- * count mutex lock and make the reference count on this chip zero.
++ * count lock and make the reference count on this chip zero.
+ */
+ ref = get_nest_pmu_ref(cpu);
+ if (!ref)
+@@ -455,15 +456,15 @@ static void nest_imc_counters_release(struct perf_event *event)
+ /*
+ * See if we need to disable the nest PMU.
+ * If no events are currently in use, then we have to take a
+- * mutex to ensure that we don't race with another task doing
++ * lock to ensure that we don't race with another task doing
+ * enable or disable the nest counters.
+ */
+ ref = get_nest_pmu_ref(event->cpu);
+ if (!ref)
+ return;
+
+- /* Take the mutex lock for this node and then decrement the reference count */
+- mutex_lock(&ref->lock);
++ /* Take the lock for this node and then decrement the reference count */
++ spin_lock(&ref->lock);
+ if (ref->refc == 0) {
+ /*
+ * The scenario where this is true is, when perf session is
+@@ -475,7 +476,7 @@ static void nest_imc_counters_release(struct perf_event *event)
+ * an OPAL call to disable the engine in that node.
+ *
+ */
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ return;
+ }
+ ref->refc--;
+@@ -483,7 +484,7 @@ static void nest_imc_counters_release(struct perf_event *event)
+ rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
+ return;
+ }
+@@ -491,7 +492,7 @@ static void nest_imc_counters_release(struct perf_event *event)
+ WARN(1, "nest-imc: Invalid event reference count\n");
+ ref->refc = 0;
+ }
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ }
+
+ static int nest_imc_event_init(struct perf_event *event)
+@@ -550,26 +551,25 @@ static int nest_imc_event_init(struct perf_event *event)
+
+ /*
+ * Get the imc_pmu_ref struct for this node.
+- * Take the mutex lock and then increment the count of nest pmu events
+- * inited.
++ * Take the lock and then increment the count of nest pmu events inited.
+ */
+ ref = get_nest_pmu_ref(event->cpu);
+ if (!ref)
+ return -EINVAL;
+
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ if (ref->refc == 0) {
+ rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("nest-imc: Unable to start the counters for node %d\n",
+ node_id);
+ return rc;
+ }
+ }
+ ++ref->refc;
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+
+ event->destroy = nest_imc_counters_release;
+ return 0;
+@@ -605,9 +605,8 @@ static int core_imc_mem_init(int cpu, int size)
+ return -ENOMEM;
+ mem_info->vbase = page_address(page);
+
+- /* Init the mutex */
+ core_imc_refc[core_id].id = core_id;
+- mutex_init(&core_imc_refc[core_id].lock);
++ spin_lock_init(&core_imc_refc[core_id].lock);
+
+ rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
+ __pa((void *)mem_info->vbase),
+@@ -696,9 +695,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
+ perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
+ } else {
+ /*
+- * If this is the last cpu in this core then, skip taking refernce
+- * count mutex lock for this core and directly zero "refc" for
+- * this core.
++ * If this is the last cpu in this core then skip taking reference
++ * count lock for this core and directly zero "refc" for this core.
+ */
+ opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(cpu));
+@@ -713,11 +711,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
+ * last cpu in this core and core-imc event running
+ * in this cpu.
+ */
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == IMC_DOMAIN_CORE)
+ imc_global_refc.refc--;
+
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+ }
+ return 0;
+ }
+@@ -732,7 +730,7 @@ static int core_imc_pmu_cpumask_init(void)
+
+ static void reset_global_refc(struct perf_event *event)
+ {
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ imc_global_refc.refc--;
+
+ /*
+@@ -744,7 +742,7 @@ static void reset_global_refc(struct perf_event *event)
+ imc_global_refc.refc = 0;
+ imc_global_refc.id = 0;
+ }
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+ }
+
+ static void core_imc_counters_release(struct perf_event *event)
+@@ -757,17 +755,17 @@ static void core_imc_counters_release(struct perf_event *event)
+ /*
+ * See if we need to disable the IMC PMU.
+ * If no events are currently in use, then we have to take a
+- * mutex to ensure that we don't race with another task doing
++ * lock to ensure that we don't race with another task doing
+ * enable or disable the core counters.
+ */
+ core_id = event->cpu / threads_per_core;
+
+- /* Take the mutex lock and decrement the refernce count for this core */
++ /* Take the lock and decrement the refernce count for this core */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return;
+
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ if (ref->refc == 0) {
+ /*
+ * The scenario where this is true is, when perf session is
+@@ -779,7 +777,7 @@ static void core_imc_counters_release(struct perf_event *event)
+ * an OPAL call to disable the engine in that core.
+ *
+ */
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ return;
+ }
+ ref->refc--;
+@@ -787,7 +785,7 @@ static void core_imc_counters_release(struct perf_event *event)
+ rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
+ return;
+ }
+@@ -795,7 +793,7 @@ static void core_imc_counters_release(struct perf_event *event)
+ WARN(1, "core-imc: Invalid event reference count\n");
+ ref->refc = 0;
+ }
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+
+ reset_global_refc(event);
+ }
+@@ -833,7 +831,6 @@ static int core_imc_event_init(struct perf_event *event)
+ if ((!pcmi->vbase))
+ return -ENODEV;
+
+- /* Get the core_imc mutex for this core */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+@@ -841,22 +838,22 @@ static int core_imc_event_init(struct perf_event *event)
+ /*
+ * Core pmu units are enabled only when it is used.
+ * See if this is triggered for the first time.
+- * If yes, take the mutex lock and enable the core counters.
++ * If yes, take the lock and enable the core counters.
+ * If not, just increment the count in core_imc_refc struct.
+ */
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ if (ref->refc == 0) {
+ rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("core-imc: Unable to start the counters for core %d\n",
+ core_id);
+ return rc;
+ }
+ }
+ ++ref->refc;
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+
+ /*
+ * Since the system can run either in accumulation or trace-mode
+@@ -867,7 +864,7 @@ static int core_imc_event_init(struct perf_event *event)
+ * to know whether any other trace/thread imc
+ * events are running.
+ */
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+ /*
+ * No other trace/thread imc events are running in
+@@ -876,10 +873,10 @@ static int core_imc_event_init(struct perf_event *event)
+ imc_global_refc.id = IMC_DOMAIN_CORE;
+ imc_global_refc.refc++;
+ } else {
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+ return -EBUSY;
+ }
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+
+ event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
+ event->destroy = core_imc_counters_release;
+@@ -951,10 +948,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
+ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
+ /* Reduce the refc if thread-imc event running on this cpu */
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+ imc_global_refc.refc--;
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+
+ return 0;
+ }
+@@ -994,7 +991,7 @@ static int thread_imc_event_init(struct perf_event *event)
+ if (!target)
+ return -EINVAL;
+
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ /*
+ * Check if any other trace/core imc events are running in the
+ * system, if not set the global id to thread-imc.
+@@ -1003,10 +1000,10 @@ static int thread_imc_event_init(struct perf_event *event)
+ imc_global_refc.id = IMC_DOMAIN_THREAD;
+ imc_global_refc.refc++;
+ } else {
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+ return -EBUSY;
+ }
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+
+ event->pmu->task_ctx_nr = perf_sw_context;
+ event->destroy = reset_global_refc;
+@@ -1128,25 +1125,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
+ /*
+ * imc pmus are enabled only when it is used.
+ * See if this is triggered for the first time.
+- * If yes, take the mutex lock and enable the counters.
++ * If yes, take the lock and enable the counters.
+ * If not, just increment the count in ref count struct.
+ */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to start the counter\
+ for core %d\n", core_id);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ return 0;
+ }
+
+@@ -1163,12 +1160,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
+ return;
+ }
+
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to stop the counters\
+ for core %d\n", core_id);
+ return;
+@@ -1176,7 +1173,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+
+ /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
+ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+@@ -1217,9 +1214,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
+ }
+ }
+
+- /* Init the mutex, if not already */
+ trace_imc_refc[core_id].id = core_id;
+- mutex_init(&trace_imc_refc[core_id].lock);
++ spin_lock_init(&trace_imc_refc[core_id].lock);
+
+ mtspr(SPRN_LDBAR, 0);
+ return 0;
+@@ -1239,10 +1235,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
+ * Reduce the refc if any trace-imc event running
+ * on this cpu.
+ */
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+ imc_global_refc.refc--;
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+
+ return 0;
+ }
+@@ -1345,17 +1341,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
+ }
+
+ mtspr(SPRN_LDBAR, ldbar_value);
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ return 0;
+ }
+
+@@ -1388,19 +1384,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
+ return;
+ }
+
+- mutex_lock(&ref->lock);
++ spin_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+ pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+- mutex_unlock(&ref->lock);
++ spin_unlock(&ref->lock);
+
+ trace_imc_event_stop(event, flags);
+ }
+@@ -1424,7 +1420,7 @@ static int trace_imc_event_init(struct perf_event *event)
+ * no other thread is running any core/thread imc
+ * events
+ */
+- mutex_lock(&imc_global_refc.lock);
++ spin_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+ /*
+ * No core/thread imc events are running in the
+@@ -1433,10 +1429,10 @@ static int trace_imc_event_init(struct perf_event *event)
+ imc_global_refc.id = IMC_DOMAIN_TRACE;
+ imc_global_refc.refc++;
+ } else {
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+ return -EBUSY;
+ }
+- mutex_unlock(&imc_global_refc.lock);
++ spin_unlock(&imc_global_refc.lock);
+
+ event->hw.idx = -1;
+ target = event->hw.target;
+@@ -1509,10 +1505,10 @@ static int init_nest_pmu_ref(void)
+ i = 0;
+ for_each_node(nid) {
+ /*
+- * Mutex lock to avoid races while tracking the number of
++ * Take the lock to avoid races while tracking the number of
+ * sessions using the chip's nest pmu units.
+ */
+- mutex_init(&nest_imc_refc[i].lock);
++ spin_lock_init(&nest_imc_refc[i].lock);
+
+ /*
+ * Loop to init the "id" with the node_id. Variable "i" initialized to
+@@ -1609,7 +1605,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
+ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+ {
+ if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
+- mutex_lock(&nest_init_lock);
++ spin_lock(&nest_init_lock);
+ if (nest_pmus == 1) {
+ cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
+ kfree(nest_imc_refc);
+@@ -1619,7 +1615,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+
+ if (nest_pmus > 0)
+ nest_pmus--;
+- mutex_unlock(&nest_init_lock);
++ spin_unlock(&nest_init_lock);
+ }
+
+ /* Free core_imc memory */
+@@ -1776,11 +1772,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ * rest. To handle the cpuhotplug callback unregister, we track
+ * the number of nest pmus in "nest_pmus".
+ */
+- mutex_lock(&nest_init_lock);
++ spin_lock(&nest_init_lock);
+ if (nest_pmus == 0) {
+ ret = init_nest_pmu_ref();
+ if (ret) {
+- mutex_unlock(&nest_init_lock);
++ spin_unlock(&nest_init_lock);
+ kfree(per_nest_pmu_arr);
+ per_nest_pmu_arr = NULL;
+ goto err_free_mem;
+@@ -1788,7 +1784,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ /* Register for cpu hotplug notification. */
+ ret = nest_pmu_cpumask_init();
+ if (ret) {
+- mutex_unlock(&nest_init_lock);
++ spin_unlock(&nest_init_lock);
+ kfree(nest_imc_refc);
+ kfree(per_nest_pmu_arr);
+ per_nest_pmu_arr = NULL;
+@@ -1796,7 +1792,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
+ }
+ }
+ nest_pmus++;
+- mutex_unlock(&nest_init_lock);
++ spin_unlock(&nest_init_lock);
+ break;
+ case IMC_DOMAIN_CORE:
+ ret = core_imc_pmu_cpumask_init();
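/*
 * Annotation (not part of the patch above): the imc-pmu.c hunks convert the
 * per-core/nest reference counts from a mutex to a spinlock so they can be
 * taken from contexts that must not sleep, while keeping the same
 * "start hardware on the first user, stop it on the last user" discipline.
 * Below is a minimal user-space sketch of that discipline using pthread
 * spinlocks; start_hw_counters()/stop_hw_counters() are invented stand-ins
 * for the opal_imc_counters_start/stop calls, not kernel APIs.
 */
#include <pthread.h>
#include <stdio.h>

struct imc_ref {
        pthread_spinlock_t lock;
        int refc;
};

static int start_hw_counters(void) { puts("counters started"); return 0; }
static void stop_hw_counters(void) { puts("counters stopped"); }

static int ref_get(struct imc_ref *ref)
{
        int rc = 0;

        pthread_spin_lock(&ref->lock);
        if (ref->refc == 0)
                rc = start_hw_counters();    /* first user enables the HW */
        if (rc == 0)
                ref->refc++;
        pthread_spin_unlock(&ref->lock);
        return rc;
}

static void ref_put(struct imc_ref *ref)
{
        pthread_spin_lock(&ref->lock);
        if (--ref->refc == 0)
                stop_hw_counters();          /* last user disables the HW */
        else if (ref->refc < 0)
                ref->refc = 0;               /* defensive, as in the patch */
        pthread_spin_unlock(&ref->lock);
}

int main(void)
{
        struct imc_ref ref = { .refc = 0 };

        pthread_spin_init(&ref.lock, PTHREAD_PROCESS_PRIVATE);
        ref_get(&ref);
        ref_get(&ref);
        ref_put(&ref);
        ref_put(&ref);                       /* "counters stopped" here */
        pthread_spin_destroy(&ref.lock);
        return 0;
}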
+diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
+index fa9bc804e67af..6b2a59fefffa7 100644
+--- a/arch/powerpc/perf/req-gen/perf.h
++++ b/arch/powerpc/perf/req-gen/perf.h
+@@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \
+ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \
+ r_fields
+
++/* Generate event list for platforms with counter_info_version 0x6 or below */
++static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = {
++#include REQUEST_FILE
++ NULL
++};
++
++/*
++ * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci
++ * events were deprecated for platform firmware that supports
++ * counter_info_version 0x8 or above.
++ * Those deprecated events are still part of platform firmware that
++ * supports counter_info_version 0x6 and below. As per the getPerfCountInfo
++ * v1.018 documentation there is no counter_info_version 0x7.
++ * Undefining the macro ENABLE_EVENTS_COUNTERINFO_V6 here disables the addition
++ * of the deprecated events to the "hv_gpci_event_attrs" attribute group for
++ * platforms that support counter_info_version 0x8 or above.
++ */
++#undef ENABLE_EVENTS_COUNTERINFO_V6
++
++/* Generate event list for platforms with counter_info_version 0x8 or above */
+ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = {
+ #include REQUEST_FILE
+ NULL
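/*
 * Annotation (not part of the patch above): the hv-gpci hunk builds two
 * attribute arrays from the same request list by expanding it twice, with
 * ENABLE_EVENTS_COUNTERINFO_V6 undefined before the second expansion.  The
 * sketch below shows the underlying X-macro idea in a single file (the kernel
 * does it by re-including REQUEST_FILE; a second file cannot be shipped here,
 * so the legacy/non-legacy split is parameterized instead).  All event names
 * are made up.
 */
#include <stdio.h>

/* One X-macro list; E() marks always-present events, L() legacy ones. */
#define EVENT_LIST(E, L) \
        E("cycles")              \
        E("instructions")        \
        L("legacy_dispatch")     \
        L("legacy_stall")

#define AS_ENTRY(n)  n,
#define DROP(n)

static const char *events_v6[] = { EVENT_LIST(AS_ENTRY, AS_ENTRY) NULL };
static const char *events_v8[] = { EVENT_LIST(AS_ENTRY, DROP)     NULL };

static void dump(const char *tag, const char **tbl)
{
        printf("%s:", tag);
        for (; *tbl; tbl++)
                printf(" %s", *tbl);
        putchar('\n');
}

int main(void)
{
        dump("v6 firmware", events_v6);      /* keeps the deprecated events */
        dump("v8 firmware", events_v8);      /* deprecated events dropped   */
        return 0;
}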
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+index 05e19470d523c..22e264bd3ed21 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+@@ -530,6 +530,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op)
+ err_bcom_rx_irq:
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+ err_bcom_rx:
++ free_irq(lpbfifo.irq, &lpbfifo);
+ err_irq:
+ iounmap(lpbfifo.regs);
+ lpbfifo.regs = NULL;
+diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+index 4588ce632484e..b6354054f8834 100644
+--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
++++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+@@ -107,7 +107,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
+
+ goto next;
+ unreg:
+- platform_device_del(pdev);
++ platform_device_put(pdev);
+ err:
+ pr_err("%pOF: registration failed\n", np);
+ next:
+diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
+index b21d71badaec9..e6788bc065843 100644
+--- a/arch/powerpc/sysdev/xive/spapr.c
++++ b/arch/powerpc/sysdev/xive/spapr.c
+@@ -422,6 +422,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
+
+ data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
+ if (!data->trig_mmio) {
++ iounmap(data->eoi_mmio);
+ pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
+ return -ENOMEM;
+ }
+diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
+index e076437cfafec..20f0cc9628dab 100644
+--- a/arch/riscv/include/asm/uaccess.h
++++ b/arch/riscv/include/asm/uaccess.h
+@@ -235,7 +235,7 @@ do { \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __get_user((x), __p) : \
+- ((x) = 0, -EFAULT); \
++ ((x) = (__force __typeof__(x))0, -EFAULT); \
+ })
+
+ #define __put_user_asm(insn, x, ptr, err) \
+diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
+index 918f0ba4f4d20..5e26e2c4641b4 100644
+--- a/arch/s390/include/asm/percpu.h
++++ b/arch/s390/include/asm/percpu.h
+@@ -31,7 +31,7 @@
+ pcp_op_T__ *ptr__; \
+ preempt_disable_notrace(); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
+- prev__ = *ptr__; \
++ prev__ = READ_ONCE(*ptr__); \
+ do { \
+ old__ = prev__; \
+ new__ = old__ op (val); \
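/*
 * Annotation (not part of the patch above): the s390 percpu hunk wraps the
 * initial read in READ_ONCE() so the compiler performs exactly one load of
 * *ptr before the compare-and-swap retry loop, instead of being free to
 * re-fetch it.  A hedged user-space sketch of the same load-once-then-CAS
 * shape, using C11 atomics rather than the kernel macros:
 */
#include <stdatomic.h>
#include <stdio.h>

static long add_return(_Atomic long *ptr, long val)
{
        /* single explicit load, the analogue of READ_ONCE(*ptr) */
        long prev = atomic_load_explicit(ptr, memory_order_relaxed);
        long next;

        do {
                next = prev + val;
                /* on failure, prev is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(ptr, &prev, next));

        return next;
}

int main(void)
{
        _Atomic long counter = 40;

        printf("%ld\n", add_return(&counter, 2));   /* prints 42 */
        return 0;
}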
+diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
+index 53da174754d97..bf0596749ebd3 100644
+--- a/arch/s390/kernel/machine_kexec_file.c
++++ b/arch/s390/kernel/machine_kexec_file.c
+@@ -185,8 +185,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,
+
+ data->memsz = ALIGN(data->memsz, PAGE_SIZE);
+ buf.mem = data->memsz;
+- if (image->type == KEXEC_TYPE_CRASH)
+- buf.mem += crashk_res.start;
+
+ ptr = (void *)ipl_cert_list_addr;
+ end = ptr + ipl_cert_list_size;
+@@ -223,6 +221,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
+ data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
+ *lc_ipl_parmblock_ptr = (__u32)buf.mem;
+
++ if (image->type == KEXEC_TYPE_CRASH)
++ buf.mem += crashk_res.start;
++
+ ret = kexec_add_buffer(&buf);
+ out:
+ return ret;
+diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
+index 5521ea12f44e0..aa9b964575843 100644
+--- a/arch/x86/boot/bioscall.S
++++ b/arch/x86/boot/bioscall.S
+@@ -32,7 +32,7 @@ intcall:
+ movw %dx, %si
+ movw %sp, %di
+ movw $11, %cx
+- rep; movsd
++ rep; movsl
+
+ /* Pop full state from the stack */
+ popal
+@@ -67,7 +67,7 @@ intcall:
+ jz 4f
+ movw %sp, %si
+ movw $11, %cx
+- rep; movsd
++ rep; movsl
+ 4: addw $44, %sp
+
+ /* Restore state and return */
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 0f61f46e6086f..fe2edc760e609 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -2762,6 +2762,7 @@ static bool hswep_has_limit_sbox(unsigned int device)
+ return false;
+
+ pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
++ pci_dev_put(dev);
+ if (!hswep_get_chop(capid4))
+ return true;
+
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index 9bb71abd66bd3..37b36a8ce5fa7 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -140,6 +140,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
+ set_personality_ia32(false);
+
+ setup_new_exec(bprm);
++ install_exec_creds(bprm);
+
+ regs->cs = __USER32_CS;
+ regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
+@@ -156,8 +157,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
+ if (retval < 0)
+ return retval;
+
+- install_exec_creds(bprm);
+-
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+
+diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
+index 1835767aa3356..d716fe938fc03 100644
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -19,8 +19,8 @@
+ /*
+ * Definitions of Primary Processor-Based VM-Execution Controls.
+ */
+-#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
+-#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
++#define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
++#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
+ #define CPU_BASED_HLT_EXITING 0x00000080
+ #define CPU_BASED_INVLPG_EXITING 0x00000200
+ #define CPU_BASED_MWAIT_EXITING 0x00000400
+@@ -31,7 +31,7 @@
+ #define CPU_BASED_CR8_LOAD_EXITING 0x00080000
+ #define CPU_BASED_CR8_STORE_EXITING 0x00100000
+ #define CPU_BASED_TPR_SHADOW 0x00200000
+-#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
++#define CPU_BASED_NMI_WINDOW_EXITING 0x00400000
+ #define CPU_BASED_MOV_DR_EXITING 0x00800000
+ #define CPU_BASED_UNCOND_IO_EXITING 0x01000000
+ #define CPU_BASED_USE_IO_BITMAPS 0x02000000
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index 3eb8411ab60ef..e95b72ec19bc0 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -33,7 +33,7 @@
+ #define EXIT_REASON_TRIPLE_FAULT 2
+ #define EXIT_REASON_INIT_SIGNAL 3
+
+-#define EXIT_REASON_PENDING_INTERRUPT 7
++#define EXIT_REASON_INTERRUPT_WINDOW 7
+ #define EXIT_REASON_NMI_WINDOW 8
+ #define EXIT_REASON_TASK_SWITCH 9
+ #define EXIT_REASON_CPUID 10
+@@ -94,7 +94,7 @@
+ { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
+ { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
+ { EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \
+- { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
++ { EXIT_REASON_INTERRUPT_WINDOW, "INTERRUPT_WINDOW" }, \
+ { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
+ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
+ { EXIT_REASON_CPUID, "CPUID" }, \
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 69d92ed52e4c9..8dca326a9e78a 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1787,6 +1787,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ if (ctrl == PR_SPEC_FORCE_DISABLE)
+ task_set_spec_ib_force_disable(task);
+ task_update_spec_tif(task);
++ if (task == current)
++ indirect_branch_prediction_barrier();
+ break;
+ default:
+ return -ERANGE;
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index b224d4dae2ffb..896f456b3f5c8 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -659,7 +659,6 @@ void load_ucode_intel_ap(void)
+ else
+ iup = &intel_ucode_patch;
+
+-reget:
+ if (!*iup) {
+ patch = __load_ucode_intel(&uci);
+ if (!patch)
+@@ -670,12 +669,7 @@ reget:
+
+ uci.mc = *iup;
+
+- if (apply_microcode_early(&uci, true)) {
+- /* Mixed-silicon system? Try to refetch the proper patch: */
+- *iup = NULL;
+-
+- goto reget;
+- }
++ apply_microcode_early(&uci, true);
+ }
+
+ static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 28f786289fce4..8d6023e6ad9e3 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -577,8 +577,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
+ /*
+ * Ensure the task's closid and rmid are written before determining if
+ * the task is current that will decide if it will be interrupted.
++ * This pairs with the full barrier between the rq->curr update and
++ * resctrl_sched_in() during context switch.
+ */
+- barrier();
++ smp_mb();
+
+ /*
+ * By now, the task's closid and rmid are set. If the task is current
+@@ -2178,19 +2180,23 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ t->closid = to->closid;
+ t->rmid = to->mon.rmid;
+
+-#ifdef CONFIG_SMP
+ /*
+- * This is safe on x86 w/o barriers as the ordering
+- * of writing to task_cpu() and t->on_cpu is
+- * reverse to the reading here. The detection is
+- * inaccurate as tasks might move or schedule
+- * before the smp function call takes place. In
+- * such a case the function call is pointless, but
++ * Order the closid/rmid stores above before the loads
++ * in task_curr(). This pairs with the full barrier
++ * between the rq->curr update and resctrl_sched_in()
++ * during context switch.
++ */
++ smp_mb();
++
++ /*
++ * If the task is on a CPU, set the CPU in the mask.
++ * The detection is inaccurate as tasks might move or
++ * schedule before the smp function call takes place.
++ * In such a case the function call is pointless, but
+ * there is no other side effect.
+ */
+- if (mask && t->on_cpu)
++ if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
+ cpumask_set_cpu(task_cpu(t), mask);
+-#endif
+ }
+ }
+ read_unlock(&tasklist_lock);
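/*
 * Annotation (not part of the patch above): the resctrl hunks upgrade a
 * compiler barrier to smp_mb() so the closid/rmid stores are ordered before
 * the "is the task currently running?" check, pairing with the full barrier
 * between the rq->curr update and resctrl_sched_in() on the context-switch
 * side.  The user-space sketch below shows only the store/fence/load shape
 * (no resctrl specifics): with both fences in place, the two sides cannot
 * both miss each other's update.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int updated_closid;   /* "group moved the task" side  */
static atomic_int switched_in;      /* "task was scheduled in" side */

static void *mover(void *arg)
{
        (void)arg;
        atomic_store_explicit(&updated_closid, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load_explicit(&switched_in, memory_order_relaxed))
                puts("mover: task is running, must notify it");
        else
                puts("mover: task not running, no IPI needed");
        return NULL;
}

static void *scheduler(void *arg)
{
        (void)arg;
        atomic_store_explicit(&switched_in, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load_explicit(&updated_closid, memory_order_relaxed))
                puts("scheduler: sees the new closid/rmid");
        else
                puts("scheduler: old ids, but the mover will notice us");
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, mover, NULL);
        pthread_create(&b, NULL, scheduler, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}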
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index fae5b00cbccfb..f51fc7fde3a0c 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ switch (opc1) {
+ case 0xeb: /* jmp 8 */
+ case 0xe9: /* jmp 32 */
+- case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
+ break;
++ case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */
++ goto setup;
+
+ case 0xe8: /* call relative */
+ branch_clear_offset(auprobe, insn);
+@@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+ return -ENOTSUPP;
+ }
+
++setup:
+ auprobe->branch.opc1 = opc1;
+ auprobe->branch.ilen = insn->length;
+ auprobe->branch.offs = insn->immediate.value;
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 6ab0410f10309..00f3336194a96 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2073,8 +2073,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+ * EXEC CONTROLS
+ */
+ exec_control = vmx_exec_control(vmx); /* L0's desires */
+- exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+- exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
++ exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
++ exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+ exec_control |= vmcs12->cpu_based_vm_exec_control;
+
+@@ -2459,7 +2459,7 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
+ return -EINVAL;
+
+ if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
+- nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING)))
++ nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
+ return -EINVAL;
+
+ return 0;
+@@ -3039,7 +3039,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ u32 exit_qual;
+
+ evaluate_pending_interrupts = exec_controls_get(vmx) &
+- (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
++ (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
+ if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+ evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
+
+@@ -3090,7 +3090,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ }
+
+ enter_guest_mode(vcpu);
+- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
++ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
+ vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+@@ -3154,7 +3154,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ * 26.7 "VM-entry failures during or after loading guest state".
+ */
+ vmentry_fail_vmexit_guest_mode:
+- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
++ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
+ vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+ leave_guest_mode(vcpu);
+
+@@ -3267,8 +3267,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+ */
+ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+ !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
+- !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
+- !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
++ !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
++ !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
+ (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
+ vmx->nested.nested_run_pending = 0;
+ return kvm_vcpu_halt(vcpu);
+@@ -4073,7 +4073,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
+
+- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
++ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
+ vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
+ if (likely(!vmx->fail)) {
+@@ -5376,10 +5376,10 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
+ return false;
+ case EXIT_REASON_TRIPLE_FAULT:
+ return true;
+- case EXIT_REASON_PENDING_INTERRUPT:
+- return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
++ case EXIT_REASON_INTERRUPT_WINDOW:
++ return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
+ case EXIT_REASON_NMI_WINDOW:
+- return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
++ return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
+ case EXIT_REASON_TASK_SWITCH:
+ return true;
+ case EXIT_REASON_CPUID:
+@@ -5869,8 +5869,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
+ msrs->procbased_ctls_low =
+ CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ msrs->procbased_ctls_high &=
+- CPU_BASED_VIRTUAL_INTR_PENDING |
+- CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
++ CPU_BASED_INTR_WINDOW_EXITING |
++ CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
+ CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+@@ -5916,7 +5916,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
+ SECONDARY_EXEC_RDRAND_EXITING |
+ SECONDARY_EXEC_ENABLE_INVPCID |
+ SECONDARY_EXEC_RDSEED_EXITING |
+- SECONDARY_EXEC_XSAVES;
++ SECONDARY_EXEC_XSAVES |
++ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
+
+ /*
+ * We can emulate "VMCS shadowing," even if the hardware
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 52f024eeac3dc..df77207d93b06 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1780,7 +1780,7 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (is_guest_mode(vcpu) &&
+- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
++ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
+ return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+ return vcpu->arch.tsc_offset;
+@@ -1798,7 +1798,7 @@ static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ * to the newly set TSC to get L2's TSC.
+ */
+ if (is_guest_mode(vcpu) &&
+- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
++ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
+ g_tsc_offset = vmcs12->tsc_offset;
+
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+@@ -2425,7 +2425,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
+ CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_MOV_DR_EXITING |
+- CPU_BASED_USE_TSC_OFFSETING |
++ CPU_BASED_USE_TSC_OFFSETTING |
+ CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING |
+ CPU_BASED_INVLPG_EXITING |
+@@ -4458,7 +4458,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+
+ static void enable_irq_window(struct kvm_vcpu *vcpu)
+ {
+- exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
++ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
+ }
+
+ static void enable_nmi_window(struct kvm_vcpu *vcpu)
+@@ -4469,7 +4469,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
+ return;
+ }
+
+- exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
++ exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
+ }
+
+ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
+@@ -4802,7 +4802,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+-static int handle_external_interrupt(struct kvm_vcpu *vcpu)
++static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
+ {
+ ++vcpu->stat.irq_exits;
+ return 1;
+@@ -5074,21 +5074,6 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+ vmcs_writel(GUEST_DR7, val);
+ }
+
+-static int handle_cpuid(struct kvm_vcpu *vcpu)
+-{
+- return kvm_emulate_cpuid(vcpu);
+-}
+-
+-static int handle_rdmsr(struct kvm_vcpu *vcpu)
+-{
+- return kvm_emulate_rdmsr(vcpu);
+-}
+-
+-static int handle_wrmsr(struct kvm_vcpu *vcpu)
+-{
+- return kvm_emulate_wrmsr(vcpu);
+-}
+-
+ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
+ {
+ kvm_apic_update_ppr(vcpu);
+@@ -5097,7 +5082,7 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
+
+ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
+ {
+- exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
++ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
+
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+@@ -5105,11 +5090,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
+ return 1;
+ }
+
+-static int handle_halt(struct kvm_vcpu *vcpu)
+-{
+- return kvm_emulate_halt(vcpu);
+-}
+-
+ static int handle_vmcall(struct kvm_vcpu *vcpu)
+ {
+ return kvm_emulate_hypercall(vcpu);
+@@ -5315,7 +5295,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
+ static int handle_nmi_window(struct kvm_vcpu *vcpu)
+ {
+ WARN_ON_ONCE(!enable_vnmi);
+- exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
++ exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
+ ++vcpu->stat.nmi_window_exits;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+@@ -5336,7 +5316,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
+
+ intr_window_requested = exec_controls_get(vmx) &
+- CPU_BASED_VIRTUAL_INTR_PENDING;
++ CPU_BASED_INTR_WINDOW_EXITING;
+
+ while (vmx->emulation_required && count-- != 0) {
+ if (intr_window_requested && vmx_interrupt_allowed(vcpu))
+@@ -5657,11 +5637,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ [EXIT_REASON_IO_INSTRUCTION] = handle_io,
+ [EXIT_REASON_CR_ACCESS] = handle_cr,
+ [EXIT_REASON_DR_ACCESS] = handle_dr,
+- [EXIT_REASON_CPUID] = handle_cpuid,
+- [EXIT_REASON_MSR_READ] = handle_rdmsr,
+- [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
+- [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
+- [EXIT_REASON_HLT] = handle_halt,
++ [EXIT_REASON_CPUID] = kvm_emulate_cpuid,
++ [EXIT_REASON_MSR_READ] = kvm_emulate_rdmsr,
++ [EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr,
++ [EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window,
++ [EXIT_REASON_HLT] = kvm_emulate_halt,
+ [EXIT_REASON_INVD] = handle_invd,
+ [EXIT_REASON_INVLPG] = handle_invlpg,
+ [EXIT_REASON_RDPMC] = handle_rdpmc,
+@@ -6035,9 +6015,23 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ }
+
+ if (exit_reason < kvm_vmx_max_exit_handlers
+- && kvm_vmx_exit_handlers[exit_reason])
++ && kvm_vmx_exit_handlers[exit_reason]) {
++#ifdef CONFIG_RETPOLINE
++ if (exit_reason == EXIT_REASON_MSR_WRITE)
++ return kvm_emulate_wrmsr(vcpu);
++ else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
++ return handle_preemption_timer(vcpu);
++ else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
++ return handle_interrupt_window(vcpu);
++ else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++ return handle_external_interrupt(vcpu);
++ else if (exit_reason == EXIT_REASON_HLT)
++ return kvm_emulate_halt(vcpu);
++ else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
++ return handle_ept_misconfig(vcpu);
++#endif
+ return kvm_vmx_exit_handlers[exit_reason](vcpu);
+- else {
++ } else {
+ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+ exit_reason);
+ dump_vmcs();
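/*
 * Annotation (not part of the patch above): with CONFIG_RETPOLINE, the
 * indirect call through kvm_vmx_exit_handlers[] goes through a retpoline
 * thunk, so the vmx_handle_exit() hunk tests the hottest exit reasons first
 * and calls their handlers directly.  A minimal sketch of that dispatch
 * shape, with invented reasons and handlers:
 */
#include <stdio.h>

enum reason { R_MSR_WRITE, R_HLT, R_IO, R_MAX };

static int handle_msr_write(void) { puts("msr write"); return 1; }
static int handle_hlt(void)       { puts("hlt");       return 1; }
static int handle_io(void)        { puts("io");        return 1; }

static int (*const handlers[R_MAX])(void) = {
        [R_MSR_WRITE] = handle_msr_write,
        [R_HLT]       = handle_hlt,
        [R_IO]        = handle_io,
};

static int handle_exit(enum reason r)
{
        /* Fast path: direct calls for the most frequent exits. */
        if (r == R_MSR_WRITE)
                return handle_msr_write();
        if (r == R_HLT)
                return handle_hlt();

        /* Slow path: indirect call through the table. */
        if (r < R_MAX && handlers[r])
                return handlers[r]();
        return -1;
}

int main(void)
{
        handle_exit(R_MSR_WRITE);   /* direct call   */
        handle_exit(R_IO);          /* table lookup  */
        return 0;
}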
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 7a43b2ae19f12..a1cc855c539c1 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+
+ void xen_smp_intr_free(unsigned int cpu)
+ {
++ kfree(per_cpu(xen_resched_irq, cpu).name);
++ per_cpu(xen_resched_irq, cpu).name = NULL;
+ if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+ per_cpu(xen_resched_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_resched_irq, cpu).name);
+- per_cpu(xen_resched_irq, cpu).name = NULL;
+ }
++ kfree(per_cpu(xen_callfunc_irq, cpu).name);
++ per_cpu(xen_callfunc_irq, cpu).name = NULL;
+ if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+ per_cpu(xen_callfunc_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_callfunc_irq, cpu).name);
+- per_cpu(xen_callfunc_irq, cpu).name = NULL;
+ }
++ kfree(per_cpu(xen_debug_irq, cpu).name);
++ per_cpu(xen_debug_irq, cpu).name = NULL;
+ if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+ per_cpu(xen_debug_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_debug_irq, cpu).name);
+- per_cpu(xen_debug_irq, cpu).name = NULL;
+ }
++ kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
++ per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+ if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+ NULL);
+ per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+- per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+ }
+ }
+
+@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu)
+ char *resched_name, *callfunc_name, *debug_name;
+
+ resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
++ per_cpu(xen_resched_irq, cpu).name = resched_name;
+ rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+ cpu,
+ xen_reschedule_interrupt,
+@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_resched_irq, cpu).irq = rc;
+- per_cpu(xen_resched_irq, cpu).name = resched_name;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
++ per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+ cpu,
+ xen_call_function_interrupt,
+@@ -86,18 +87,21 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_callfunc_irq, cpu).irq = rc;
+- per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+
+- debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+- rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
+- IRQF_PERCPU | IRQF_NOBALANCING,
+- debug_name, NULL);
+- if (rc < 0)
+- goto fail;
+- per_cpu(xen_debug_irq, cpu).irq = rc;
+- per_cpu(xen_debug_irq, cpu).name = debug_name;
++ if (!xen_fifo_events) {
++ debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
++ per_cpu(xen_debug_irq, cpu).name = debug_name;
++ rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
++ xen_debug_interrupt,
++ IRQF_PERCPU | IRQF_NOBALANCING,
++ debug_name, NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(xen_debug_irq, cpu).irq = rc;
++ }
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
++ per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ cpu,
+ xen_call_function_single_interrupt,
+@@ -107,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+- per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+
+ return 0;
+
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 9d9777ded5f7b..928fbe63c96f8 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -98,18 +98,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
+
+ void xen_smp_intr_free_pv(unsigned int cpu)
+ {
++ kfree(per_cpu(xen_irq_work, cpu).name);
++ per_cpu(xen_irq_work, cpu).name = NULL;
+ if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+ per_cpu(xen_irq_work, cpu).irq = -1;
+- kfree(per_cpu(xen_irq_work, cpu).name);
+- per_cpu(xen_irq_work, cpu).name = NULL;
+ }
+
++ kfree(per_cpu(xen_pmu_irq, cpu).name);
++ per_cpu(xen_pmu_irq, cpu).name = NULL;
+ if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
+ per_cpu(xen_pmu_irq, cpu).irq = -1;
+- kfree(per_cpu(xen_pmu_irq, cpu).name);
+- per_cpu(xen_pmu_irq, cpu).name = NULL;
+ }
+ }
+
+@@ -119,6 +119,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ char *callfunc_name, *pmu_name;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
++ per_cpu(xen_irq_work, cpu).name = callfunc_name;
+ rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+ cpu,
+ xen_irq_work_interrupt,
+@@ -128,10 +129,10 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_irq_work, cpu).irq = rc;
+- per_cpu(xen_irq_work, cpu).name = callfunc_name;
+
+ if (is_xen_pmu) {
+ pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
++ per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
+ xen_pmu_irq_handler,
+ IRQF_PERCPU|IRQF_NOBALANCING,
+@@ -139,7 +140,6 @@ int xen_smp_intr_init_pv(unsigned int cpu)
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_pmu_irq, cpu).irq = rc;
+- per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ }
+
+ return 0;
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index d817b7c862a62..00d2ec73017e0 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu)
+ cpu, per_cpu(lock_kicker_irq, cpu));
+
+ name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
++ per_cpu(irq_name, cpu) = name;
+ irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
+ cpu,
+ dummy_handler,
+@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu)
+ if (irq >= 0) {
+ disable_irq(irq); /* make sure it's never delivered */
+ per_cpu(lock_kicker_irq, cpu) = irq;
+- per_cpu(irq_name, cpu) = name;
+ }
+
+ printk("cpu %d spinlock event irq %d\n", cpu, irq);
+@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu)
+ if (!xen_pvspin)
+ return;
+
++ kfree(per_cpu(irq_name, cpu));
++ per_cpu(irq_name, cpu) = NULL;
+ /*
+ * When booting the kernel with 'mitigations=auto,nosmt', the secondary
+ * CPUs are not activated, and lock_kicker_irq is not initialized.
+@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu)
+
+ unbind_from_irqhandler(irq, NULL);
+ per_cpu(lock_kicker_irq, cpu) = -1;
+- kfree(per_cpu(irq_name, cpu));
+- per_cpu(irq_name, cpu) = NULL;
+ }
+
+ PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 45a441c33d6dc..120e2bcf20f88 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -30,6 +30,8 @@ extern struct start_info *xen_start_info;
+ extern struct shared_info xen_dummy_shared_info;
+ extern struct shared_info *HYPERVISOR_shared_info;
+
++extern bool xen_fifo_events;
++
+ void xen_setup_mfn_list_list(void);
+ void xen_build_mfn_list_list(void);
+ void xen_setup_machphys_mapping(void);
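/*
 * Annotation (not part of the patch above): the Xen smp/smp_pv/spinlock hunks
 * store the kasprintf()'d name in the per-cpu descriptor immediately after
 * allocation, and free it unconditionally during teardown, so a failed bind
 * no longer leaks the string.  A hedged sketch of that ownership shape;
 * bind_handler() is an invented stand-in, not a Xen API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct irq_info {
        int   irq;       /* -1 when not bound */
        char *name;
};

static int bind_handler(const char *name)
{
        fprintf(stderr, "bind %s: failing on purpose\n", name);
        return -1;                   /* simulate a bind failure */
}

static void teardown(struct irq_info *info)
{
        free(info->name);            /* safe even if the bind failed */
        info->name = NULL;
        if (info->irq >= 0) {
                /* unbind_from_irqhandler(...) would go here */
                info->irq = -1;
        }
}

static int setup(struct irq_info *info, int cpu)
{
        char buf[32];
        int rc;

        snprintf(buf, sizeof(buf), "resched%d", cpu);
        info->name = strdup(buf);    /* stored before the bind attempt */
        if (!info->name)
                return -1;

        rc = bind_handler(info->name);
        if (rc < 0) {
                teardown(info);      /* frees the name, leaves irq = -1 */
                return rc;
        }
        info->irq = rc;
        return 0;
}

int main(void)
{
        struct irq_info info = { .irq = -1, .name = NULL };

        if (setup(&info, 0) < 0)
                puts("setup failed, nothing leaked");
        teardown(&info);             /* idempotent */
        return 0;
}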
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 5dafd7a8ec913..7abd66d1228ad 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -250,7 +250,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+ struct request_queue *q = hctx->queue;
+ struct blk_mq_ctx *ctx;
+- int i, ret;
++ int i, j, ret;
+
+ if (!hctx->nr_ctx)
+ return 0;
+@@ -262,9 +262,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
+ hctx_for_each_ctx(hctx, ctx, i) {
+ ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
+ if (ret)
+- break;
++ goto out;
+ }
+
++ return 0;
++out:
++ hctx_for_each_ctx(hctx, ctx, j) {
++ if (j < i)
++ kobject_del(&ctx->kobj);
++ }
++ kobject_del(&hctx->kobj);
+ return ret;
+ }
+
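/*
 * Annotation (not part of the patch above): blk_mq_register_hctx() now
 * unwinds the ctx kobjects already added (and the hctx kobject itself) when
 * a later kobject_add() fails, rather than returning with a half-registered
 * hierarchy.  A generic sketch of that partial-failure rollback;
 * register_item()/unregister_item() stand in for kobject_add()/kobject_del().
 */
#include <stdio.h>

#define NR_ITEMS 4

static int register_item(int i)
{
        if (i == 2)                      /* simulate failure on the third item */
                return -1;
        printf("registered %d\n", i);
        return 0;
}

static void unregister_item(int i)
{
        printf("unregistered %d\n", i);
}

static int register_all(void)
{
        int i, j, ret;

        for (i = 0; i < NR_ITEMS; i++) {
                ret = register_item(i);
                if (ret)
                        goto out;
        }
        return 0;
out:
        for (j = 0; j < i; j++)          /* roll back the partial work */
                unregister_item(j);
        return ret;
}

int main(void)
{
        if (register_all())
                puts("registration failed, state rolled back");
        return 0;
}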
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index 0cece1f883ebe..12dab10d36b2e 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -1281,15 +1281,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ goto out_free_tfm;
+ }
+
+-
+- for (i = 0; i < num_mb; ++i)
+- if (testmgr_alloc_buf(data[i].xbuf)) {
+- while (i--)
+- testmgr_free_buf(data[i].xbuf);
+- goto out_free_tfm;
+- }
+-
+-
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index f59b4d944f7fa..603483f8332b0 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ status = AE_NO_MEMORY;
+- goto cleanup;
++ goto pop_walk_state;
+ }
+
+ info->parameters = &this_walk_state->operands[0];
+@@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+
+ ACPI_FREE(info);
+ if (ACPI_FAILURE(status)) {
+- goto cleanup;
++ goto pop_walk_state;
+ }
+
+ next_walk_state->method_nesting_depth =
+@@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
+
+ return_ACPI_STATUS(status);
+
++pop_walk_state:
++
++ /* On error, pop the walk state to be deleted from thread */
++
++ acpi_ds_pop_walk_state(thread);
++
+ cleanup:
+
+ /* On error, we must terminate the method properly */
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 1fb8327f3c3be..9c0b94d1c4baa 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
+ status = acpi_ut_walk_package_tree(source_obj, dest_obj,
+ acpi_ut_copy_ielement_to_ielement,
+ walk_state);
+- if (ACPI_FAILURE(status)) {
+-
+- /* On failure, delete the destination package object */
+-
+- acpi_ut_remove_reference(dest_obj);
+- }
+-
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 6f572967b5552..4069c2a79daa2 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -81,6 +81,7 @@ enum board_ids {
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
+ static void ahci_shutdown_one(struct pci_dev *dev);
++static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -639,6 +640,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ ahci_save_initial_config(&pdev->dev, hpriv);
+ }
+
++static int ahci_pci_reset_controller(struct ata_host *host)
++{
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++ struct ahci_host_priv *hpriv = host->private_data;
++ int rc;
++
++ rc = ahci_reset_controller(host);
++ if (rc)
++ return rc;
++
++ /*
++ * If platform firmware failed to enable ports, try to enable
++ * them here.
++ */
++ ahci_intel_pcs_quirk(pdev, hpriv);
++
++ return 0;
++}
++
+ static void ahci_pci_init_controller(struct ata_host *host)
+ {
+ struct ahci_host_priv *hpriv = host->private_data;
+@@ -841,7 +861,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int rc;
+
+- rc = ahci_reset_controller(host);
++ rc = ahci_pci_reset_controller(host);
+ if (rc)
+ return rc;
+ ahci_pci_init_controller(host);
+@@ -876,7 +896,7 @@ static int ahci_pci_device_resume(struct device *dev)
+ ahci_mcp89_apple_enable(pdev);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+- rc = ahci_reset_controller(host);
++ rc = ahci_pci_reset_controller(host);
+ if (rc)
+ return rc;
+
+@@ -1741,12 +1761,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
+- /*
+- * If platform firmware failed to enable ports, try to enable
+- * them here.
+- */
+- ahci_intel_pcs_quirk(pdev, hpriv);
+-
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ) {
+ pi.flags |= ATA_FLAG_NCQ;
+@@ -1856,7 +1870,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (rc)
+ return rc;
+
+- rc = ahci_reset_controller(host);
++ rc = ahci_pci_reset_controller(host);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
+index abc0e87ca1a8b..43215a4c1e540 100644
+--- a/drivers/ata/pata_ixp4xx_cf.c
++++ b/drivers/ata/pata_ixp4xx_cf.c
+@@ -135,12 +135,12 @@ static void ixp4xx_setup_port(struct ata_port *ap,
+
+ static int ixp4xx_pata_probe(struct platform_device *pdev)
+ {
+- unsigned int irq;
+ struct resource *cs0, *cs1;
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+ int ret;
++ int irq;
+
+ cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+diff --git a/drivers/base/class.c b/drivers/base/class.c
+index d8a6a5864c2ee..61784503ca404 100644
+--- a/drivers/base/class.c
++++ b/drivers/base/class.c
+@@ -191,6 +191,11 @@ int __class_register(struct class *cls, struct lock_class_key *key)
+ }
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
++ if (error) {
++ kobject_del(&cp->subsys.kobj);
++ kfree_const(cp->subsys.kobj.name);
++ kfree(cp);
++ }
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(__class_register);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 10063d8a1b7d4..1abd39ed3f9f3 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -1068,8 +1068,12 @@ static int __driver_attach(struct device *dev, void *data)
+ */
+ return 0;
+ } else if (ret < 0) {
+- dev_dbg(dev, "Bus failed to match device: %d", ret);
+- return ret;
++ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
++ /*
++ * Driver could not match with device, but may match with
++ * another device on the bus.
++ */
++ return 0;
+ } /* ret > 0 means positive match */
+
+ if (driver_allows_async_probing(drv)) {
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8fbd376471de0..9ee58bf49d133 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -446,7 +446,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+- if (dev->power.no_callbacks)
++ callback = RPM_GET_CALLBACK(dev, runtime_idle);
++
++ /* If no callback assume success. */
++ if (!callback || dev->power.no_callbacks)
+ goto out;
+
+ /* Carry out an asynchronous or a synchronous idle notification. */
+@@ -462,10 +465,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
+
+ dev->power.idle_notification = true;
+
+- callback = RPM_GET_CALLBACK(dev, runtime_idle);
++ if (dev->power.irq_safe)
++ spin_unlock(&dev->power.lock);
++ else
++ spin_unlock_irq(&dev->power.lock);
++
++ retval = callback(dev);
+
+- if (callback)
+- retval = __rpm_callback(callback, dev);
++ if (dev->power.irq_safe)
++ spin_lock(&dev->power.lock);
++ else
++ spin_lock_irq(&dev->power.lock);
+
+ dev->power.idle_notification = false;
+ wake_up_all(&dev->power.wait_queue);
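/*
 * Annotation (not part of the patch above): the rpm_idle() hunk resolves the
 * callback early, treats a missing callback as success, and drops power.lock
 * around the call (spin_unlock vs spin_unlock_irq depending on irq_safe).
 * A minimal user-space sketch of the "release the lock around a callback"
 * shape; the device and callback types below are invented for the example.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_dev {
        pthread_mutex_t lock;
        int (*runtime_idle)(struct fake_dev *);
        int idle_notification;
};

static int my_idle_cb(struct fake_dev *dev)
{
        (void)dev;
        puts("idle callback ran without the power lock held");
        return 0;
}

static int rpm_idle_sketch(struct fake_dev *dev)
{
        int (*callback)(struct fake_dev *);
        int retval = 0;

        pthread_mutex_lock(&dev->lock);
        callback = dev->runtime_idle;
        if (!callback)                  /* no callback: assume success */
                goto out;

        dev->idle_notification = 1;
        pthread_mutex_unlock(&dev->lock);

        retval = callback(dev);         /* called with the lock dropped */

        pthread_mutex_lock(&dev->lock);
        dev->idle_notification = 0;
out:
        pthread_mutex_unlock(&dev->lock);
        return retval;
}

int main(void)
{
        struct fake_dev dev = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .runtime_idle = my_idle_cb,
        };

        return rpm_idle_sketch(&dev);
}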
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index c8f2b991e9cf7..79f77315854f4 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -563,13 +563,13 @@ static inline void btusb_free_frags(struct btusb_data *data)
+
+ spin_lock_irqsave(&data->rxlock, flags);
+
+- kfree_skb(data->evt_skb);
++ dev_kfree_skb_irq(data->evt_skb);
+ data->evt_skb = NULL;
+
+- kfree_skb(data->acl_skb);
++ dev_kfree_skb_irq(data->acl_skb);
+ data->acl_skb = NULL;
+
+- kfree_skb(data->sco_skb);
++ dev_kfree_skb_irq(data->sco_skb);
+ data->sco_skb = NULL;
+
+ spin_unlock_irqrestore(&data->rxlock, flags);
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
+index cf4a560958173..8055f63603f45 100644
+--- a/drivers/bluetooth/hci_bcsp.c
++++ b/drivers/bluetooth/hci_bcsp.c
+@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
+ i++;
+
+ __skb_unlink(skb, &bcsp->unack);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ }
+
+ if (skb_queue_empty(&bcsp->unack))
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index bf3e23104194a..e77da593f2903 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -298,7 +298,7 @@ static void h5_pkt_cull(struct h5 *h5)
+ break;
+
+ __skb_unlink(skb, &h5->unack);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ }
+
+ if (skb_queue_empty(&h5->unack))
+diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
+index d9a4c6c691e07..aeb3e670c4f52 100644
+--- a/drivers/bluetooth/hci_ll.c
++++ b/drivers/bluetooth/hci_ll.c
+@@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ default:
+ BT_ERR("illegal hcill state: %ld (losing packet)",
+ ll->hcill_state);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index e3164c200eac5..467137c47e4f2 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -816,7 +816,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+ default:
+ BT_ERR("Illegal tx state: %d (losing packet)",
+ qca->tx_ibs_state);
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ break;
+ }
+
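/*
 * Annotation (not part of the patch above): the Bluetooth hunks switch
 * kfree_skb() to dev_kfree_skb_irq() in paths that run with interrupts
 * disabled or under a spinlock; dev_kfree_skb_irq() queues the buffer for
 * later freeing instead of freeing it inline.  The user-space analogy below
 * shows only the idea -- move buffers to a deferred-free list inside the
 * critical section, free them afterwards -- not the kernel implementation.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
        struct buf *next;
        int id;
};

static pthread_spinlock_t lock;
static struct buf *inflight;     /* buffers owned by the "driver" */
static struct buf *free_later;   /* deferred-free list */

static void discard_inflight_atomic(void)
{
        pthread_spin_lock(&lock);
        while (inflight) {                       /* cheap: just relink */
                struct buf *b = inflight;

                inflight = b->next;
                b->next = free_later;
                free_later = b;
        }
        pthread_spin_unlock(&lock);
}

static void drain_free_list(void)
{
        while (free_later) {                     /* safe context: free */
                struct buf *b = free_later;

                free_later = b->next;
                printf("freeing buffer %d\n", b->id);
                free(b);
        }
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        for (int i = 0; i < 3; i++) {
                struct buf *b = malloc(sizeof(*b));

                if (!b)
                        break;
                b->id = i;
                b->next = inflight;
                inflight = b;
        }
        discard_inflight_atomic();
        drain_free_list();
        pthread_spin_destroy(&lock);
        return 0;
}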
+diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
+index 9959c762da2f8..db3dd467194c2 100644
+--- a/drivers/char/hw_random/amd-rng.c
++++ b/drivers/char/hw_random/amd-rng.c
+@@ -143,15 +143,19 @@ static int __init mod_init(void)
+ found:
+ err = pci_read_config_dword(pdev, 0x58, &pmbase);
+ if (err)
+- return err;
++ goto put_dev;
+
+ pmbase &= 0x0000FF00;
+- if (pmbase == 0)
+- return -EIO;
++ if (pmbase == 0) {
++ err = -EIO;
++ goto put_dev;
++ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+- if (!priv)
+- return -ENOMEM;
++ if (!priv) {
++ err = -ENOMEM;
++ goto put_dev;
++ }
+
+ if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
+ dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
+@@ -185,6 +189,8 @@ err_iomap:
+ release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ out:
+ kfree(priv);
++put_dev:
++ pci_dev_put(pdev);
+ return err;
+ }
+
+@@ -200,6 +206,8 @@ static void __exit mod_exit(void)
+
+ release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
++ pci_dev_put(priv->pcidev);
++
+ kfree(priv);
+ }
+
+diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
+index e1d421a36a138..207272979f233 100644
+--- a/drivers/char/hw_random/geode-rng.c
++++ b/drivers/char/hw_random/geode-rng.c
+@@ -51,6 +51,10 @@ static const struct pci_device_id pci_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, pci_tbl);
+
++struct amd_geode_priv {
++ struct pci_dev *pcidev;
++ void __iomem *membase;
++};
+
+ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
+ {
+@@ -90,6 +94,7 @@ static int __init mod_init(void)
+ const struct pci_device_id *ent;
+ void __iomem *mem;
+ unsigned long rng_base;
++ struct amd_geode_priv *priv;
+
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+@@ -97,17 +102,26 @@ static int __init mod_init(void)
+ goto found;
+ }
+ /* Device not found. */
+- goto out;
++ return err;
+
+ found:
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ err = -ENOMEM;
++ goto put_dev;
++ }
++
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+- goto out;
++ goto free_priv;
+ err = -ENOMEM;
+ mem = ioremap(rng_base, 0x58);
+ if (!mem)
+- goto out;
+- geode_rng.priv = (unsigned long)mem;
++ goto free_priv;
++
++ geode_rng.priv = (unsigned long)priv;
++ priv->membase = mem;
++ priv->pcidev = pdev;
+
+ pr_info("AMD Geode RNG detected\n");
+ err = hwrng_register(&geode_rng);
+@@ -116,20 +130,26 @@ found:
+ err);
+ goto err_unmap;
+ }
+-out:
+ return err;
+
+ err_unmap:
+ iounmap(mem);
+- goto out;
++free_priv:
++ kfree(priv);
++put_dev:
++ pci_dev_put(pdev);
++ return err;
+ }
+
+ static void __exit mod_exit(void)
+ {
+- void __iomem *mem = (void __iomem *)geode_rng.priv;
++ struct amd_geode_priv *priv;
+
++ priv = (struct amd_geode_priv *)geode_rng.priv;
+ hwrng_unregister(&geode_rng);
+- iounmap(mem);
++ iounmap(priv->membase);
++ pci_dev_put(priv->pcidev);
++ kfree(priv);
+ }
+
+ module_init(mod_init);
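/*
 * Annotation (not part of the patch above): both RNG hunks balance the
 * reference taken while walking PCI devices with pci_dev_put() on every
 * error path and at module exit, keeping the pci_dev pointer in a private
 * struct so the exit path can reach it.  A generic sketch of that get/put
 * discipline; the get/put/map functions here are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct device { int refcount; };
struct priv   { struct device *dev; void *mapping; };

static struct device *device_get(struct device *d) { d->refcount++; return d; }
static void device_put(struct device *d)           { d->refcount--; }
static void *map_resource(int fail)                { return fail ? NULL : malloc(16); }

static struct priv *driver_init(struct device *d, int fail_mapping)
{
        struct priv *priv;

        device_get(d);                       /* reference we must balance */

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                goto put_dev;

        priv->mapping = map_resource(fail_mapping);
        if (!priv->mapping)
                goto free_priv;

        priv->dev = d;                       /* keep it for driver_exit() */
        return priv;

free_priv:
        free(priv);
put_dev:
        device_put(d);                       /* error path drops the ref */
        return NULL;
}

static void driver_exit(struct priv *priv)
{
        free(priv->mapping);
        device_put(priv->dev);               /* exit path drops it too */
        free(priv);
}

int main(void)
{
        struct device dev = { .refcount = 0 };
        struct priv *p;

        p = driver_init(&dev, 1);            /* simulated mapping failure */
        printf("after failed init, refcount = %d\n", dev.refcount);

        p = driver_init(&dev, 0);
        if (p)
                driver_exit(p);
        printf("after init+exit,  refcount = %d\n", dev.refcount);
        return 0;
}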
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 736970312bbc9..0833a2ac2f69a 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -1298,6 +1298,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
+ unsigned long flags;
+ struct cmd_rcvr *rcvr;
+ struct cmd_rcvr *rcvrs = NULL;
++ struct module *owner;
+
+ if (!acquire_ipmi_user(user, &i)) {
+ /*
+@@ -1358,8 +1359,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
+ kfree(rcvr);
+ }
+
++ owner = intf->owner;
+ kref_put(&intf->refcount, intf_free);
+- module_put(intf->owner);
++ module_put(owner);
+ }
+
+ int ipmi_destroy_user(struct ipmi_user *user)
+@@ -3535,12 +3537,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg,
+ unsigned char err)
+ {
++ int rv;
+ msg->rsp[0] = msg->data[0] | 4;
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = err;
+ msg->rsp_size = 3;
+- /* It's an error, so it will never requeue, no need to check return. */
+- handle_one_recv_msg(intf, msg);
++
++ /* This will never requeue, but it may ask us to free the message. */
++ rv = handle_one_recv_msg(intf, msg);
++ if (rv == 0)
++ ipmi_free_smi_msg(msg);
+ }
+
+ static void cleanup_smi_msgs(struct ipmi_smi *intf)
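/*
 * Annotation (not part of the patch above): kref_put(&intf->refcount,
 * intf_free) may free intf, so _ipmi_destroy_user() now copies intf->owner
 * out first and passes the copy to module_put().  A minimal sketch of that
 * "save what you still need before dropping the last reference" rule; the
 * types below are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

struct owner { const char *name; };

struct intf {
        int refcount;
        struct owner *owner;
};

static void intf_put(struct intf *intf)
{
        if (--intf->refcount == 0) {
                printf("freeing interface\n");
                free(intf);                      /* intf is gone after this */
        }
}

static void destroy_user(struct intf *intf)
{
        struct owner *owner = intf->owner;       /* saved before the put */

        intf_put(intf);                          /* may free intf */
        printf("module_put(%s)\n", owner->name); /* uses the saved copy */
}

int main(void)
{
        static struct owner mod = { .name = "ipmi_si" };
        struct intf *intf = malloc(sizeof(*intf));

        if (!intf)
                return 1;
        intf->refcount = 1;
        intf->owner = &mod;
        destroy_user(intf);
        return 0;
}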
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index b6e7df9e88503..2879b5267dfbe 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2157,6 +2157,20 @@ skip_fallback_noirq:
+ }
+ module_init(init_ipmi_si);
+
++static void wait_msg_processed(struct smi_info *smi_info)
++{
++ unsigned long jiffies_now;
++ long time_diff;
++
++ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
++ jiffies_now = jiffies;
++ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
++ * SI_USEC_PER_JIFFY);
++ smi_event_handler(smi_info, time_diff);
++ schedule_timeout_uninterruptible(1);
++ }
++}
++
+ static void shutdown_smi(void *send_info)
+ {
+ struct smi_info *smi_info = send_info;
+@@ -2191,16 +2205,13 @@ static void shutdown_smi(void *send_info)
+ * in the BMC. Note that timers and CPU interrupts are off,
+ * so no need for locks.
+ */
+- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+- poll(smi_info);
+- schedule_timeout_uninterruptible(1);
+- }
++ wait_msg_processed(smi_info);
++
+ if (smi_info->handlers)
+ disable_si_irq(smi_info);
+- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+- poll(smi_info);
+- schedule_timeout_uninterruptible(1);
+- }
++
++ wait_msg_processed(smi_info);
++
+ if (smi_info->handlers)
+ smi_info->handlers->cleanup(smi_info->si_sm);
+
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index a9dcf31eadd21..5fe52a6839b53 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev,
+ iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+ if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
+ TPM2_TIMEOUT_C)) {
+- dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
++ dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
+ return -ETIME;
+ }
+
+@@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device)
+
+ /* Should the FIFO driver handle this? */
+ sm = buf->start_method;
+- if (sm == ACPI_TPM2_MEMORY_MAPPED)
+- return -ENODEV;
++ if (sm == ACPI_TPM2_MEMORY_MAPPED) {
++ rc = -ENODEV;
++ goto out;
++ }
+
+ priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+- if (!priv)
+- return -ENOMEM;
++ if (!priv) {
++ rc = -ENOMEM;
++ goto out;
++ }
+
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
+@@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device)
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
+- return -EINVAL;
++ rc = -EINVAL;
++ goto out;
+ }
+ crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
+ priv->smc_func_id = crb_smc->smc_func_id;
+@@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device)
+
+ rc = crb_map_io(device, priv, buf);
+ if (rc)
+- return rc;
++ goto out;
+
+ chip = tpmm_chip_alloc(dev, &tpm_crb);
+- if (IS_ERR(chip))
+- return PTR_ERR(chip);
++ if (IS_ERR(chip)) {
++ rc = PTR_ERR(chip);
++ goto out;
++ }
+
+ dev_set_drvdata(&chip->dev, priv);
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+- return tpm_chip_register(chip);
++ rc = tpm_chip_register(chip);
++
++out:
++ acpi_put_table((struct acpi_table_header *)buf);
++ return rc;
+ }
+
+ static int crb_acpi_remove(struct acpi_device *device)
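/*
 * Annotation (not part of the patch above): crb_acpi_add() used to return
 * early on several paths without releasing the TPM2 ACPI table; the hunk
 * funnels every path through one out: label that calls acpi_put_table().
 * A minimal sketch of that single-exit cleanup shape; acquire_table()/
 * release_table()/use_table() are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire_table(void)        { return malloc(32); }
static void release_table(void *t)      { puts("table released"); free(t); }
static int  use_table(void *t, int bad) { (void)t; return bad ? -1 : 0; }

static int probe(int simulate_error)
{
        void *table;
        int rc;

        table = acquire_table();
        if (!table)
                return -1;               /* nothing acquired yet */

        rc = use_table(table, simulate_error);
        if (rc)
                goto out;                /* error path still releases */

        puts("probe succeeded");
out:
        release_table(table);            /* one release for all paths */
        return rc;
}

int main(void)
{
        probe(1);
        probe(0);
        return 0;
}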
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index c722e3b3121a8..9c61be2afca7e 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev)
+ const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
++ int ret = 0;
+
+ if (!aid || aid->driver_data != DEVICE_IS_TPM2)
+ return 0;
+@@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev)
+ /* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
+ * table is mandatory
+ */
+- st =
+- acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
++ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+@@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev)
+
+ /* The tpm2_crb driver handles this device */
+ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
+- return -ENODEV;
++ ret = -ENODEV;
+
+- return 0;
++ acpi_put_table((struct acpi_table_header *)tbl);
++ return ret;
+ }
+ #else
+ static int check_acpi_tpm2(struct device *dev)
+diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
+index 882b42efd2582..9d33321c89bda 100644
+--- a/drivers/clk/imx/clk-imx8mn.c
++++ b/drivers/clk/imx/clk-imx8mn.c
+@@ -189,27 +189,27 @@ static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out
+ "sys_pll3_out", "clk_ext4", };
+
+ static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll1_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll1_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll1_out", "sys_pll1_133m", "dummy",
+ "clk_ext2", "clk_ext3", };
+
+ static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll1_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll1_out", "sys_pll1_133m", "dummy",
+ "clk_ext3", "clk_ext4", };
+
+ static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out",
+- "video_pll1_out", "sys_pll1_133m", "osc_hdmi",
++ "video_pll1_out", "sys_pll1_133m", "dummy",
+ "clk_ext2", "clk_ext3", };
+
+ static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m",
+@@ -582,7 +582,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
+ clks[IMX8MN_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ clks[IMX8MN_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ clks[IMX8MN_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+- clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
++ clks[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0);
+ clks[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0);
+ clks[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ clks[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
+index 90046428693c2..e74fc81a14d00 100644
+--- a/drivers/clk/qcom/clk-krait.c
++++ b/drivers/clk/qcom/clk-krait.c
+@@ -98,6 +98,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
++ else
++ mask <<= d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
+index 80df4eb041cc2..75954ac1fb9bb 100644
+--- a/drivers/clk/renesas/r9a06g032-clocks.c
++++ b/drivers/clk/renesas/r9a06g032-clocks.c
+@@ -386,7 +386,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
+ int error;
+ int index;
+
+- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
++ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++,
+ &clkspec)) {
+ if (clkspec.np != pd->dev.of_node)
+ continue;
+@@ -399,7 +399,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd,
+ if (error)
+ return error;
+ }
+- i++;
+ }
+
+ return 0;
+diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
+index 198417d563006..aa8a299ff704a 100644
+--- a/drivers/clk/rockchip/clk-pll.c
++++ b/drivers/clk/rockchip/clk-pll.c
+@@ -963,6 +963,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
+ return mux_clk;
+
+ err_pll:
++ kfree(pll->rate_table);
+ clk_unregister(mux_clk);
+ mux_clk = pll_clk;
+ err_mux:
+diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
+index ac70ad785d8ed..33df20f813d59 100644
+--- a/drivers/clk/samsung/clk-pll.c
++++ b/drivers/clk/samsung/clk-pll.c
+@@ -1390,6 +1390,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+ if (ret) {
+ pr_err("%s: failed to register pll clock %s : %d\n",
+ __func__, pll_clk->name, ret);
++ kfree(pll->rate_table);
+ kfree(pll);
+ return;
+ }
+diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
+index cf94a12459ea4..ee2a2d284113c 100644
+--- a/drivers/clk/socfpga/clk-gate.c
++++ b/drivers/clk/socfpga/clk-gate.c
+@@ -174,21 +174,24 @@ void __init socfpga_gate_init(struct device_node *node)
+ u32 div_reg[3];
+ u32 clk_phase[2];
+ u32 fixed_div;
+- struct clk *clk;
++ struct clk_hw *hw_clk;
+ struct socfpga_gate_clk *socfpga_clk;
+ const char *clk_name = node->name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
+ struct clk_init_data init;
+ struct clk_ops *ops;
+ int rc;
++ int err;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (WARN_ON(!socfpga_clk))
+ return;
+
+ ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL);
+- if (WARN_ON(!ops))
++ if (WARN_ON(!ops)) {
++ kfree(socfpga_clk);
+ return;
++ }
+
+ rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2);
+ if (rc)
+@@ -238,12 +241,15 @@ void __init socfpga_gate_init(struct device_node *node)
+ init.parent_names = parent_name;
+ socfpga_clk->hw.hw.init = &init;
+
+- clk = clk_register(NULL, &socfpga_clk->hw.hw);
+- if (WARN_ON(IS_ERR(clk))) {
++ hw_clk = &socfpga_clk->hw.hw;
++
++ err = clk_hw_register(NULL, hw_clk);
++ if (err) {
++ kfree(ops);
+ kfree(socfpga_clk);
+ return;
+ }
+- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
++ rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
+ if (WARN_ON(rc))
+ return;
+ }
+diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
+index 5e0c4b45f77f7..43707e2d72484 100644
+--- a/drivers/clk/socfpga/clk-periph.c
++++ b/drivers/clk/socfpga/clk-periph.c
+@@ -51,7 +51,7 @@ static __init void __socfpga_periph_init(struct device_node *node,
+ const struct clk_ops *ops)
+ {
+ u32 reg;
+- struct clk *clk;
++ struct clk_hw *hw_clk;
+ struct socfpga_periph_clk *periph_clk;
+ const char *clk_name = node->name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
+@@ -94,13 +94,13 @@ static __init void __socfpga_periph_init(struct device_node *node,
+ init.parent_names = parent_name;
+
+ periph_clk->hw.hw.init = &init;
++ hw_clk = &periph_clk->hw.hw;
+
+- clk = clk_register(NULL, &periph_clk->hw.hw);
+- if (WARN_ON(IS_ERR(clk))) {
++ if (clk_hw_register(NULL, hw_clk)) {
+ kfree(periph_clk);
+ return;
+ }
+- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
++ rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
+ }
+
+ void __init socfpga_periph_init(struct device_node *node)
+diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
+index dc65cc0fd3bd1..004e196492c4e 100644
+--- a/drivers/clk/socfpga/clk-pll.c
++++ b/drivers/clk/socfpga/clk-pll.c
+@@ -70,17 +70,18 @@ static struct clk_ops clk_pll_ops = {
+ .get_parent = clk_pll_get_parent,
+ };
+
+-static __init struct clk *__socfpga_pll_init(struct device_node *node,
++static __init struct clk_hw *__socfpga_pll_init(struct device_node *node,
+ const struct clk_ops *ops)
+ {
+ u32 reg;
+- struct clk *clk;
++ struct clk_hw *hw_clk;
+ struct socfpga_pll *pll_clk;
+ const char *clk_name = node->name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
+ struct clk_init_data init;
+ struct device_node *clkmgr_np;
+ int rc;
++ int err;
+
+ of_property_read_u32(node, "reg", &reg);
+
+@@ -108,13 +109,15 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
+ clk_pll_ops.enable = clk_gate_ops.enable;
+ clk_pll_ops.disable = clk_gate_ops.disable;
+
+- clk = clk_register(NULL, &pll_clk->hw.hw);
+- if (WARN_ON(IS_ERR(clk))) {
++ hw_clk = &pll_clk->hw.hw;
++
++ err = clk_hw_register(NULL, hw_clk);
++ if (err) {
+ kfree(pll_clk);
+- return NULL;
++ return ERR_PTR(err);
+ }
+- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+- return clk;
++ rc = of_clk_add_provider(node, of_clk_src_simple_get, hw_clk);
++ return hw_clk;
+ }
+
+ void __init socfpga_pll_init(struct device_node *node)
+diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
+index a156bd0c6af75..9eff05386ef9b 100644
+--- a/drivers/clk/st/clkgen-fsyn.c
++++ b/drivers/clk/st/clkgen-fsyn.c
+@@ -943,9 +943,10 @@ static void __init st_of_quadfs_setup(struct device_node *np,
+
+ clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
+ reg, lock);
+- if (IS_ERR(clk))
++ if (IS_ERR(clk)) {
++ kfree(lock);
+ goto err_exit;
+- else
++ } else
+ pr_debug("%s: parent %s rate %u\n",
+ __clk_get_name(clk),
+ __clk_get_name(clk_get_parent(clk)),
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index a0570213170d8..b1ec79ddb7f2a 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -231,6 +231,8 @@ static const struct sh_cmt_info sh_cmt_info[] = {
+ #define CMCNT 1 /* channel register */
+ #define CMCOR 2 /* channel register */
+
++#define CMCLKE 0x1000 /* CLK Enable Register (R-Car Gen2) */
++
+ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+ {
+ if (ch->iostart)
+@@ -845,6 +847,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
+ unsigned int hwidx, bool clockevent,
+ bool clocksource, struct sh_cmt_device *cmt)
+ {
++ u32 value;
+ int ret;
+
+ /* Skip unused channels. */
+@@ -874,6 +877,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
+ ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
+ ch->ioctrl = ch->iostart + 0x10;
+ ch->timer_bit = 0;
++
++ /* Enable the clock supply to the channel */
++ value = ioread32(cmt->mapbase + CMCLKE);
++ value |= BIT(hwidx);
++ iowrite32(value, cmt->mapbase + CMCLKE);
+ break;
+ }
+
+@@ -1006,12 +1014,10 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ else
+ cmt->rate = clk_get_rate(cmt->clk) / 8;
+
+- clk_disable(cmt->clk);
+-
+ /* Map the memory resource(s). */
+ ret = sh_cmt_map_memory(cmt);
+ if (ret < 0)
+- goto err_clk_unprepare;
++ goto err_clk_disable;
+
+ /* Allocate and setup the channels. */
+ cmt->num_channels = hweight8(cmt->hw_channels);
+@@ -1039,6 +1045,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ mask &= ~(1 << hwidx);
+ }
+
++ clk_disable(cmt->clk);
++
+ platform_set_drvdata(pdev, cmt);
+
+ return 0;
+@@ -1046,6 +1054,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
+ err_unmap:
+ kfree(cmt->channels);
+ iounmap(cmt->mapbase);
++err_clk_disable:
++ clk_disable(cmt->clk);
+ err_clk_unprepare:
+ clk_unprepare(cmt->clk);
+ err_clk_put:
+diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
+index bbc930a5962c6..95f8f2e217db0 100644
+--- a/drivers/counter/stm32-lptimer-cnt.c
++++ b/drivers/counter/stm32-lptimer-cnt.c
+@@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+
+ /* ensure CMP & ARR registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+- (val & STM32_LPTIM_CMPOK_ARROK),
++ (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
+ 100, 1000);
+ if (ret)
+ return ret;
+diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
+index 5107cbe2d64dd..72fd06fa0b595 100644
+--- a/drivers/cpufreq/amd_freq_sensitivity.c
++++ b/drivers/cpufreq/amd_freq_sensitivity.c
+@@ -124,6 +124,8 @@ static int __init amd_freq_sensitivity_init(void)
+ if (!pcidev) {
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ return -ENODEV;
++ } else {
++ pci_dev_put(pcidev);
+ }
+
+ if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index af9f348048629..01f0a8bdd534b 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1204,6 +1204,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+ goto err_free_rcpumask;
+
++ init_completion(&policy->kobj_unregister);
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ cpufreq_global_kobject, "policy%u", cpu);
+ if (ret) {
+@@ -1242,7 +1243,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
+ init_rwsem(&policy->rwsem);
+ spin_lock_init(&policy->transition_lock);
+ init_waitqueue_head(&policy->transition_wait);
+- init_completion(&policy->kobj_unregister);
+ INIT_WORK(&policy->update, handle_update);
+
+ policy->cpu = cpu;
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
+index d06d21a9525df..74702065730ca 100644
+--- a/drivers/cpuidle/dt_idle_states.c
++++ b/drivers/cpuidle/dt_idle_states.c
+@@ -224,6 +224,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
+ * also be 0 on platforms with missing DT idle states or legacy DT
+ * configuration predating the DT idle states bindings.
+ */
+- return i;
++ return state_idx - start_idx;
+ }
+ EXPORT_SYMBOL_GPL(dt_init_idle_driver);
+diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
+index 5669997386988..47077dd77f5d9 100644
+--- a/drivers/crypto/ccree/cc_debugfs.c
++++ b/drivers/crypto/ccree/cc_debugfs.c
+@@ -59,7 +59,7 @@ void __init cc_debugfs_global_init(void)
+ cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
+ }
+
+-void __exit cc_debugfs_global_fini(void)
++void cc_debugfs_global_fini(void)
+ {
+ debugfs_remove(cc_debugfs_dir);
+ }
+diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
+index 8b8eee513c279..3d59fef1fbeee 100644
+--- a/drivers/crypto/ccree/cc_driver.c
++++ b/drivers/crypto/ccree/cc_driver.c
+@@ -653,10 +653,17 @@ static struct platform_driver ccree_driver = {
+
+ static int __init ccree_init(void)
+ {
+- cc_hash_global_init();
++ int rc;
++
+ cc_debugfs_global_init();
+
+- return platform_driver_register(&ccree_driver);
++ rc = platform_driver_register(&ccree_driver);
++ if (rc) {
++ cc_debugfs_global_fini();
++ return rc;
++ }
++
++ return 0;
+ }
+ module_init(ccree_init);
+
+diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
+index bc71bdf44a9ff..9f67df0a4921b 100644
+--- a/drivers/crypto/ccree/cc_hash.c
++++ b/drivers/crypto/ccree/cc_hash.c
+@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+ static const u32 cc_digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+-static u64 cc_sha384_init[] = {
+- SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
+- SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
+-static u64 cc_sha512_init[] = {
+- SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+- SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
++
++/*
++ * Due to the way the HW works, every double word in the SHA384 and SHA512
++ * larval hashes must be stored in hi/lo order
++ */
++#define hilo(x) upper_32_bits(x), lower_32_bits(x)
++static const u32 cc_sha384_init[] = {
++ hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
++ hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
++static const u32 cc_sha512_init[] = {
++ hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
++ hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
++
+ static const u32 cc_sm3_init[] = {
+ SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
+ SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
+@@ -1948,8 +1955,8 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
+ }
+
+ if (large_sha_supported) {
+- cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
+- (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
++ cc_set_sram_desc(cc_sha384_init, sram_buff_ofs,
++ ARRAY_SIZE(cc_sha384_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+@@ -1957,8 +1964,8 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
+ sram_buff_ofs += sizeof(cc_sha384_init);
+ larval_seq_len = 0;
+
+- cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
+- (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
++ cc_set_sram_desc(cc_sha512_init, sram_buff_ofs,
++ ARRAY_SIZE(cc_sha512_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+@@ -1969,28 +1976,6 @@ init_digest_const_err:
+ return rc;
+ }
+
+-static void __init cc_swap_dwords(u32 *buf, unsigned long size)
+-{
+- int i;
+- u32 tmp;
+-
+- for (i = 0; i < size; i += 2) {
+- tmp = buf[i];
+- buf[i] = buf[i + 1];
+- buf[i + 1] = tmp;
+- }
+-}
+-
+-/*
+- * Due to the way the HW works we need to swap every
+- * double word in the SHA384 and SHA512 larval hashes
+- */
+-void __init cc_hash_global_init(void)
+-{
+- cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
+- cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
+-}
+-
+ int cc_hash_alloc(struct cc_drvdata *drvdata)
+ {
+ struct cc_hash_handle *hash_handle;
+diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
+index 0d6dc61484d79..3dbd0abefea05 100644
+--- a/drivers/crypto/ccree/cc_hash.h
++++ b/drivers/crypto/ccree/cc_hash.h
+@@ -104,6 +104,4 @@ cc_digest_len_addr(void *drvdata, u32 mode);
+ */
+ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+
+-void cc_hash_global_init(void);
+-
+ #endif /*__CC_HASH_H__*/
+diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
+index fe4cc8babe1c7..17cc44f14e5c4 100644
+--- a/drivers/crypto/img-hash.c
++++ b/drivers/crypto/img-hash.c
+@@ -356,12 +356,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
+ static void img_hash_dma_task(unsigned long d)
+ {
+ struct img_hash_dev *hdev = (struct img_hash_dev *)d;
+- struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
++ struct img_hash_request_ctx *ctx;
+ u8 *addr;
+ size_t nbytes, bleft, wsend, len, tbc;
+ struct scatterlist tsg;
+
+- if (!hdev->req || !ctx->sg)
++ if (!hdev->req)
++ return;
++
++ ctx = ahash_request_ctx(hdev->req);
++ if (!ctx->sg)
+ return;
+
+ addr = sg_virt(ctx->sg);
+diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
+index dc15b06e96ab7..80127344d82bc 100644
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1278,6 +1278,7 @@ struct n2_hash_tmpl {
+ const u32 *hash_init;
+ u8 hw_op_hashsz;
+ u8 digest_size;
++ u8 statesize;
+ u8 block_size;
+ u8 auth_type;
+ u8 hmac_type;
+@@ -1309,6 +1310,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ .hmac_type = AUTH_TYPE_HMAC_MD5,
+ .hw_op_hashsz = MD5_DIGEST_SIZE,
+ .digest_size = MD5_DIGEST_SIZE,
++ .statesize = sizeof(struct md5_state),
+ .block_size = MD5_HMAC_BLOCK_SIZE },
+ { .name = "sha1",
+ .hash_zero = sha1_zero_message_hash,
+@@ -1317,6 +1319,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ .hmac_type = AUTH_TYPE_HMAC_SHA1,
+ .hw_op_hashsz = SHA1_DIGEST_SIZE,
+ .digest_size = SHA1_DIGEST_SIZE,
++ .statesize = sizeof(struct sha1_state),
+ .block_size = SHA1_BLOCK_SIZE },
+ { .name = "sha256",
+ .hash_zero = sha256_zero_message_hash,
+@@ -1325,6 +1328,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ .hmac_type = AUTH_TYPE_HMAC_SHA256,
+ .hw_op_hashsz = SHA256_DIGEST_SIZE,
+ .digest_size = SHA256_DIGEST_SIZE,
++ .statesize = sizeof(struct sha256_state),
+ .block_size = SHA256_BLOCK_SIZE },
+ { .name = "sha224",
+ .hash_zero = sha224_zero_message_hash,
+@@ -1333,6 +1337,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
+ .hmac_type = AUTH_TYPE_RESERVED,
+ .hw_op_hashsz = SHA256_DIGEST_SIZE,
+ .digest_size = SHA224_DIGEST_SIZE,
++ .statesize = sizeof(struct sha256_state),
+ .block_size = SHA224_BLOCK_SIZE },
+ };
+ #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
+@@ -1474,6 +1479,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
+
+ halg = &ahash->halg;
+ halg->digestsize = tmpl->digest_size;
++ halg->statesize = tmpl->statesize;
+
+ base = &halg->base;
+ snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index f8a146554b1f3..dbab9e38223e1 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -2141,7 +2141,7 @@ static int omap_sham_probe(struct platform_device *pdev)
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get sync: %d\n", err);
+ goto err_pm;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index c79652ee94be6..f5ad9b9986549 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -603,8 +603,7 @@ static void devfreq_dev_release(struct device *dev)
+ * @dev: the device to add devfreq feature.
+ * @profile: device-specific profile to run devfreq.
+ * @governor_name: name of the policy to choose frequency.
+- * @data: private data for the governor. The devfreq framework does not
+- * touch this value.
++ * @data: devfreq driver pass to governors, governor should not change it.
+ */
+ struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+@@ -788,8 +787,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res)
+ * @dev: the device to add devfreq feature.
+ * @profile: device-specific profile to run devfreq.
+ * @governor_name: name of the policy to choose frequency.
+- * @data: private data for the governor. The devfreq framework does not
+- * touch this value.
++ * @data: devfreq driver pass to governors, governor should not change it.
+ *
+ * This function manages automatically the memory of devfreq device using device
+ * resource management and simplify the free operation for memory of devfreq
+diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
+index af94942fcf955..a3ae4dc4668ba 100644
+--- a/drivers/devfreq/governor_userspace.c
++++ b/drivers/devfreq/governor_userspace.c
+@@ -21,7 +21,7 @@ struct userspace_data {
+
+ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+ {
+- struct userspace_data *data = df->data;
++ struct userspace_data *data = df->governor_data;
+
+ if (data->valid)
+ *freq = data->user_frequency;
+@@ -40,7 +40,7 @@ static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
+ int err = 0;
+
+ mutex_lock(&devfreq->lock);
+- data = devfreq->data;
++ data = devfreq->governor_data;
+
+ sscanf(buf, "%lu", &wanted);
+ data->user_frequency = wanted;
+@@ -60,7 +60,7 @@ static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
+ int err = 0;
+
+ mutex_lock(&devfreq->lock);
+- data = devfreq->data;
++ data = devfreq->governor_data;
+
+ if (data->valid)
+ err = sprintf(buf, "%lu\n", data->user_frequency);
+@@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq)
+ goto out;
+ }
+ data->valid = false;
+- devfreq->data = data;
++ devfreq->governor_data = data;
+
+ err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+ out:
+@@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq)
+ if (devfreq->dev.kobj.sd)
+ sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+
+- kfree(devfreq->data);
+- devfreq->data = NULL;
++ kfree(devfreq->governor_data);
++ devfreq->governor_data = NULL;
+ }
+
+ static int devfreq_userspace_handler(struct devfreq *devfreq,
+diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
+index c9aa15fb86a9a..d07bceb3e34b0 100644
+--- a/drivers/dio/dio.c
++++ b/drivers/dio/dio.c
+@@ -110,6 +110,12 @@ static char dio_no_name[] = { 0 };
+
+ #endif /* CONFIG_DIO_CONSTANTS */
+
++static void dio_dev_release(struct device *dev)
++{
++ struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev);
++ kfree(ddev);
++}
++
+ int __init dio_find(int deviceid)
+ {
+ /* Called to find a DIO device before the full bus scan has run.
+@@ -222,6 +228,7 @@ static int __init dio_init(void)
+ dev->bus = &dio_bus;
+ dev->dev.parent = &dio_bus.dev;
+ dev->dev.bus = &dio_bus_type;
++ dev->dev.release = dio_dev_release;
+ dev->scode = scode;
+ dev->resource.start = pa;
+ dev->resource.end = pa + DIO_SIZE(scode, va);
+@@ -249,6 +256,7 @@ static int __init dio_init(void)
+ if (error) {
+ pr_err("DIO: Error registering device %s\n",
+ dev->name);
++ put_device(&dev->dev);
+ continue;
+ }
+ error = dio_create_sysfs_dev_files(dev);
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 65cf2b9355c47..93d6e6319b3cc 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -424,17 +424,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ * Then restart the workq on the new delay
+ */
+ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+- unsigned long value)
++ unsigned long msec)
+ {
+- unsigned long jiffs = msecs_to_jiffies(value);
+-
+- if (value == 1000)
+- jiffs = round_jiffies_relative(value);
+-
+- edac_dev->poll_msec = value;
+- edac_dev->delay = jiffs;
++ edac_dev->poll_msec = msec;
++ edac_dev->delay = msecs_to_jiffies(msec);
+
+- edac_mod_work(&edac_dev->work, jiffs);
++ /* See comment in edac_device_workq_setup() above */
++ if (edac_dev->poll_msec == 1000)
++ edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
++ else
++ edac_mod_work(&edac_dev->work, edac_dev->delay);
+ }
+
+ int edac_device_alloc_index(void)
+diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
+index 388427d378b10..2369a56a08cc6 100644
+--- a/drivers/edac/edac_module.h
++++ b/drivers/edac/edac_module.h
+@@ -57,7 +57,7 @@ bool edac_stop_work(struct delayed_work *work);
+ bool edac_mod_work(struct delayed_work *work, unsigned long delay);
+
+ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+- *edac_dev, unsigned long value);
++ *edac_dev, unsigned long msec);
+ extern void edac_mc_reset_delay_period(unsigned long value);
+
+ extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
+diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
+index 29576922df78f..a887c33134312 100644
+--- a/drivers/edac/i10nm_base.c
++++ b/drivers/edac/i10nm_base.c
+@@ -53,11 +53,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
+ bus, dev, fun);
++ pci_dev_put(pdev);
+ return NULL;
+ }
+
+- pci_dev_get(pdev);
+-
+ return pdev;
+ }
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index ac9fb336c80ff..eb98018ab420e 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -345,8 +345,8 @@ static int __init efisubsys_init(void)
+ efi_kobj = kobject_create_and_add("efi", firmware_kobj);
+ if (!efi_kobj) {
+ pr_err("efi: Firmware registration failed.\n");
+- destroy_workqueue(efi_rts_wq);
+- return -ENOMEM;
++ error = -ENOMEM;
++ goto err_destroy_wq;
+ }
+
+ error = generic_ops_register();
+@@ -382,7 +382,10 @@ err_unregister:
+ generic_ops_unregister();
+ err_put:
+ kobject_put(efi_kobj);
+- destroy_workqueue(efi_rts_wq);
++err_destroy_wq:
++ if (efi_rts_wq)
++ destroy_workqueue(efi_rts_wq);
++
+ return error;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index b1172d93c99c3..ba604985cad94 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -313,6 +313,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
+
+ if (!found)
+ return false;
++ pci_dev_put(pdev);
+
+ adev->bios = kmalloc(size, GFP_KERNEL);
+ if (!adev->bios) {
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 947e4fa3c5e68..d499add3601ab 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2894,7 +2894,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ data->od8_settings.od8_settings_array;
+ OverDriveTable_t *od_table =
+ &(data->smc_state_table.overdrive_table);
+- int32_t input_index, input_clk, input_vol, i;
++ int32_t input_clk, input_vol, i;
++ uint32_t input_index;
+ int od8_id;
+ int ret;
+
+diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
+index 11a81e8ba9639..123b04f44ca81 100644
+--- a/drivers/gpu/drm/drm_connector.c
++++ b/drivers/gpu/drm/drm_connector.c
+@@ -474,6 +474,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
+ mutex_destroy(&connector->mutex);
+
+ memset(connector, 0, sizeof(*connector));
++
++ if (dev->registered)
++ drm_sysfs_hotplug_event(dev);
+ }
+ EXPORT_SYMBOL(drm_connector_cleanup);
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index db35736d47af2..8c6f9752692df 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -392,6 +392,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+ if (gpu->identity.model == chipModel_GC700)
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
+
++ /* These models/revisions don't have the 2D pipe bit */
++ if ((gpu->identity.model == chipModel_GC500 &&
++ gpu->identity.revision <= 2) ||
++ gpu->identity.model == chipModel_GC300)
++ gpu->identity.features |= chipFeatures_PIPE_2D;
++
+ if ((gpu->identity.model == chipModel_GC500 &&
+ gpu->identity.revision < 2) ||
+ (gpu->identity.model == chipModel_GC300 &&
+@@ -425,8 +431,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
+ }
+
+- /* GC600 idle register reports zero bits where modules aren't present */
+- if (gpu->identity.model == chipModel_GC600)
++ /* GC600/300 idle register reports zero bits where modules aren't present */
++ if (gpu->identity.model == chipModel_GC600 ||
++ gpu->identity.model == chipModel_GC300)
+ gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
+ VIVS_HI_IDLE_STATE_RA |
+ VIVS_HI_IDLE_STATE_SE |
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+index a92fd6c70b09e..8de9bc8343a2b 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+@@ -70,8 +70,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+ return drm_panel_get_modes(fsl_connector->panel);
+ }
+
+-static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ if (mode->hdisplay & 0xf)
+ return MODE_ERROR;
+diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
+index 058dcd5416440..c1dc225d84361 100644
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -632,6 +632,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
+
+ if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ !workload->shadow_mm->ppgtt_mm.shadowed) {
++ intel_vgpu_unpin_mm(workload->shadow_mm);
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
+index 4a64d8aed9da9..7c68a39339150 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
+@@ -364,9 +364,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
+ if (--dpi->refcount != 0)
+ return;
+
+- if (dpi->pinctrl && dpi->pins_gpio)
+- pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+-
+ mtk_dpi_disable(dpi);
+ clk_disable_unprepare(dpi->pixel_clk);
+ clk_disable_unprepare(dpi->engine_clk);
+@@ -391,9 +388,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
+ goto err_pixel;
+ }
+
+- if (dpi->pinctrl && dpi->pins_dpi)
+- pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+-
+ return 0;
+
+ err_pixel:
+@@ -529,12 +523,18 @@ static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
+ struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+ mtk_dpi_power_off(dpi);
++
++ if (dpi->pinctrl && dpi->pins_gpio)
++ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+ }
+
+ static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
+ {
+ struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
++ if (dpi->pinctrl && dpi->pins_dpi)
++ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
++
+ mtk_dpi_power_on(dpi);
+ mtk_dpi_set_display_mode(dpi, &dpi->mode);
+ mtk_dpi_enable(dpi);
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+index c7441fb8313e4..e1a8989b78358 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+@@ -47,11 +47,9 @@ enum {
+ ADRENO_FW_MAX,
+ };
+
+-enum adreno_quirks {
+- ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+- ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+- ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
+-};
++#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
++#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
++#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
+
+ struct adreno_rev {
+ uint8_t core;
+@@ -74,7 +72,7 @@ struct adreno_info {
+ const char *name;
+ const char *fw[ADRENO_FW_MAX];
+ uint32_t gmem;
+- enum adreno_quirks quirks;
++ u64 quirks;
+ struct msm_gpu *(*init)(struct drm_device *dev);
+ const char *zapfw;
+ u32 inactive_period;
+diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+index 09c5d9a6f9fa8..638f605acb2db 100644
+--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
++++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+@@ -392,7 +392,15 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
+ st7701->dsi = dsi;
+ st7701->desc = desc;
+
+- return mipi_dsi_attach(dsi);
++ ret = mipi_dsi_attach(dsi);
++ if (ret)
++ goto err_attach;
++
++ return 0;
++
++err_attach:
++ drm_panel_remove(&st7701->panel);
++ return ret;
+ }
+
+ static int st7701_dsi_remove(struct mipi_dsi_device *dsi)
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 756a50e8aff20..8c8e13ec3cd64 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -227,6 +227,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+
+ if (!found)
+ return false;
++ pci_dev_put(pdev);
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (!rdev->bios) {
+@@ -612,13 +613,14 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ acpi_size tbl_size;
+ UEFI_ACPI_VFCT *vfct;
+ unsigned offset;
++ bool r = false;
+
+ if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
+ return false;
+ tbl_size = hdr->length;
+ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+- return false;
++ goto out;
+ }
+
+ vfct = (UEFI_ACPI_VFCT *)hdr;
+@@ -631,13 +633,13 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ offset += sizeof(VFCT_IMAGE_HEADER);
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image header truncated\n");
+- return false;
++ goto out;
+ }
+
+ offset += vhdr->ImageLength;
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+- return false;
++ goto out;
+ }
+
+ if (vhdr->ImageLength &&
+@@ -649,15 +651,18 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+ rdev->bios = kmemdup(&vbios->VbiosContent,
+ vhdr->ImageLength,
+ GFP_KERNEL);
++ if (rdev->bios)
++ r = true;
+
+- if (!rdev->bios)
+- return false;
+- return true;
++ goto out;
+ }
+ }
+
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+- return false;
++
++out:
++ acpi_put_table(hdr);
++ return r;
+ }
+ #else
+ static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+index 67dae1354aa65..2ea672f4420d5 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
+@@ -563,7 +563,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder,
+ video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+- memcpy(&dp->mode, adjusted, sizeof(*mode));
++ drm_mode_copy(&dp->mode, adjusted);
+ }
+
+ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
+diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
+index ed344a795b4d9..f2e2cc66f4897 100644
+--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
++++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
+@@ -487,7 +487,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ inno_hdmi_setup(hdmi, adj_mode);
+
+ /* Store the display mode for plugin/DPMS poweron events */
+- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
++ drm_mode_copy(&hdmi->previous_mode, adj_mode);
+ }
+
+ static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+index 85fc5f01f761b..4a81c5c8a5500 100644
+--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
++++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+@@ -382,7 +382,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+ /* Store the display mode for plugin/DPMS poweron events. */
+- memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
++ drm_mode_copy(&hdmi->previous_mode, adj_mode);
+ }
+
+ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
+index e55870190bf5c..d0f1384d0fbaa 100644
+--- a/drivers/gpu/drm/sti/sti_dvo.c
++++ b/drivers/gpu/drm/sti/sti_dvo.c
+@@ -287,7 +287,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge,
+
+ DRM_DEBUG_DRIVER("\n");
+
+- memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode));
++ drm_mode_copy(&dvo->mode, mode);
+
+ /* According to the path used (main or aux), the dvo clocks should
+ * have a different parent clock. */
+@@ -345,8 +345,9 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
+
+ #define CLK_TOLERANCE_HZ 50
+
+-static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_dvo_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
+index 94e404f132342..b321e55257711 100644
+--- a/drivers/gpu/drm/sti/sti_hda.c
++++ b/drivers/gpu/drm/sti/sti_hda.c
+@@ -522,7 +522,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge,
+
+ DRM_DEBUG_DRIVER("\n");
+
+- memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
++ drm_mode_copy(&hda->mode, mode);
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+@@ -600,8 +600,9 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
+
+ #define CLK_TOLERANCE_HZ 50
+
+-static int sti_hda_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_hda_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index 9862c322f0c4a..c5547fedebe30 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -933,7 +933,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge,
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Copy the drm display mode in the connector local structure */
+- memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
++ drm_mode_copy(&hdmi->mode, mode);
+
+ /* Update clock framerate according to the selected mode */
+ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
+@@ -996,8 +996,9 @@ fail:
+
+ #define CLK_TOLERANCE_HZ 50
+
+-static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+- struct drm_display_mode *mode)
++static enum drm_mode_status
++sti_hdmi_connector_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
+ {
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index c410221824c1b..923899b95c881 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -2458,8 +2458,10 @@ static int tegra_dc_probe(struct platform_device *pdev)
+ usleep_range(2000, 4000);
+
+ err = reset_control_assert(dc->rst);
+- if (err < 0)
++ if (err < 0) {
++ clk_disable_unprepare(dc->clk);
+ return err;
++ }
+
+ usleep_range(2000, 4000);
+
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 0a88ef11b9d3f..5ae132e37277d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -327,10 +327,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+ drm_gem_object_release(obj);
+ return ret;
+ }
+- drm_gem_object_put_unlocked(obj);
+
+ rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
+ rc->bo_handle = handle;
++
++ /*
++ * The handle owns the reference now. But we must drop our
++ * remaining reference *after* we no longer need to dereference
++ * the obj. Otherwise userspace could guess the handle and
++ * race closing it from another thread.
++ */
++ drm_gem_object_put_unlocked(obj);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 0b800c3540492..2492d3a19f36c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -182,7 +182,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+- box->d != 1 || box_count != 1) {
++ box->d != 1 || box_count != 1 ||
++ box->w > 64 || box->h > 64) {
+ /* TODO handle none page aligned offsets */
+ /* TODO handle more dst & src != 0 */
+ /* TODO handle more then one copy */
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 0d4479f478aa4..222f525c3d045 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -938,7 +938,10 @@
+ #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
+
+ #define USB_VENDOR_ID_PLANTRONICS 0x047f
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES 0xc055
+ #define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057
++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058
+
+ #define USB_VENDOR_ID_PANASONIC 0x04da
+ #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
+@@ -1140,7 +1143,9 @@
+ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
+ #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
++#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
++#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017 0x73f6
+ #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
+
+ #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 742c052b0110a..b8cce9c196d8c 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -18,10 +18,21 @@ static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
++ /* For Acer Aspire Switch 10 SW5-012 keyboard-dock */
+ if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
+- hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
++ hid_info(hdev, "Fixing up Acer Sw5-012 ITE keyboard report descriptor\n");
+ rdesc[163] = HID_MAIN_ITEM_RELATIVE;
+ }
++ /* For Acer One S1002/S1003 keyboard-dock */
++ if (*rsize == 188 && rdesc[185] == 0x81 && rdesc[186] == 0x02) {
++ hid_info(hdev, "Fixing up Acer S1002/S1003 ITE keyboard report descriptor\n");
++ rdesc[186] = HID_MAIN_ITEM_RELATIVE;
++ }
++ /* For Acer Aspire Switch 10E (SW3-016) keyboard-dock */
++ if (*rsize == 210 && rdesc[184] == 0x81 && rdesc[185] == 0x02) {
++ hid_info(hdev, "Fixing up Acer Aspire Switch 10E (SW3-016) ITE keyboard report descriptor\n");
++ rdesc[185] = HID_MAIN_ITEM_RELATIVE;
++ }
+ }
+
+ return rdesc;
+@@ -103,7 +114,18 @@ static const struct hid_device_id ite_devices[] = {
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+- USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002),
++ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
++ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_SYNAPTICS,
++ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003),
++ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
++ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_SYNAPTICS,
++ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_017),
++ .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 463a8deae37ec..9db327654580c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1953,6 +1953,10 @@ static const struct hid_device_id mt_devices[] = {
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x313a) },
+
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_ELAN, 0x3148) },
++
+ /* Elitegroup panel */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
+index e81b7cec2d124..3d414ae194acb 100644
+--- a/drivers/hid/hid-plantronics.c
++++ b/drivers/hid/hid-plantronics.c
+@@ -198,9 +198,18 @@ err:
+ }
+
+ static const struct hid_device_id plantronics_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
+ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+ { }
+ };
+diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
+index fb827c295842f..825f011c79013 100644
+--- a/drivers/hid/hid-sensor-custom.c
++++ b/drivers/hid/hid-sensor-custom.c
+@@ -59,7 +59,7 @@ struct hid_sensor_sample {
+ u32 raw_len;
+ } __packed;
+
+-static struct attribute hid_custom_attrs[] = {
++static struct attribute hid_custom_attrs[HID_CUSTOM_TOTAL_ATTRS] = {
+ {.name = "name", .mode = S_IRUGO},
+ {.name = "units", .mode = S_IRUGO},
+ {.name = "unit-expo", .mode = S_IRUGO},
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index 4edb241957040..e4811d37ca775 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -172,6 +172,7 @@ static int uclogic_probe(struct hid_device *hdev,
+ * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
++ hdev->quirks |= HID_QUIRK_HIDINPUT_FORCE;
+
+ /* Allocate and assign driver data */
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 4dbf69078387f..b42785fdf7ed5 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -160,6 +160,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ {
+ struct wacom *wacom = hid_get_drvdata(hdev);
+
++ if (wacom->wacom_wac.features.type == BOOTLOADER)
++ return 0;
++
+ if (size > WACOM_PKGLEN_MAX)
+ return 1;
+
+@@ -2786,6 +2789,11 @@ static int wacom_probe(struct hid_device *hdev,
+ return error;
+ }
+
++ if (features->type == BOOTLOADER) {
++ hid_warn(hdev, "Using device in hidraw-only mode");
++ return hid_hw_start(hdev, HID_CONNECT_HIDRAW);
++ }
++
+ error = wacom_parse_and_register(wacom, false);
+ if (error)
+ return error;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index c3dff7501af28..aa477a43a3123 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -4782,6 +4782,9 @@ static const struct wacom_features wacom_features_0x3c8 =
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+
++static const struct wacom_features wacom_features_0x94 =
++ { "Wacom Bootloader", .type = BOOTLOADER };
++
+ #define USB_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
+@@ -4855,6 +4858,7 @@ const struct hid_device_id wacom_ids[] = {
+ { USB_DEVICE_WACOM(0x84) },
+ { USB_DEVICE_WACOM(0x90) },
+ { USB_DEVICE_WACOM(0x93) },
++ { USB_DEVICE_WACOM(0x94) },
+ { USB_DEVICE_WACOM(0x97) },
+ { USB_DEVICE_WACOM(0x9A) },
+ { USB_DEVICE_WACOM(0x9F) },
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 8dea7cb298e69..ca172efcf072f 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -242,6 +242,7 @@ enum {
+ MTTPC,
+ MTTPC_B,
+ HID_GENERIC,
++ BOOTLOADER,
+ MAX_TYPE
+ };
+
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
+index 5aa6955b609f2..329889bf42f9a 100644
+--- a/drivers/hsi/controllers/omap_ssi_core.c
++++ b/drivers/hsi/controllers/omap_ssi_core.c
+@@ -502,8 +502,10 @@ static int ssi_probe(struct platform_device *pd)
+ platform_set_drvdata(pd, ssi);
+
+ err = ssi_add_controller(ssi, pd);
+- if (err < 0)
++ if (err < 0) {
++ hsi_put_controller(ssi);
+ goto out1;
++ }
+
+ pm_runtime_enable(&pd->dev);
+
+@@ -536,9 +538,9 @@ out3:
+ device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
+ out2:
+ ssi_remove_controller(ssi);
++ pm_runtime_disable(&pd->dev);
+ out1:
+ platform_set_drvdata(pd, NULL);
+- pm_runtime_disable(&pd->dev);
+
+ return err;
+ }
+@@ -629,7 +631,13 @@ static int __init ssi_init(void) {
+ if (ret)
+ return ret;
+
+- return platform_driver_register(&ssi_port_pdriver);
++ ret = platform_driver_register(&ssi_port_pdriver);
++ if (ret) {
++ platform_driver_unregister(&ssi_pdriver);
++ return ret;
++ }
++
++ return 0;
+ }
+ module_init(ssi_init);
+
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index 53325419ec13d..e69f792466065 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -506,6 +506,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* Block Write */
+ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: WRITE\n");
++ if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
++ return -EINVAL;
++
+ dma_size = data->block[0] + 1;
+ dma_direction = DMA_TO_DEVICE;
+ desc->wr_len_cmd = dma_size;
+diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
+index f614cade432bb..30e38bc8b6db8 100644
+--- a/drivers/i2c/busses/i2c-pxa-pci.c
++++ b/drivers/i2c/busses/i2c-pxa-pci.c
+@@ -105,7 +105,7 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ int i;
+ struct ce4100_devices *sds;
+
+- ret = pci_enable_device_mem(dev);
++ ret = pcim_enable_device(dev);
+ if (ret)
+ return ret;
+
+@@ -114,10 +114,8 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+ return -EINVAL;
+ }
+ sds = kzalloc(sizeof(*sds), GFP_KERNEL);
+- if (!sds) {
+- ret = -ENOMEM;
+- goto err_mem;
+- }
++ if (!sds)
++ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
+ sds->pdev[i] = add_i2c_device(dev, i);
+@@ -133,8 +131,6 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
+
+ err_dev_add:
+ kfree(sds);
+-err_mem:
+- pci_disable_device(dev);
+ return ret;
+ }
+
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index 2640b75fb774c..a2171c887561e 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -283,10 +283,10 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ unsigned int data_reg;
+ int ret = 0;
+
+- if (iio_buffer_enabled(indio_dev))
+- return -EBUSY;
++ ret = iio_device_claim_direct_mode(indio_dev);
++ if (ret)
++ return ret;
+
+- mutex_lock(&indio_dev->mlock);
+ ad_sigma_delta_set_channel(sigma_delta, chan->address);
+
+ spi_bus_lock(sigma_delta->spi->master);
+@@ -325,7 +325,7 @@ out:
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
+- mutex_unlock(&indio_dev->mlock);
++ iio_device_release_direct_mode(indio_dev);
+
+ if (ret)
+ return ret;
+diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
+index 4267d756cd50e..c1de80bd82f9f 100644
+--- a/drivers/iio/adc/ti-adc128s052.c
++++ b/drivers/iio/adc/ti-adc128s052.c
+@@ -194,13 +194,13 @@ static int adc128_remove(struct spi_device *spi)
+ }
+
+ static const struct of_device_id adc128_of_match[] = {
+- { .compatible = "ti,adc128s052", },
+- { .compatible = "ti,adc122s021", },
+- { .compatible = "ti,adc122s051", },
+- { .compatible = "ti,adc122s101", },
+- { .compatible = "ti,adc124s021", },
+- { .compatible = "ti,adc124s051", },
+- { .compatible = "ti,adc124s101", },
++ { .compatible = "ti,adc128s052", .data = (void*)0L, },
++ { .compatible = "ti,adc122s021", .data = (void*)1L, },
++ { .compatible = "ti,adc122s051", .data = (void*)1L, },
++ { .compatible = "ti,adc122s101", .data = (void*)1L, },
++ { .compatible = "ti,adc124s021", .data = (void*)2L, },
++ { .compatible = "ti,adc124s051", .data = (void*)2L, },
++ { .compatible = "ti,adc124s101", .data = (void*)2L, },
+ { /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, adc128_of_match);
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 372ca5347d3ce..a12ee8ef27a89 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -2796,8 +2796,8 @@ err:
+ static void __exit ib_core_cleanup(void)
+ {
+ roce_gid_mgmt_cleanup();
+- nldev_exit();
+ rdma_nl_unregister(RDMA_NL_LS);
++ nldev_exit();
+ unregister_pernet_device(&rdma_dev_net_ops);
+ unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
+ ib_sa_cleanup();
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 81b70f1f1290b..88c68d77e6b10 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -493,7 +493,7 @@ static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_cm_id *cm_id = &id_priv->id;
+
+ if (port && port != cm_id->port_num)
+- return 0;
++ return -EAGAIN;
+
+ if (cm_id->port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
+@@ -694,6 +694,8 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
+ int ret = 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
++ if (!table_attr)
++ return -EMSGSIZE;
+
+ rt = &counter->device->res[RDMA_RESTRACK_QP];
+ xa_lock(&rt->xa);
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index d413dafb9211d..39cbb853f9134 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1952,7 +1952,7 @@ static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs)
+ * Last bit is reserved for extending the attr_mask by
+ * using another field.
+ */
+- BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
++ BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1ULL << 31));
+
+ if (cmd.base.attr_mask &
+ ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index 1aeea5d65c015..832b878fa67eb 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -218,6 +218,8 @@ out:
+ for (node = 0; node < node_affinity.num_possible_nodes; node++)
+ hfi1_per_node_cntr[node] = 1;
+
++ pci_dev_put(dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
+index c090807124858..747ec08dec0d9 100644
+--- a/drivers/infiniband/hw/hfi1/firmware.c
++++ b/drivers/infiniband/hw/hfi1/firmware.c
+@@ -1786,6 +1786,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+
+ if (!dd->platform_config.data) {
+ dd_dev_err(dd, "%s: Missing config file\n", __func__);
++ ret = -EINVAL;
+ goto bail;
+ }
+ ptr = (u32 *)dd->platform_config.data;
+@@ -1794,6 +1795,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ ptr++;
+ if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
+ dd_dev_err(dd, "%s: Bad config file\n", __func__);
++ ret = -EINVAL;
+ goto bail;
+ }
+
+@@ -1817,6 +1819,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ if (file_length > dd->platform_config.size) {
+ dd_dev_info(dd, "%s:File claims to be larger than read size\n",
+ __func__);
++ ret = -EINVAL;
+ goto bail;
+ } else if (file_length < dd->platform_config.size) {
+ dd_dev_info(dd,
+@@ -1837,6 +1840,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+ __func__, (ptr - (u32 *)
+ dd->platform_config.data));
++ ret = -EINVAL;
+ goto bail;
+ }
+
+@@ -1883,6 +1887,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ __func__, table_type,
+ (ptr - (u32 *)
+ dd->platform_config.data));
++ ret = -EINVAL;
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table = ptr;
+@@ -1907,6 +1912,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
+ __func__, table_type,
+ (ptr -
+ (u32 *)dd->platform_config.data));
++ ret = -EINVAL;
+ goto bail; /* We don't trust this file now */
+ }
+ pcfgcache->config_tables[table_type].table_metadata =
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 634f29cb7395c..6edd30c92156e 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3890,6 +3890,40 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ return err;
+ }
+
++static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
++ int attr_mask, enum ib_qp_type qp_type)
++{
++ int log_max_ra_res;
++ int log_max_ra_req;
++
++ if (qp_type == MLX5_IB_QPT_DCI) {
++ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
++ log_max_ra_res_dc);
++ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
++ log_max_ra_req_dc);
++ } else {
++ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
++ log_max_ra_res_qp);
++ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
++ log_max_ra_req_qp);
++ }
++
++ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
++ attr->max_rd_atomic > log_max_ra_res) {
++ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
++ attr->max_rd_atomic);
++ return false;
++ }
++
++ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
++ attr->max_dest_rd_atomic > log_max_ra_req) {
++ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
++ attr->max_dest_rd_atomic);
++ return false;
++ }
++ return true;
++}
++
+ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+ {
+@@ -3986,21 +4020,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ }
+ }
+
+- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+- attr->max_rd_atomic >
+- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
+- mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+- attr->max_rd_atomic);
+- goto out;
+- }
+-
+- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+- attr->max_dest_rd_atomic >
+- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
+- mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+- attr->max_dest_rd_atomic);
++ if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
+ goto out;
+- }
+
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ err = 0;
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 89f6d54a43120..5dd9bcef5921a 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -842,12 +842,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
+ qp->resp.mr = NULL;
+ }
+
+- if (qp_type(qp) == IB_QPT_RC)
+- sk_dst_reset(qp->sk->sk);
+-
+ free_rd_atomic_resources(qp);
+
+ if (qp->sk) {
++ if (qp_type(qp) == IB_QPT_RC)
++ sk_dst_reset(qp->sk->sk);
++
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
+index d8db3bee9da7f..214714afacb7c 100644
+--- a/drivers/infiniband/sw/siw/siw_cq.c
++++ b/drivers/infiniband/sw/siw/siw_cq.c
+@@ -56,8 +56,6 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+ if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
+ memset(wc, 0, sizeof(*wc));
+ wc->wr_id = cqe->id;
+- wc->status = map_cqe_status[cqe->status].ib;
+- wc->opcode = map_wc_opcode[cqe->opcode];
+ wc->byte_len = cqe->bytes;
+
+ /*
+@@ -71,10 +69,32 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ }
+ wc->qp = cqe->base_qp;
++ wc->opcode = map_wc_opcode[cqe->opcode];
++ wc->status = map_cqe_status[cqe->status].ib;
+ siw_dbg_cq(cq,
+ "idx %u, type %d, flags %2x, id 0x%pK\n",
+ cq->cq_get % cq->num_cqe, cqe->opcode,
+ cqe->flags, (void *)(uintptr_t)cqe->id);
++ } else {
++ /*
++ * A malicious user may set invalid opcode or
++ * status in the user mmapped CQE array.
++ * Sanity check and correct values in that case
++ * to avoid out-of-bounds access to global arrays
++ * for opcode and status mapping.
++ */
++ u8 opcode = cqe->opcode;
++ u16 status = cqe->status;
++
++ if (opcode >= SIW_NUM_OPCODES) {
++ opcode = 0;
++ status = SIW_WC_GENERAL_ERR;
++ } else if (status >= SIW_NUM_WC_STATUS) {
++ status = SIW_WC_GENERAL_ERR;
++ }
++ wc->opcode = map_wc_opcode[opcode];
++ wc->status = map_cqe_status[status].ib;
++
+ }
+ WRITE_ONCE(cqe->flags, 0);
+ cq->cq_get++;
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 5e6d96bd2eb12..2b5120a13e376 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+
+ if (paddr)
+- return virt_to_page((void *)paddr);
++ return virt_to_page((void *)(uintptr_t)paddr);
+
+ return NULL;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index b9ca54e372b47..c8c2014b79d22 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -694,13 +694,45 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
+ static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+ {
+- struct siw_sqe sqe = {};
+ int rv = 0;
+
+ while (wr) {
+- sqe.id = wr->wr_id;
+- sqe.opcode = wr->opcode;
+- rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
++ struct siw_sqe sqe = {};
++
++ switch (wr->opcode) {
++ case IB_WR_RDMA_WRITE:
++ sqe.opcode = SIW_OP_WRITE;
++ break;
++ case IB_WR_RDMA_READ:
++ sqe.opcode = SIW_OP_READ;
++ break;
++ case IB_WR_RDMA_READ_WITH_INV:
++ sqe.opcode = SIW_OP_READ_LOCAL_INV;
++ break;
++ case IB_WR_SEND:
++ sqe.opcode = SIW_OP_SEND;
++ break;
++ case IB_WR_SEND_WITH_IMM:
++ sqe.opcode = SIW_OP_SEND_WITH_IMM;
++ break;
++ case IB_WR_SEND_WITH_INV:
++ sqe.opcode = SIW_OP_SEND_REMOTE_INV;
++ break;
++ case IB_WR_LOCAL_INV:
++ sqe.opcode = SIW_OP_INVAL_STAG;
++ break;
++ case IB_WR_REG_MR:
++ sqe.opcode = SIW_OP_REG_MR;
++ break;
++ default:
++ rv = -EINVAL;
++ break;
++ }
++ if (!rv) {
++ sqe.id = wr->wr_id;
++ rv = siw_sqe_complete(qp, &sqe, 0,
++ SIW_WC_WR_FLUSH_ERR);
++ }
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+index 5b05cf3837da1..28e9b70844e44 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+@@ -42,6 +42,11 @@ static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
+ [IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
+ };
+
++static unsigned int ipoib_get_max_num_queues(void)
++{
++ return min_t(unsigned int, num_possible_cpus(), 128);
++}
++
+ static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ {
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+@@ -173,6 +178,8 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
+ .changelink = ipoib_changelink,
+ .get_size = ipoib_get_size,
+ .fill_info = ipoib_fill_info,
++ .get_num_rx_queues = ipoib_get_max_num_queues,
++ .get_num_tx_queues = ipoib_get_max_num_queues,
+ };
+
+ struct rtnl_link_ops *ipoib_get_link_ops(void)
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index a51e7c85f5819..4022816a4736f 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -1078,14 +1078,12 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ if (IS_ERR_OR_NULL(ts->reset_gpio))
+ return 0;
+
+- gpiod_set_value_cansleep(ts->reset_gpio, 1);
+-
+ error = regulator_enable(ts->vcc33);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to enable vcc33 regulator: %d\n",
+ error);
+- goto release_reset_gpio;
++ return error;
+ }
+
+ error = regulator_enable(ts->vccio);
+@@ -1094,7 +1092,7 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ "failed to enable vccio regulator: %d\n",
+ error);
+ regulator_disable(ts->vcc33);
+- goto release_reset_gpio;
++ return error;
+ }
+
+ /*
+@@ -1103,7 +1101,6 @@ static int elants_i2c_power_on(struct elants_data *ts)
+ */
+ udelay(ELAN_POWERON_DELAY_USEC);
+
+-release_reset_gpio:
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ if (error)
+ return error;
+@@ -1211,7 +1208,7 @@ static int elants_i2c_probe(struct i2c_client *client,
+ return error;
+ }
+
+- ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
++ ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 82d0083104182..533b920ed7df2 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -3051,6 +3051,13 @@ static int __init parse_ivrs_acpihid(char *str)
+ return 1;
+ }
+
++ /*
++ * Ignore leading zeroes after ':', so e.g., AMDI0095:00
++ * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match
++ */
++ while (*uid == '0' && *(uid + 1))
++ uid++;
++
+ i = early_acpihid_map_size++;
+ memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+ memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
+index 05f3d93cf480c..db391dd779c0f 100644
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -591,6 +591,7 @@ out_drop_state:
+ put_device_state(dev_state);
+
+ out:
++ pci_dev_put(pdev);
+ return ret;
+ }
+
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index cde281b97afa8..4dbecd14034a0 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -1122,7 +1122,7 @@ static int fsl_pamu_probe(struct platform_device *pdev)
+ ret = create_csd(ppaact_phys, mem_size, csd_port_id);
+ if (ret) {
+ dev_err(dev, "could not create coherence subdomain\n");
+- return ret;
++ goto error;
+ }
+ }
+
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index b5efd6dac9539..e31bd281e59d6 100644
+--- a/drivers/iommu/mtk_iommu_v1.c
++++ b/drivers/iommu/mtk_iommu_v1.c
+@@ -626,18 +626,34 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(&pdev->dev));
+ if (ret)
+- return ret;
++ goto out_clk_unprepare;
+
+ iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+- return ret;
++ goto out_sysfs_remove;
+
+- if (!iommu_present(&platform_bus_type))
+- bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
++ if (!iommu_present(&platform_bus_type)) {
++ ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
++ if (ret)
++ goto out_dev_unreg;
++ }
+
+- return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
++ ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
++ if (ret)
++ goto out_bus_set_null;
++ return ret;
++
++out_bus_set_null:
++ bus_set_iommu(&platform_bus_type, NULL);
++out_dev_unreg:
++ iommu_device_unregister(&data->iommu);
++out_sysfs_remove:
++ iommu_device_sysfs_remove(&data->iommu);
++out_clk_unprepare:
++ clk_disable_unprepare(data->bclk);
++ return ret;
+ }
+
+ static int mtk_iommu_remove(struct platform_device *pdev)
+diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
+index 1337ceceb59b9..8be7d136c3bf8 100644
+--- a/drivers/irqchip/irq-gic-pm.c
++++ b/drivers/irqchip/irq-gic-pm.c
+@@ -104,7 +104,7 @@ static int gic_probe(struct platform_device *pdev)
+
+ pm_runtime_enable(dev);
+
+- ret = pm_runtime_get_sync(dev);
++ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
+index 86669ec8b977a..2c74064652334 100644
+--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
+@@ -3219,6 +3219,7 @@ static int
+ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ {
+ struct hfc_multi *hc = dch->hw;
++ struct sk_buff_head free_queue;
+ u_long flags;
+
+ switch (cmd) {
+@@ -3247,6 +3248,7 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ l1_event(dch->l1, HW_POWERUP_IND);
+ break;
+ case HW_DEACT_REQ:
++ __skb_queue_head_init(&free_queue);
+ /* start deactivation */
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->ctype == HFC_TYPE_E1) {
+@@ -3266,20 +3268,21 @@ hfcm_l1callback(struct dchannel *dch, u_int cmd)
+ plxsd_checksync(hc, 0);
+ }
+ }
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
+ del_timer(&dch->timer);
+ spin_unlock_irqrestore(&hc->lock, flags);
++ __skb_queue_purge(&free_queue);
+ break;
+ case HW_POWERUP_REQ:
+ spin_lock_irqsave(&hc->lock, flags);
+@@ -3386,6 +3389,9 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ case PH_DEACTIVATE_REQ:
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ if (dch->dev.D.protocol != ISDN_P_TE_S0) {
++ struct sk_buff_head free_queue;
++
++ __skb_queue_head_init(&free_queue);
+ spin_lock_irqsave(&hc->lock, flags);
+ if (debug & DEBUG_HFCMULTI_MSG)
+ printk(KERN_DEBUG
+@@ -3407,14 +3413,14 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ /* deactivate */
+ dch->state = 1;
+ }
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+@@ -3426,6 +3432,7 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
+ #endif
+ ret = 0;
+ spin_unlock_irqrestore(&hc->lock, flags);
++ __skb_queue_purge(&free_queue);
+ } else
+ ret = l1_event(dch->l1, hh->prim);
+ break;
+diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
+index a2b2ce1dfec81..41ff2e3dc8430 100644
+--- a/drivers/isdn/hardware/mISDN/hfcpci.c
++++ b/drivers/isdn/hardware/mISDN/hfcpci.c
+@@ -1617,16 +1617,19 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+ spin_lock_irqsave(&hc->lock, flags);
+ if (hc->hw.protocol == ISDN_P_NT_S0) {
++ struct sk_buff_head free_queue;
++
++ __skb_queue_head_init(&free_queue);
+ /* prepare deactivation */
+ Write_hfc(hc, HFCPCI_STATES, 0x40);
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+@@ -1639,10 +1642,12 @@ hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ hc->hw.mst_m &= ~HFCPCI_MASTER;
+ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
+ ret = 0;
++ spin_unlock_irqrestore(&hc->lock, flags);
++ __skb_queue_purge(&free_queue);
+ } else {
+ ret = l1_event(dch->l1, hh->prim);
++ spin_unlock_irqrestore(&hc->lock, flags);
+ }
+- spin_unlock_irqrestore(&hc->lock, flags);
+ break;
+ }
+ if (!ret)
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 1f89378b56231..111a597ef23c2 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -327,20 +327,24 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
+ test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
+
+ if (hw->protocol == ISDN_P_NT_S0) {
++ struct sk_buff_head free_queue;
++
++ __skb_queue_head_init(&free_queue);
+ hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
+ spin_lock_irqsave(&hw->lock, flags);
+- skb_queue_purge(&dch->squeue);
++ skb_queue_splice_init(&dch->squeue, &free_queue);
+ if (dch->tx_skb) {
+- dev_kfree_skb(dch->tx_skb);
++ __skb_queue_tail(&free_queue, dch->tx_skb);
+ dch->tx_skb = NULL;
+ }
+ dch->tx_idx = 0;
+ if (dch->rx_skb) {
+- dev_kfree_skb(dch->rx_skb);
++ __skb_queue_tail(&free_queue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ spin_unlock_irqrestore(&hw->lock, flags);
++ __skb_queue_purge(&free_queue);
+ #ifdef FIXME
+ if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
+ dchannel_sched_event(&hc->dch, D_CLEARBUSY);
+@@ -1331,7 +1335,7 @@ tx_iso_complete(struct urb *urb)
+ printk("\n");
+ }
+
+- dev_kfree_skb(tx_skb);
++ dev_consume_skb_irq(tx_skb);
+ tx_skb = NULL;
+ if (fifo->dch && get_next_dframe(fifo->dch))
+ tx_skb = fifo->dch->tx_skb;
+diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
+index eb3adfb7f88d3..172a8b18c5799 100644
+--- a/drivers/macintosh/macio-adb.c
++++ b/drivers/macintosh/macio-adb.c
+@@ -106,6 +106,10 @@ int macio_init(void)
+ return -ENXIO;
+ }
+ adb = ioremap(r.start, sizeof(struct adb_regs));
++ if (!adb) {
++ of_node_put(adbs);
++ return -ENOMEM;
++ }
+
+ out_8(&adb->ctrl.r, 0);
+ out_8(&adb->intr.r, 0);
+diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
+index 92d142d2b75f4..176bbd062617c 100644
+--- a/drivers/macintosh/macio_asic.c
++++ b/drivers/macintosh/macio_asic.c
+@@ -425,7 +425,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
+ if (of_device_register(&dev->ofdev) != 0) {
+ printk(KERN_DEBUG"macio: device registration error for %s!\n",
+ dev_name(&dev->ofdev.dev));
+- kfree(dev);
++ put_device(&dev->ofdev.dev);
+ return NULL;
+ }
+
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index f9cc674ba9b76..1d0b8abbafc30 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ ret = device_register(&ipi_mbox->dev);
+ if (ret) {
+ dev_err(dev, "Failed to register ipi mbox dev.\n");
++ put_device(&ipi_mbox->dev);
+ return ret;
+ }
+ mdev = &ipi_mbox->dev;
+@@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ if (ipi_mbox->dev.parent) {
+ mbox_controller_unregister(&ipi_mbox->mbox);
+- device_unregister(&ipi_mbox->dev);
++ if (device_is_registered(&ipi_mbox->dev))
++ device_unregister(&ipi_mbox->dev);
+ }
+ }
+ }
+diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
+index c799bb81ab03d..2df3ab3b76e4e 100644
+--- a/drivers/mcb/mcb-core.c
++++ b/drivers/mcb/mcb-core.c
+@@ -71,8 +71,10 @@ static int mcb_probe(struct device *dev)
+
+ get_device(dev);
+ ret = mdrv->probe(mdev, found_id);
+- if (ret)
++ if (ret) {
+ module_put(carrier_mod);
++ put_device(dev);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 3b69e6aa3d88a..cfe5c95ce0cef 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -108,7 +108,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ return 0;
+
+ err:
+- mcb_free_dev(mdev);
++ put_device(&mdev->dev);
+
+ return ret;
+ }
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index af6d4f898e4c1..2ecd0db0f2945 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
+ return r;
+ }
+
+-static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
++static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
++ bool destroy_bm)
+ {
+ dm_sm_destroy(cmd->metadata_sm);
+ dm_tm_destroy(cmd->tm);
+- dm_block_manager_destroy(cmd->bm);
++ if (destroy_bm)
++ dm_block_manager_destroy(cmd->bm);
+ }
+
+ typedef unsigned long (*flags_mutator)(unsigned long);
+@@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ cmd2 = lookup(bdev);
+ if (cmd2) {
+ mutex_unlock(&table_lock);
+- __destroy_persistent_data_objects(cmd);
++ __destroy_persistent_data_objects(cmd, true);
+ kfree(cmd);
+ return cmd2;
+ }
+@@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+ mutex_unlock(&table_lock);
+
+ if (!cmd->fail_io)
+- __destroy_persistent_data_objects(cmd);
++ __destroy_persistent_data_objects(cmd, true);
+ kfree(cmd);
+ }
+ }
+@@ -1808,14 +1810,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
+
+ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
+ {
+- int r;
++ int r = -EINVAL;
++ struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
++
++ /* fail_io is double-checked with cmd->root_lock held below */
++ if (unlikely(cmd->fail_io))
++ return r;
++
++ /*
++ * Replacement block manager (new_bm) is created and old_bm destroyed outside of
++ * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
++ * shrinker associated with the block manager's bufio client vs cmd root_lock).
++ * - must take shrinker_rwsem without holding cmd->root_lock
++ */
++ new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
++ CACHE_MAX_CONCURRENT_LOCKS);
+
+ WRITE_LOCK(cmd);
+- __destroy_persistent_data_objects(cmd);
+- r = __create_persistent_data_objects(cmd, false);
++ if (cmd->fail_io) {
++ WRITE_UNLOCK(cmd);
++ goto out;
++ }
++
++ __destroy_persistent_data_objects(cmd, false);
++ old_bm = cmd->bm;
++ if (IS_ERR(new_bm)) {
++ DMERR("could not create block manager during abort");
++ cmd->bm = NULL;
++ r = PTR_ERR(new_bm);
++ goto out_unlock;
++ }
++
++ cmd->bm = new_bm;
++ r = __open_or_format_metadata(cmd, false);
++ if (r) {
++ cmd->bm = NULL;
++ goto out_unlock;
++ }
++ new_bm = NULL;
++out_unlock:
+ if (r)
+ cmd->fail_io = true;
+ WRITE_UNLOCK(cmd);
++ dm_block_manager_destroy(old_bm);
++out:
++ if (new_bm && !IS_ERR(new_bm))
++ dm_block_manager_destroy(new_bm);
+
+ return r;
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index f595e9867cbec..10b2a4e10a46b 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1011,16 +1011,16 @@ static void abort_transaction(struct cache *cache)
+ if (get_cache_mode(cache) >= CM_READ_ONLY)
+ return;
+
+- if (dm_cache_metadata_set_needs_check(cache->cmd)) {
+- DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+- set_cache_mode(cache, CM_FAIL);
+- }
+-
+ DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ if (dm_cache_metadata_abort(cache->cmd)) {
+ DMERR("%s: failed to abort metadata transaction", dev_name);
+ set_cache_mode(cache, CM_FAIL);
+ }
++
++ if (dm_cache_metadata_set_needs_check(cache->cmd)) {
++ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
++ set_cache_mode(cache, CM_FAIL);
++ }
+ }
+
+ static void metadata_operation_failed(struct cache *cache, const char *op, int r)
+@@ -1992,6 +1992,7 @@ static void destroy(struct cache *cache)
+ if (cache->prison)
+ dm_bio_prison_destroy_v2(cache->prison);
+
++ cancel_delayed_work_sync(&cache->waker);
+ if (cache->wq)
+ destroy_workqueue(cache->wq);
+
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index eb7a5d3ba81a2..ce9d03f4e9813 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -1977,6 +1977,7 @@ static void clone_dtr(struct dm_target *ti)
+
+ mempool_exit(&clone->hydration_pool);
+ dm_kcopyd_client_destroy(clone->kcopyd_client);
++ cancel_delayed_work_sync(&clone->waker);
+ destroy_workqueue(clone->wq);
+ hash_table_exit(clone);
+ dm_clone_metadata_close(clone->cmd);
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 32f36810c3c94..3c9a2be97e55c 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4195,6 +4195,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+ BUG_ON(!list_empty(&ic->wait_list));
+
++ if (ic->mode == 'B')
++ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ if (ic->metadata_wq)
+ destroy_workqueue(ic->metadata_wq);
+ if (ic->wait_wq)
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 784468f3cb747..92e646d597eaa 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -701,6 +701,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ goto bad_cleanup_data_sm;
+ }
+
++ /*
++ * For pool metadata opening process, root setting is redundant
++ * because it will be set again in __begin_transaction(). But dm
++ * pool aborting process really needs to get last transaction's
++ * root to avoid accessing broken btree.
++ */
++ pmd->root = le64_to_cpu(disk_super->data_mapping_root);
++ pmd->details_root = le64_to_cpu(disk_super->device_details_root);
++
+ __setup_btree_details(pmd);
+ dm_bm_unlock(sblock);
+
+@@ -753,13 +762,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
+ return r;
+ }
+
+-static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
++static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
++ bool destroy_bm)
+ {
+ dm_sm_destroy(pmd->data_sm);
+ dm_sm_destroy(pmd->metadata_sm);
+ dm_tm_destroy(pmd->nb_tm);
+ dm_tm_destroy(pmd->tm);
+- dm_block_manager_destroy(pmd->bm);
++ if (destroy_bm)
++ dm_block_manager_destroy(pmd->bm);
+ }
+
+ static int __begin_transaction(struct dm_pool_metadata *pmd)
+@@ -966,7 +977,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ }
+ pmd_write_unlock(pmd);
+ if (!pmd->fail_io)
+- __destroy_persistent_data_objects(pmd);
++ __destroy_persistent_data_objects(pmd, true);
+
+ kfree(pmd);
+ return 0;
+@@ -1875,19 +1886,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
+ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
+ {
+ int r = -EINVAL;
++ struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
++
++ /* fail_io is double-checked with pmd->root_lock held below */
++ if (unlikely(pmd->fail_io))
++ return r;
++
++ /*
++ * Replacement block manager (new_bm) is created and old_bm destroyed outside of
++ * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
++ * shrinker associated with the block manager's bufio client vs pmd root_lock).
++ * - must take shrinker_rwsem without holding pmd->root_lock
++ */
++ new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
++ THIN_MAX_CONCURRENT_LOCKS);
+
+ pmd_write_lock(pmd);
+- if (pmd->fail_io)
++ if (pmd->fail_io) {
++ pmd_write_unlock(pmd);
+ goto out;
++ }
+
+ __set_abort_with_changes_flags(pmd);
+- __destroy_persistent_data_objects(pmd);
+- r = __create_persistent_data_objects(pmd, false);
++ __destroy_persistent_data_objects(pmd, false);
++ old_bm = pmd->bm;
++ if (IS_ERR(new_bm)) {
++ DMERR("could not create block manager during abort");
++ pmd->bm = NULL;
++ r = PTR_ERR(new_bm);
++ goto out_unlock;
++ }
++
++ pmd->bm = new_bm;
++ r = __open_or_format_metadata(pmd, false);
++ if (r) {
++ pmd->bm = NULL;
++ goto out_unlock;
++ }
++ new_bm = NULL;
++out_unlock:
+ if (r)
+ pmd->fail_io = true;
+-
+-out:
+ pmd_write_unlock(pmd);
++ dm_block_manager_destroy(old_bm);
++out:
++ if (new_bm && !IS_ERR(new_bm))
++ dm_block_manager_destroy(new_bm);
+
+ return r;
+ }
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index bf0b67e72254c..4f161725dda0a 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2931,6 +2931,8 @@ static void __pool_destroy(struct pool *pool)
+ dm_bio_prison_destroy(pool->prison);
+ dm_kcopyd_client_destroy(pool->copier);
+
++ cancel_delayed_work_sync(&pool->waker);
++ cancel_delayed_work_sync(&pool->no_space_timeout);
+ if (pool->wq)
+ destroy_workqueue(pool->wq);
+
+@@ -3591,23 +3593,31 @@ static int pool_preresume(struct dm_target *ti)
+ */
+ r = bind_control_target(pool, ti);
+ if (r)
+- return r;
++ goto out;
+
+ dm_pool_register_pre_commit_callback(pool->pmd,
+ metadata_pre_commit_callback, pt);
+
+ r = maybe_resize_data_dev(ti, &need_commit1);
+ if (r)
+- return r;
++ goto out;
+
+ r = maybe_resize_metadata_dev(ti, &need_commit2);
+ if (r)
+- return r;
++ goto out;
+
+ if (need_commit1 || need_commit2)
+ (void) commit(pool);
++out:
++ /*
++ * When a thin-pool is PM_FAIL, it cannot be rebuilt if
++ * bio is in deferred list. Therefore need to return 0
++ * to allow pool_resume() to flush IO.
++ */
++ if (r && get_pool_mode(pool) == PM_FAIL)
++ r = 0;
+
+- return 0;
++ return r;
+ }
+
+ static void pool_suspend_active_thins(struct pool *pool)
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index a95e20c3d0d4f..0545cdccf6369 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -489,7 +489,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
+ sb = kmap_atomic(bitmap->storage.sb_page);
+ pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+ pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
+- pr_debug(" version: %d\n", le32_to_cpu(sb->version));
++ pr_debug(" version: %u\n", le32_to_cpu(sb->version));
+ pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
+ le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+@@ -500,11 +500,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
+ pr_debug("events cleared: %llu\n",
+ (unsigned long long) le64_to_cpu(sb->events_cleared));
+ pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
+- pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+- pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
++ pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
++ pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
+ pr_debug(" sync size: %llu KB\n",
+ (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+- pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
++ pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
+ kunmap_atomic(sb);
+ }
+
+@@ -2110,7 +2110,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ bytes = DIV_ROUND_UP(chunks, 8);
+ if (!bitmap->mddev->bitmap_info.external)
+ bytes += sizeof(bitmap_super_t);
+- } while (bytes > (space << 9));
++ } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
++ (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
+ } else
+ chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
+
+@@ -2155,7 +2156,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ bitmap->counts.missing_pages = pages;
+ bitmap->counts.chunkshift = chunkshift;
+ bitmap->counts.chunks = chunks;
+- bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
++ bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
+ BITMAP_BLOCK_SHIFT);
+
+ blocks = min(old_counts.chunks << old_counts.chunkshift,
+@@ -2181,8 +2182,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ bitmap->counts.missing_pages = old_counts.pages;
+ bitmap->counts.chunkshift = old_counts.chunkshift;
+ bitmap->counts.chunks = old_counts.chunks;
+- bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
+- BITMAP_BLOCK_SHIFT);
++ bitmap->mddev->bitmap_info.chunksize =
++ 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
+ blocks = old_counts.chunks << old_counts.chunkshift;
+ pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ break;
+@@ -2200,20 +2201,23 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+
+ if (set) {
+ bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
+- if (*bmc_new == 0) {
+- /* need to set on-disk bits too. */
+- sector_t end = block + new_blocks;
+- sector_t start = block >> chunkshift;
+- start <<= chunkshift;
+- while (start < end) {
+- md_bitmap_file_set_bit(bitmap, block);
+- start += 1 << chunkshift;
++ if (bmc_new) {
++ if (*bmc_new == 0) {
++ /* need to set on-disk bits too. */
++ sector_t end = block + new_blocks;
++ sector_t start = block >> chunkshift;
++
++ start <<= chunkshift;
++ while (start < end) {
++ md_bitmap_file_set_bit(bitmap, block);
++ start += 1 << chunkshift;
++ }
++ *bmc_new = 2;
++ md_bitmap_count_page(&bitmap->counts, block, 1);
++ md_bitmap_set_pending(&bitmap->counts, block);
+ }
+- *bmc_new = 2;
+- md_bitmap_count_page(&bitmap->counts, block, 1);
+- md_bitmap_set_pending(&bitmap->counts, block);
++ *bmc_new |= NEEDED_MASK;
+ }
+- *bmc_new |= NEEDED_MASK;
+ if (new_blocks < old_blocks)
+ old_blocks = new_blocks;
+ }
+@@ -2520,6 +2524,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
+ if (csize < 512 ||
+ !is_power_of_2(csize))
+ return -EINVAL;
++ if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
++ sizeof(((bitmap_super_t *)0)->chunksize))))
++ return -EOVERFLOW;
+ mddev->bitmap_info.chunksize = csize;
+ return len;
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 5226a23c72dba..aa2993d5d5d38 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -481,13 +481,14 @@ static void md_end_flush(struct bio *bio)
+ struct md_rdev *rdev = bio->bi_private;
+ struct mddev *mddev = rdev->mddev;
+
++ bio_put(bio);
++
+ rdev_dec_pending(rdev, mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pre-request flush has finished */
+ queue_work(md_wq, &mddev->flush_work);
+ }
+- bio_put(bio);
+ }
+
+ static void md_submit_flush_data(struct work_struct *ws);
+@@ -885,10 +886,12 @@ static void super_written(struct bio *bio)
+ } else
+ clear_bit(LastDev, &rdev->flags);
+
++ bio_put(bio);
++
++ rdev_dec_pending(rdev, mddev);
++
+ if (atomic_dec_and_test(&mddev->pending_writes))
+ wake_up(&mddev->sb_wait);
+- rdev_dec_pending(rdev, mddev);
+- bio_put(bio);
+ }
+
+ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index e87184645c540..1919de4c8c12d 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -3132,6 +3132,7 @@ static int raid1_run(struct mddev *mddev)
+ * RAID1 needs at least one disk in active
+ */
+ if (conf->raid_disks - mddev->degraded < 1) {
++ md_unregister_thread(&conf->thread);
+ ret = -EINVAL;
+ goto abort;
+ }
+diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
+index e58cb8434dafe..12b7f698f5623 100644
+--- a/drivers/media/dvb-core/dmxdev.c
++++ b/drivers/media/dvb-core/dmxdev.c
+@@ -800,6 +800,11 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
+ if (mutex_lock_interruptible(&dmxdev->mutex))
+ return -ERESTARTSYS;
+
++ if (dmxdev->exit) {
++ mutex_unlock(&dmxdev->mutex);
++ return -ENODEV;
++ }
++
+ for (i = 0; i < dmxdev->filternum; i++)
+ if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
+ break;
+@@ -1458,7 +1463,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init);
+
+ void dvb_dmxdev_release(struct dmxdev *dmxdev)
+ {
++ mutex_lock(&dmxdev->mutex);
+ dmxdev->exit = 1;
++ mutex_unlock(&dmxdev->mutex);
++
+ if (dmxdev->dvbdev->users > 1) {
+ wait_event(dmxdev->dvbdev->wait_queue,
+ dmxdev->dvbdev->users == 1);
+diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
+index cfc27629444f3..fd476536d32ed 100644
+--- a/drivers/media/dvb-core/dvb_ca_en50221.c
++++ b/drivers/media/dvb-core/dvb_ca_en50221.c
+@@ -157,7 +157,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
+ {
+ unsigned int i;
+
+- dvb_free_device(ca->dvbdev);
++ dvb_device_put(ca->dvbdev);
+ for (i = 0; i < ca->slot_count; i++)
+ vfree(ca->slot_info[i].rx_buffer.data);
+
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 06ea30a689d75..b04638321b75b 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -135,7 +135,7 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+ if (fepriv)
+- dvb_free_device(fepriv->dvbdev);
++ dvb_device_put(fepriv->dvbdev);
+
+ dvb_frontend_invoke_release(fe, fe->ops.release);
+
+@@ -2961,6 +2961,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+ .name = fe->ops.info.name,
+ #endif
+ };
++ int ret;
+
+ dev_dbg(dvb->device, "%s:\n", __func__);
+
+@@ -2994,8 +2995,13 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
+ "DVB: registering adapter %i frontend %i (%s)...\n",
+ fe->dvb->num, fe->id, fe->ops.info.name);
+
+- dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
++ ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
+ fe, DVB_DEVICE_FRONTEND, 0);
++ if (ret) {
++ dvb_frontend_put(fe);
++ mutex_unlock(&frontend_mutex);
++ return ret;
++ }
+
+ /*
+ * Initialize the cache to the proper values according with the
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index 197cf17b246f7..454301149a8f5 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -107,7 +107,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
+ new_fops = fops_get(dvbdev->fops);
+ if (!new_fops)
+ goto fail;
+- file->private_data = dvbdev;
++ file->private_data = dvb_device_get(dvbdev);
+ replace_fops(file, new_fops);
+ if (file->f_op->open)
+ err = file->f_op->open(inode, file);
+@@ -171,6 +171,9 @@ int dvb_generic_release(struct inode *inode, struct file *file)
+ }
+
+ dvbdev->users++;
++
++ dvb_device_put(dvbdev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(dvb_generic_release);
+@@ -342,6 +345,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
+ GFP_KERNEL);
+ if (!dvbdev->pads) {
+ kfree(dvbdev->entity);
++ dvbdev->entity = NULL;
+ return -ENOMEM;
+ }
+ }
+@@ -488,6 +492,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ }
+
+ memcpy(dvbdev, template, sizeof(struct dvb_device));
++ kref_init(&dvbdev->ref);
+ dvbdev->type = type;
+ dvbdev->id = id;
+ dvbdev->adapter = adap;
+@@ -517,7 +522,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ #endif
+
+ dvbdev->minor = minor;
+- dvb_minors[minor] = dvbdev;
++ dvb_minors[minor] = dvb_device_get(dvbdev);
+ up_write(&minor_rwsem);
+
+ ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
+@@ -557,6 +562,7 @@ void dvb_remove_device(struct dvb_device *dvbdev)
+
+ down_write(&minor_rwsem);
+ dvb_minors[dvbdev->minor] = NULL;
++ dvb_device_put(dvbdev);
+ up_write(&minor_rwsem);
+
+ dvb_media_device_free(dvbdev);
+@@ -568,21 +574,34 @@ void dvb_remove_device(struct dvb_device *dvbdev)
+ EXPORT_SYMBOL(dvb_remove_device);
+
+
+-void dvb_free_device(struct dvb_device *dvbdev)
++static void dvb_free_device(struct kref *ref)
+ {
+- if (!dvbdev)
+- return;
++ struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
+
+ kfree (dvbdev->fops);
+ kfree (dvbdev);
+ }
+-EXPORT_SYMBOL(dvb_free_device);
++
++
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
++{
++ kref_get(&dvbdev->ref);
++ return dvbdev;
++}
++EXPORT_SYMBOL(dvb_device_get);
++
++
++void dvb_device_put(struct dvb_device *dvbdev)
++{
++ if (dvbdev)
++ kref_put(&dvbdev->ref, dvb_free_device);
++}
+
+
+ void dvb_unregister_device(struct dvb_device *dvbdev)
+ {
+ dvb_remove_device(dvbdev);
+- dvb_free_device(dvbdev);
++ dvb_device_put(dvbdev);
+ }
+ EXPORT_SYMBOL(dvb_unregister_device);
+
+diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
+index e92542b92d349..6457b0912d14d 100644
+--- a/drivers/media/dvb-frontends/bcm3510.c
++++ b/drivers/media/dvb-frontends/bcm3510.c
+@@ -649,6 +649,7 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe)
+ deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size);
+ if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) {
+ err("firmware download failed: %d\n",ret);
++ release_firmware(fw);
+ return ret;
+ }
+ i += 4 + len;
+diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
+index 3d54a0ec86afd..3ae1f3a2f1420 100644
+--- a/drivers/media/dvb-frontends/stv0288.c
++++ b/drivers/media/dvb-frontends/stv0288.c
+@@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
+ struct stv0288_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+- char tm;
+- unsigned char tda[3];
+- u8 reg, time_out = 0;
++ u8 tda[3], reg, time_out = 0;
++ s8 tm;
+
+ dprintk("%s : FE_SET_FRONTEND\n", __func__);
+
+diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
+index 7a49651f4d1f2..d7d85edeedd5e 100644
+--- a/drivers/media/i2c/ad5820.c
++++ b/drivers/media/i2c/ad5820.c
+@@ -314,18 +314,18 @@ static int ad5820_probe(struct i2c_client *client,
+
+ ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
+ if (ret < 0)
+- goto cleanup2;
++ goto clean_mutex;
+
+ ret = v4l2_async_register_subdev(&coil->subdev);
+ if (ret < 0)
+- goto cleanup;
++ goto clean_entity;
+
+ return ret;
+
+-cleanup2:
+- mutex_destroy(&coil->power_lock);
+-cleanup:
++clean_entity:
+ media_entity_cleanup(&coil->subdev.entity);
++clean_mutex:
++ mutex_destroy(&coil->power_lock);
+ return ret;
+ }
+
+diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
+index 9ae04e18e6c68..59b039b953bbc 100644
+--- a/drivers/media/pci/saa7164/saa7164-core.c
++++ b/drivers/media/pci/saa7164/saa7164-core.c
+@@ -1227,7 +1227,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
+
+ if (saa7164_dev_setup(dev) < 0) {
+ err = -EINVAL;
+- goto fail_free;
++ goto fail_dev;
+ }
+
+ /* print pci info */
+@@ -1395,6 +1395,8 @@ fail_fw:
+
+ fail_irq:
+ saa7164_dev_unregister(dev);
++fail_dev:
++ pci_disable_device(pci_dev);
+ fail_free:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
+diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
+index 6e1ba4846ea47..c52ee141b8cc4 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-core.c
++++ b/drivers/media/pci/solo6x10/solo6x10-core.c
+@@ -420,6 +420,7 @@ static int solo_sysfs_init(struct solo_dev *solo_dev)
+ solo_dev->nr_chans);
+
+ if (device_register(dev)) {
++ put_device(dev);
+ dev->parent = NULL;
+ return -ENOMEM;
+ }
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
+index e6b68be09f8f0..73023d34d9202 100644
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -852,7 +852,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
+ /* Only H.264BP and H.263P3 are considered */
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
+- if (!iram_info->buf_dbk_c_use)
++ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+@@ -876,7 +876,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
+
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
+- if (!iram_info->buf_dbk_c_use)
++ if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+@@ -1082,10 +1082,16 @@ static int coda_start_encoding(struct coda_ctx *ctx)
+ }
+
+ if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
+- if (!ctx->params.jpeg_qmat_tab[0])
++ if (!ctx->params.jpeg_qmat_tab[0]) {
+ ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
+- if (!ctx->params.jpeg_qmat_tab[1])
++ if (!ctx->params.jpeg_qmat_tab[0])
++ return -ENOMEM;
++ }
++ if (!ctx->params.jpeg_qmat_tab[1]) {
+ ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
++ if (!ctx->params.jpeg_qmat_tab[1])
++ return -ENOMEM;
++ }
+ coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
+ }
+
+diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
+index cde60fbb23a88..5b06c83f5c99d 100644
+--- a/drivers/media/platform/exynos4-is/fimc-core.c
++++ b/drivers/media/platform/exynos4-is/fimc-core.c
+@@ -1231,7 +1231,7 @@ int __init fimc_register_driver(void)
+ return platform_driver_register(&fimc_driver);
+ }
+
+-void __exit fimc_unregister_driver(void)
++void fimc_unregister_driver(void)
+ {
+ platform_driver_unregister(&fimc_driver);
+ }
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index a07d796f63df0..707feb35a9507 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -1581,7 +1581,11 @@ static int __init fimc_md_init(void)
+ if (ret)
+ return ret;
+
+- return platform_driver_register(&fimc_md_driver);
++ ret = platform_driver_register(&fimc_md_driver);
++ if (ret)
++ fimc_unregister_driver();
++
++ return ret;
+ }
+
+ static void __exit fimc_md_exit(void)
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index 4c2675b437181..6a5ec133a9572 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -438,7 +438,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+
+ ret = media_pipeline_start(&vdev->entity, &video->pipe);
+ if (ret < 0)
+- return ret;
++ goto flush_buffers;
+
+ ret = video_check_format(video);
+ if (ret < 0)
+@@ -467,6 +467,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+ error:
+ media_pipeline_stop(&vdev->entity);
+
++flush_buffers:
+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index 9faecd049002f..d3fd3375ce197 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -1580,8 +1580,18 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
+ .port_num = MFC_NUM_PORTS_V7,
+ .buf_size = &buf_size_v7,
+ .fw_name[0] = "s5p-mfc-v7.fw",
+- .clk_names = {"mfc", "sclk_mfc"},
+- .num_clocks = 2,
++ .clk_names = {"mfc"},
++ .num_clocks = 1,
++};
++
++static struct s5p_mfc_variant mfc_drvdata_v7_3250 = {
++ .version = MFC_VERSION_V7,
++ .version_bit = MFC_V7_BIT,
++ .port_num = MFC_NUM_PORTS_V7,
++ .buf_size = &buf_size_v7,
++ .fw_name[0] = "s5p-mfc-v7.fw",
++ .clk_names = {"mfc", "sclk_mfc"},
++ .num_clocks = 2,
+ };
+
+ static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
+@@ -1651,6 +1661,9 @@ static const struct of_device_id exynos_mfc_match[] = {
+ }, {
+ .compatible = "samsung,mfc-v7",
+ .data = &mfc_drvdata_v7,
++ }, {
++ .compatible = "samsung,exynos3250-mfc",
++ .data = &mfc_drvdata_v7_3250,
+ }, {
+ .compatible = "samsung,mfc-v8",
+ .data = &mfc_drvdata_v8,
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+index da138c314963a..58822ec5370e2 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+@@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
+ s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ /* Wait until instance is returned or timeout occurred */
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+- S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0))
++ S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)){
++ clear_work_bit_irqsave(ctx);
+ mfc_err("Err returning instance\n");
++ }
+
+ /* Free resources */
+ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+index 912fe0c5ab184..6ed3df5ae5bb5 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+@@ -1212,6 +1212,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ unsigned long mb_y_addr, mb_c_addr;
+ int slice_type;
+ unsigned int strm_size;
++ bool src_ready;
+
+ slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
+ strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
+@@ -1251,7 +1252,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ }
+ }
+ }
+- if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
++ if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING ||
++ ctx->state == MFCINST_FINISHING)) {
+ mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ if (mb_entry->flags & MFC_BUF_FLAG_USED) {
+@@ -1282,7 +1284,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+ vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size);
+ vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+- if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0))
++
++ src_ready = true;
++ if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0)
++ src_ready = false;
++ if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
++ src_ready = false;
++ if (!src_ready || ctx->dst_queue_cnt == 0)
+ clear_work_bit(ctx);
+
+ return 0;
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+index a1453053e31ab..ef8169f6c428c 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+@@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ }
+
+ /* aspect ratio VUI */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 5);
+ reg |= ((p_h264->vui_sar & 0x1) << 5);
+ writel(reg, mfc_regs->e_h264_options);
+@@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+
+ /* intra picture period for H.264 open GOP */
+ /* control */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 4);
+ reg |= ((p_h264->open_gop & 0x1) << 4);
+ writel(reg, mfc_regs->e_h264_options);
+@@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ }
+
+ /* 'WEIGHTED_BI_PREDICTION' for B is disable */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x3 << 9);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 14);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* ASO */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 6);
+ reg |= ((p_h264->aso & 0x1) << 6);
+ writel(reg, mfc_regs->e_h264_options);
+
+ /* hier qp enable */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 8);
+ reg |= ((p_h264->open_gop & 0x1) << 8);
+ writel(reg, mfc_regs->e_h264_options);
+@@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+ writel(reg, mfc_regs->e_h264_num_t_layer);
+
+ /* frame packing SEI generation */
+- readl(mfc_regs->e_h264_options);
++ reg = readl(mfc_regs->e_h264_options);
+ reg &= ~(0x1 << 25);
+ reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
+ writel(reg, mfc_regs->e_h264_options);
+diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+index 5baada4f65e5d..69070b7068318 100644
+--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
++++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+@@ -939,6 +939,7 @@ static int configure_channels(struct c8sectpfei *fei)
+ if (ret) {
+ dev_err(fei->dev,
+ "configure_memdma_and_inputblock failed\n");
++ of_node_put(child);
+ goto err_unmap;
+ }
+ index++;
+diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
+index 208807d3733f7..842ebfe9b117a 100644
+--- a/drivers/media/platform/vivid/vivid-vid-cap.c
++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
+@@ -935,6 +935,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
+ if (dev->has_compose_cap) {
+ v4l2_rect_set_min_size(compose, &min_rect);
+ v4l2_rect_set_max_size(compose, &max_rect);
++ v4l2_rect_map_inside(compose, &fmt);
+ }
+ dev->fmt_cap_rect = fmt;
+ tpg_s_buf_height(&dev->tpg, fmt.height);
+diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
+index 3f8634a465730..1365ae732b799 100644
+--- a/drivers/media/radio/si470x/radio-si470x-usb.c
++++ b/drivers/media/radio/si470x/radio-si470x-usb.c
+@@ -733,8 +733,10 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+
+ /* start radio */
+ retval = si470x_start_usb(radio);
+- if (retval < 0)
++ if (retval < 0 && !radio->int_in_running)
+ goto err_buf;
++ else if (retval < 0) /* in case of radio->int_in_running == 1 */
++ goto err_all;
+
+ /* set initial frequency */
+ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index c683a244b9fa8..d8401ef9b0a79 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -604,15 +604,14 @@ static int send_packet(struct imon_context *ictx)
+ pr_err_ratelimited("error submitting urb(%d)\n", retval);
+ } else {
+ /* Wait for transmission to complete (or abort) */
+- mutex_unlock(&ictx->lock);
+ retval = wait_for_completion_interruptible(
+ &ictx->tx.finished);
+ if (retval) {
+ usb_kill_urb(ictx->tx_urb);
+ pr_err_ratelimited("task interrupted\n");
+ }
+- mutex_lock(&ictx->lock);
+
++ ictx->tx.busy = false;
+ retval = ictx->tx.status;
+ if (retval)
+ pr_err_ratelimited("packet tx failed (%d)\n", retval);
+@@ -919,7 +918,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
+ return -ENODEV;
+ }
+
+- mutex_lock(&ictx->lock);
++ if (mutex_lock_interruptible(&ictx->lock))
++ return -ERESTARTSYS;
+
+ if (!ictx->dev_present_intf0) {
+ pr_err_ratelimited("no iMON device present\n");
+diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
+index 5aa9c501ed9c9..ffc0db67d4d68 100644
+--- a/drivers/media/usb/dvb-usb/az6027.c
++++ b/drivers/media/usb/dvb-usb/az6027.c
+@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
+ if (msg[i].addr == 0x99) {
+ req = 0xBE;
+ index = 0;
++ if (msg[i].len < 1) {
++ i = -EOPNOTSUPP;
++ break;
++ }
+ value = msg[i].buf[0] & 0x00ff;
+ length = 1;
+ az6027_usb_out_op(d, req, value, index, data, length);
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index e7720ff11d3d9..cb5bf119df9f1 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -81,7 +81,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
+
+ ret = dvb_usb_adapter_stream_init(adap);
+ if (ret)
+- return ret;
++ goto stream_init_err;
+
+ ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
+ if (ret)
+@@ -114,6 +114,8 @@ frontend_init_err:
+ dvb_usb_adapter_dvb_exit(adap);
+ dvb_init_err:
+ dvb_usb_adapter_stream_exit(adap);
++stream_init_err:
++ kfree(adap->priv);
+ return ret;
+ }
+
+diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
+index aeb2f497c6831..6a6cd046cefb6 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
+@@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory {
+
+ static int __videobuf_dc_alloc(struct device *dev,
+ struct videobuf_dma_contig_memory *mem,
+- unsigned long size, gfp_t flags)
++ unsigned long size)
+ {
+ mem->size = size;
+- mem->vaddr = dma_alloc_coherent(dev, mem->size,
+- &mem->dma_handle, flags);
+-
++ mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
++ GFP_KERNEL);
+ if (!mem->vaddr) {
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ return -ENOMEM;
+@@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
+ return videobuf_dma_contig_user_get(mem, vb);
+
+ /* allocate memory for the read() method */
+- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
+- GFP_KERNEL))
++ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
+ return -ENOMEM;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+@@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+- if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
+- GFP_KERNEL | __GFP_COMP))
++ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
+ goto error;
+
+- /* Try to remap memory */
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+ /* the "vm_pgoff" is just used in v4l2 to find the
+ * corresponding buffer data structure which is allocated
+ * earlier and it does not mean the offset from the physical
+ * buffer start address as usual. So set it to 0 to pass
+- * the sanity check in vm_iomap_memory().
++ * the sanity check in dma_mmap_coherent().
+ */
+ vma->vm_pgoff = 0;
+-
+- retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
++ retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
++ mem->size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+ retval);
+diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
+index 186308f1f8eba..6334376826a92 100644
+--- a/drivers/misc/cxl/guest.c
++++ b/drivers/misc/cxl/guest.c
+@@ -959,10 +959,10 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+- goto err_put1;
++ goto err_del_dev;
+
+ /*
+ * pHyp doesn't expose the programming models supported by the
+@@ -978,7 +978,7 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+ afu->modes_supported = CXL_MODE_DIRECTED;
+
+ if ((rc = cxl_afu_select_best_mode(afu)))
+- goto err_put2;
++ goto err_remove_sysfs;
+
+ adapter->afu[afu->slice] = afu;
+
+@@ -998,10 +998,12 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
+
+ return 0;
+
+-err_put2:
++err_remove_sysfs:
+ cxl_sysfs_afu_remove(afu);
+-err_put1:
+- device_unregister(&afu->dev);
++err_del_dev:
++ device_del(&afu->dev);
++err_put_dev:
++ put_device(&afu->dev);
+ free = false;
+ guest_release_serr_irq(afu);
+ err2:
+@@ -1135,18 +1137,20 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+- goto err_put1;
++ goto err_del_dev;
+
+ /* release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
+ return adapter;
+
+-err_put1:
+- device_unregister(&adapter->dev);
++err_del_dev:
++ device_del(&adapter->dev);
++err_put_dev:
++ put_device(&adapter->dev);
+ free = false;
+ cxl_guest_remove_chardev(adapter);
+ err1:
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 2ba899f5659ff..d183836d80e3f 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -387,6 +387,7 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ rc = get_phb_index(np, phb_index);
+ if (rc) {
+ pr_err("cxl: invalid phb index\n");
++ of_node_put(np);
+ return rc;
+ }
+
+@@ -1164,10 +1165,10 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+- goto err_put1;
++ goto err_del_dev;
+
+ adapter->afu[afu->slice] = afu;
+
+@@ -1176,10 +1177,12 @@ static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+
+ return 0;
+
+-err_put1:
++err_del_dev:
++ device_del(&afu->dev);
++err_put_dev:
+ pci_deconfigure_afu(afu);
+ cxl_debugfs_afu_remove(afu);
+- device_unregister(&afu->dev);
++ put_device(&afu->dev);
+ return rc;
+
+ err_free_native:
+@@ -1667,23 +1670,25 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+- goto err_put1;
++ goto err_put_dev;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+- goto err_put1;
++ goto err_del_dev;
+
+ /* Release the context lock as adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
+ return adapter;
+
+-err_put1:
++err_del_dev:
++ device_del(&adapter->dev);
++err_put_dev:
+ /* This should mirror cxl_remove_adapter, except without the
+ * sysfs parts
+ */
+ cxl_debugfs_adapter_remove(adapter);
+ cxl_deconfigure_adapter(adapter);
+- device_unregister(&adapter->dev);
++ put_device(&adapter->dev);
+ return ERR_PTR(rc);
+
+ err_release:
+diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
+index e094809b54ff5..524ded87964d1 100644
+--- a/drivers/misc/ocxl/file.c
++++ b/drivers/misc/ocxl/file.c
+@@ -543,8 +543,11 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
+ goto err_put;
+
+ rc = device_register(&info->dev);
+- if (rc)
+- goto err_put;
++ if (rc) {
++ free_minor(info);
++ put_device(&info->dev);
++ return rc;
++ }
+
+ rc = ocxl_sysfs_register_afu(info);
+ if (rc)
+diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
+index 4b713a80b5726..7f26a78bb4032 100644
+--- a/drivers/misc/sgi-gru/grufault.c
++++ b/drivers/misc/sgi-gru/grufault.c
+@@ -648,6 +648,7 @@ int gru_handle_user_call_os(unsigned long cb)
+ if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
+ return -EINVAL;
+
++again:
+ gts = gru_find_lock_gts(cb);
+ if (!gts)
+ return -EINVAL;
+@@ -656,7 +657,11 @@ int gru_handle_user_call_os(unsigned long cb)
+ if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
+ goto exit;
+
+- gru_check_context_placement(gts);
++ if (gru_check_context_placement(gts)) {
++ gru_unlock_gts(gts);
++ gru_unload_context(gts, 1);
++ goto again;
++ }
+
+ /*
+ * CCH may contain stale data if ts_force_cch_reload is set.
+@@ -874,7 +879,11 @@ int gru_set_context_option(unsigned long arg)
+ } else {
+ gts->ts_user_blade_id = req.val1;
+ gts->ts_user_chiplet_id = req.val0;
+- gru_check_context_placement(gts);
++ if (gru_check_context_placement(gts)) {
++ gru_unlock_gts(gts);
++ gru_unload_context(gts, 1);
++ return ret;
++ }
+ }
+ break;
+ case sco_gseg_owner:
+diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
+index 40ac59dd018c9..e2325e3d077ea 100644
+--- a/drivers/misc/sgi-gru/grumain.c
++++ b/drivers/misc/sgi-gru/grumain.c
+@@ -716,9 +716,10 @@ static int gru_check_chiplet_assignment(struct gru_state *gru,
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ */
+-void gru_check_context_placement(struct gru_thread_state *gts)
++int gru_check_context_placement(struct gru_thread_state *gts)
+ {
+ struct gru_state *gru;
++ int ret = 0;
+
+ /*
+ * If the current task is the context owner, verify that the
+@@ -726,15 +727,23 @@ void gru_check_context_placement(struct gru_thread_state *gts)
+ * references. Pthread apps use non-owner references to the CBRs.
+ */
+ gru = gts->ts_gru;
++ /*
++ * If gru or gts->ts_tgid_owner isn't initialized properly, return
++ * success to indicate that the caller does not need to unload the
++	 * gru context. The caller is responsible for their inspection and
++ * reinitialization if needed.
++ */
+ if (!gru || gts->ts_tgid_owner != current->tgid)
+- return;
++ return ret;
+
+ if (!gru_check_chiplet_assignment(gru, gts)) {
+ STAT(check_context_unload);
+- gru_unload_context(gts, 1);
++ ret = -EINVAL;
+ } else if (gru_retarget_intr(gts)) {
+ STAT(check_context_retarget_intr);
+ }
++
++ return ret;
+ }
+
+
+@@ -934,7 +943,12 @@ again:
+ mutex_lock(&gts->ts_ctxlock);
+ preempt_disable();
+
+- gru_check_context_placement(gts);
++ if (gru_check_context_placement(gts)) {
++ preempt_enable();
++ mutex_unlock(&gts->ts_ctxlock);
++ gru_unload_context(gts, 1);
++ return VM_FAULT_NOPAGE;
++ }
+
+ if (!gts->ts_gru) {
+ STAT(load_user_context);
+diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
+index a7e44b2eb413f..6cebec4dd316b 100644
+--- a/drivers/misc/sgi-gru/grutables.h
++++ b/drivers/misc/sgi-gru/grutables.h
+@@ -637,7 +637,7 @@ extern int gru_user_flush_tlb(unsigned long arg);
+ extern int gru_user_unload_context(unsigned long arg);
+ extern int gru_get_exception_detail(unsigned long arg);
+ extern int gru_set_context_option(unsigned long address);
+-extern void gru_check_context_placement(struct gru_thread_state *gts);
++extern int gru_check_context_placement(struct gru_thread_state *gts);
+ extern int gru_cpu_fault_map_id(void);
+ extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
+ extern void gru_flush_all_tlb(struct gru_state *gru);
+diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
+index e6b40aa8fb425..8f0ffb46bf153 100644
+--- a/drivers/misc/tifm_7xx1.c
++++ b/drivers/misc/tifm_7xx1.c
+@@ -190,7 +190,7 @@ static void tifm_7xx1_switch_media(struct work_struct *work)
+ spin_unlock_irqrestore(&fm->lock, flags);
+ }
+ if (sock)
+- tifm_free_device(&sock->dev);
++ put_device(&sock->dev);
+ }
+ spin_lock_irqsave(&fm->lock, flags);
+ }
+diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
+index 026ca9194ce5b..a8ec2e6fefa87 100644
+--- a/drivers/mmc/host/alcor.c
++++ b/drivers/mmc/host/alcor.c
+@@ -1114,7 +1114,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ alcor_hw_init(host);
+
+ dev_set_drvdata(&pdev->dev, host);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto free_host;
++
+ return 0;
+
+ free_host:
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index c26fbe5f22221..9c084f64f7dba 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2217,6 +2217,7 @@ static int atmci_init_slot(struct atmel_mci *host,
+ {
+ struct mmc_host *mmc;
+ struct atmel_mci_slot *slot;
++ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
+ if (!mmc)
+@@ -2300,11 +2301,13 @@ static int atmci_init_slot(struct atmel_mci *host,
+
+ host->slot[id] = slot;
+ mmc_regulator_get_supply(mmc);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ mmc_free_host(mmc);
++ return ret;
++ }
+
+ if (gpio_is_valid(slot->detect_pin)) {
+- int ret;
+-
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
+
+ ret = request_irq(gpio_to_irq(slot->detect_pin),
+diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
+index 9044faf0050a1..95a8ba4cf3da9 100644
+--- a/drivers/mmc/host/meson-gx-mmc.c
++++ b/drivers/mmc/host/meson-gx-mmc.c
+@@ -1289,7 +1289,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
+ }
+
+ mmc->ops = &meson_mmc_ops;
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto err_free_irq;
+
+ return 0;
+
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index 7e4bc9124efde..1e5e2442b7486 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -2079,7 +2079,9 @@ static int mmci_probe(struct amba_device *dev,
+ pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+ pm_runtime_use_autosuspend(&dev->dev);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto clk_disable;
+
+ pm_runtime_put(&dev->dev);
+ return 0;
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
+index 1552d1f09c5c4..52307dce08ba8 100644
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -660,7 +660,9 @@ static int moxart_probe(struct platform_device *pdev)
+ goto out;
+
+ dev_set_drvdata(dev, mmc);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto out;
+
+ dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width);
+
+diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
+index 011b59a3602eb..9165af4760e32 100644
+--- a/drivers/mmc/host/mxcmmc.c
++++ b/drivers/mmc/host/mxcmmc.c
+@@ -1158,7 +1158,9 @@ static int mxcmci_probe(struct platform_device *pdev)
+
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto out_free_dma;
+
+ return 0;
+
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index d0df054b0b473..ee9edf817a326 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1998,7 +1998,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
+ if (!ret)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto err_irq;
+
+ if (mmc_pdata(host)->name != NULL) {
+ ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index 99f3958a037ce..7f96df4d2a871 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -761,7 +761,12 @@ static int pxamci_probe(struct platform_device *pdev)
+ dev_warn(dev, "gpio_ro and get_ro() both defined\n");
+ }
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ if (host->pdata && host->pdata->exit)
++ host->pdata->exit(dev, mmc);
++ goto out;
++ }
+
+ return 0;
+
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 81d0dfe553a82..3261560bede4e 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -1338,6 +1338,7 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+ #ifdef RTSX_USB_USE_LEDS_CLASS
+ int err;
+ #endif
++ int ret;
+
+ ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent));
+ if (!ucr)
+@@ -1374,7 +1375,15 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
+ INIT_WORK(&host->led_work, rtsx_usb_update_led);
+
+ #endif
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++#ifdef RTSX_USB_USE_LEDS_CLASS
++ led_classdev_unregister(&host->led);
++#endif
++ mmc_free_host(mmc);
++ pm_runtime_disable(&pdev->dev);
++ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index ccbf016dc3fd1..1b1dad9d9a604 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -223,13 +223,15 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
+ div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ sdhci_enable_clk(host, div);
+
+- /* enable auto gate sdhc_enable_auto_gate */
+- val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+- mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
+- SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+- if (mask != (val & mask)) {
+- val |= mask;
+- sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
++ /* Enable CLK_AUTO when the clock is greater than 400K. */
++ if (clk > 400000) {
++ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
++ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
++ SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
++ if (mask != (val & mask)) {
++ val |= mask;
++ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
++ }
+ }
+ }
+
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index f8b939e63e027..9548d022d52ba 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -194,6 +194,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ if (reg & SDHCI_CAN_DO_8BIT)
+ priv->vendor_hs200 = F_SDH30_EMMC_HS200;
+
++ if (!(reg & SDHCI_TIMEOUT_CLK_MASK))
++ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
++
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
+index 8d037c2071abc..497791ffada6d 100644
+--- a/drivers/mmc/host/toshsd.c
++++ b/drivers/mmc/host/toshsd.c
+@@ -651,7 +651,9 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (ret)
+ goto unmap;
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto free_irq;
+
+ base = pci_resource_start(pdev, 0);
+ dev_dbg(&pdev->dev, "MMIO %pa, IRQ %d\n", &base, pdev->irq);
+@@ -660,6 +662,8 @@ static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ return 0;
+
++free_irq:
++ free_irq(pdev->irq, host);
+ unmap:
+ pci_iounmap(pdev, host->ioaddr);
+ release:
+diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
+index 721e5dd1eb7de..2c4d390a8acd3 100644
+--- a/drivers/mmc/host/via-sdmmc.c
++++ b/drivers/mmc/host/via-sdmmc.c
+@@ -1154,7 +1154,9 @@ static int via_sd_probe(struct pci_dev *pcidev,
+ pcidev->subsystem_device == 0x3891)
+ sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto unmap;
+
+ return 0;
+
+diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
+index 5e1d7025dbf78..58cc3c94f579b 100644
+--- a/drivers/mmc/host/vub300.c
++++ b/drivers/mmc/host/vub300.c
+@@ -2049,6 +2049,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ return;
+ kref_get(&vub300->kref);
+ if (enable) {
++ set_current_state(TASK_RUNNING);
+ mutex_lock(&vub300->irq_mutex);
+ if (vub300->irqs_queued) {
+ vub300->irqs_queued -= 1;
+@@ -2064,6 +2065,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ vub300_queue_poll_work(vub300, 0);
+ }
+ mutex_unlock(&vub300->irq_mutex);
++ set_current_state(TASK_INTERRUPTIBLE);
+ } else {
+ vub300->irq_enabled = 0;
+ }
+@@ -2306,14 +2308,14 @@ static int vub300_probe(struct usb_interface *interface,
+ 0x0000, 0x0000, &vub300->system_port_status,
+ sizeof(vub300->system_port_status), 1000);
+ if (retval < 0) {
+- goto error4;
++ goto error5;
+ } else if (sizeof(vub300->system_port_status) == retval) {
+ vub300->card_present =
+ (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
+ vub300->read_only =
+ (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
+ } else {
+- goto error4;
++ goto error5;
+ }
+ usb_set_intfdata(interface, vub300);
+ INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
+@@ -2336,8 +2338,13 @@ static int vub300_probe(struct usb_interface *interface,
+ "USB vub300 remote SDIO host controller[%d]"
+ "connected with no SD/SDIO card inserted\n",
+ interface_to_InterfaceNumber(interface));
+- mmc_add_host(mmc);
++ retval = mmc_add_host(mmc);
++ if (retval)
++ goto error6;
++
+ return 0;
++error6:
++ del_timer_sync(&vub300->inactivity_timer);
+ error5:
+ mmc_free_host(mmc);
+ /*
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 740179f42cf21..639f87ba1606b 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1701,7 +1701,17 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+ */
+ wbsd_init_device(host);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret) {
++ if (!pnp)
++ wbsd_chip_poweroff(host);
++
++ wbsd_release_resources(host);
++ wbsd_free_mmc(dev);
++
++ mmc_free_host(mmc);
++ return ret;
++ }
+
+ pr_info("%s: W83L51xD", mmc_hostname(mmc));
+ if (host->chip_id != 0)
+diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
+index d774068dba30d..b1e4199f82920 100644
+--- a/drivers/mmc/host/wmt-sdmmc.c
++++ b/drivers/mmc/host/wmt-sdmmc.c
+@@ -859,11 +859,15 @@ static int wmt_mci_probe(struct platform_device *pdev)
+ /* configure the controller to a known 'ready' state */
+ wmt_reset_hardware(mmc);
+
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto fail7;
+
+ dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
+
+ return 0;
++fail7:
++ clk_disable_unprepare(priv->clk_sdmmc);
+ fail6:
+ clk_put(priv->clk_sdmmc);
+ fail5_and_a_half:
+diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
+index 72f5c7b300790..add4386f99f00 100644
+--- a/drivers/mtd/lpddr/lpddr2_nvm.c
++++ b/drivers/mtd/lpddr/lpddr2_nvm.c
+@@ -433,6 +433,8 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
+
+ /* lpddr2_nvm address range */
+ add_range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!add_range)
++ return -ENODEV;
+
+ /* Populate map_info data structure */
+ *map = (struct map_info) {
+diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
+index 7d96758a8f04e..6e5e557559704 100644
+--- a/drivers/mtd/maps/pxa2xx-flash.c
++++ b/drivers/mtd/maps/pxa2xx-flash.c
+@@ -66,6 +66,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
+ if (!info->map.virt) {
+ printk(KERN_WARNING "Failed to ioremap %s\n",
+ info->map.name);
++ kfree(info);
+ return -ENOMEM;
+ }
+ info->map.cached = ioremap_cache(info->map.phys, info->map.size);
+@@ -87,6 +88,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
+ iounmap((void *)info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
++ kfree(info);
+ return -EIO;
+ }
+ info->mtd->dev.parent = &pdev->dev;
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index ac5d3b6db9b84..83012d74dcd54 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -673,8 +673,10 @@ int add_mtd_device(struct mtd_info *mtd)
+ dev_set_drvdata(&mtd->dev, mtd);
+ of_node_get(mtd_get_of_node(mtd));
+ error = device_register(&mtd->dev);
+- if (error)
++ if (error) {
++ put_device(&mtd->dev);
+ goto fail_added;
++ }
+
+ /* Add the nvmem provider */
+ error = mtd_nvmem_add(mtd);
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index dd6963e4af2c7..dc7f5db544087 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1010,6 +1010,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ continue;
+
+ erase = &map->erase_type[i];
++ if (!erase->size)
++ continue;
+
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION &&
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 0d6cd2a4cc416..0c4e6fcac58e6 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -1529,6 +1529,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
+ slave_err(bond->dev, port->slave->dev,
+ "Port %d did not find a suitable aggregator\n",
+ port->actor_port_number);
++ return;
+ }
+ }
+ /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 8bee935c8f90f..20114e1dde77e 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1360,7 +1360,7 @@ netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+ struct bond_up_slave *slaves;
+ unsigned int count;
+
+- slaves = rcu_dereference(bond->slave_arr);
++ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (likely(count))
+ tx_slave = slaves->arr[hash_index %
+@@ -1494,7 +1494,7 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+ struct bond_up_slave *slaves;
+ unsigned int count;
+
+- slaves = rcu_dereference(bond->slave_arr);
++ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (likely(count))
+ tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 246bcbd650b4b..0885991347d09 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2102,12 +2102,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
+ /* called with rcu_read_lock() */
+ static int bond_miimon_inspect(struct bonding *bond)
+ {
++ bool ignore_updelay = false;
+ int link_state, commit = 0;
+ struct list_head *iter;
+ struct slave *slave;
+- bool ignore_updelay;
+
+- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
++ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
++ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
++ } else {
++ struct bond_up_slave *usable_slaves;
++
++ usable_slaves = rcu_dereference(bond->usable_slaves);
++
++ if (usable_slaves && usable_slaves->count == 0)
++ ignore_updelay = true;
++ }
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
+@@ -4040,6 +4049,29 @@ err:
+ bond_slave_arr_work_rearm(bond, 1);
+ }
+
++static void bond_skip_slave(struct bond_up_slave *slaves,
++ struct slave *skipslave)
++{
++ int idx;
++
++ /* Rare situation where caller has asked to skip a specific
++ * slave but allocation failed (most likely!). BTW this is
++ * only possible when the call is initiated from
++ * __bond_release_one(). In this situation; overwrite the
++ * skipslave entry in the array with the last entry from the
++ * array to avoid a situation where the xmit path may choose
++ * this to-be-skipped slave to send a packet out.
++ */
++ for (idx = 0; slaves && idx < slaves->count; idx++) {
++ if (skipslave == slaves->arr[idx]) {
++ slaves->arr[idx] =
++ slaves->arr[slaves->count - 1];
++ slaves->count--;
++ break;
++ }
++ }
++}
++
+ /* Build the usable slaves array in control path for modes that use xmit-hash
+ * to determine the slave interface -
+ * (a) BOND_MODE_8023AD
+@@ -4050,9 +4082,9 @@ err:
+ */
+ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
+ {
++ struct bond_up_slave *usable_slaves, *old_usable_slaves;
+ struct slave *slave;
+ struct list_head *iter;
+- struct bond_up_slave *new_arr, *old_arr;
+ int agg_id = 0;
+ int ret = 0;
+
+@@ -4060,11 +4092,10 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
+ WARN_ON(lockdep_is_held(&bond->mode_lock));
+ #endif
+
+- new_arr = kzalloc(offsetof(struct bond_up_slave, arr[bond->slave_cnt]),
+- GFP_KERNEL);
+- if (!new_arr) {
++ usable_slaves = kzalloc(struct_size(usable_slaves, arr,
++ bond->slave_cnt), GFP_KERNEL);
++ if (!usable_slaves) {
+ ret = -ENOMEM;
+- pr_err("Failed to build slave-array.\n");
+ goto out;
+ }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+@@ -4072,14 +4103,14 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
+
+ if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ pr_debug("bond_3ad_get_active_agg_info failed\n");
+- kfree_rcu(new_arr, rcu);
++ kfree_rcu(usable_slaves, rcu);
+ /* No active aggragator means it's not safe to use
+ * the previous array.
+ */
+- old_arr = rtnl_dereference(bond->slave_arr);
+- if (old_arr) {
+- RCU_INIT_POINTER(bond->slave_arr, NULL);
+- kfree_rcu(old_arr, rcu);
++ old_usable_slaves = rtnl_dereference(bond->usable_slaves);
++ if (old_usable_slaves) {
++ RCU_INIT_POINTER(bond->usable_slaves, NULL);
++ kfree_rcu(old_usable_slaves, rcu);
+ }
+ goto out;
+ }
+@@ -4099,37 +4130,20 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
+ continue;
+
+ slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
+- new_arr->count);
++ usable_slaves->count);
+
+- new_arr->arr[new_arr->count++] = slave;
++ usable_slaves->arr[usable_slaves->count++] = slave;
+ }
+
+- old_arr = rtnl_dereference(bond->slave_arr);
+- rcu_assign_pointer(bond->slave_arr, new_arr);
+- if (old_arr)
+- kfree_rcu(old_arr, rcu);
++ old_usable_slaves = rtnl_dereference(bond->usable_slaves);
++ rcu_assign_pointer(bond->usable_slaves, usable_slaves);
++ if (old_usable_slaves)
++ kfree_rcu(old_usable_slaves, rcu);
+ out:
+- if (ret != 0 && skipslave) {
+- int idx;
+-
+- /* Rare situation where caller has asked to skip a specific
+- * slave but allocation failed (most likely!). BTW this is
+- * only possible when the call is initiated from
+- * __bond_release_one(). In this situation; overwrite the
+- * skipslave entry in the array with the last entry from the
+- * array to avoid a situation where the xmit path may choose
+- * this to-be-skipped slave to send a packet out.
+- */
+- old_arr = rtnl_dereference(bond->slave_arr);
+- for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
+- if (skipslave == old_arr->arr[idx]) {
+- old_arr->arr[idx] =
+- old_arr->arr[old_arr->count-1];
+- old_arr->count--;
+- break;
+- }
+- }
+- }
++ if (ret != 0 && skipslave)
++ bond_skip_slave(rtnl_dereference(bond->usable_slaves),
++ skipslave);
++
+ return ret;
+ }
+
+@@ -4145,7 +4159,7 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
+ struct bond_up_slave *slaves;
+ unsigned int count;
+
+- slaves = rcu_dereference(bond->slave_arr);
++ slaves = rcu_dereference(bond->usable_slaves);
+ count = slaves ? READ_ONCE(slaves->count) : 0;
+ if (likely(count)) {
+ slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
+@@ -4452,9 +4466,9 @@ static void bond_uninit(struct net_device *bond_dev)
+ __bond_release_one(bond_dev, slave->dev, true, true);
+ netdev_info(bond_dev, "Released all slaves\n");
+
+- arr = rtnl_dereference(bond->slave_arr);
++ arr = rtnl_dereference(bond->usable_slaves);
+ if (arr) {
+- RCU_INIT_POINTER(bond->slave_arr, NULL);
++ RCU_INIT_POINTER(bond->usable_slaves, NULL);
+ kfree_rcu(arr, rcu);
+ }
+
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 0d66582bd3560..b312cbf30df7d 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -291,11 +291,6 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
+ if (ret)
+ return ret;
+
+- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
+- TCAN4X5X_ENABLE_MCAN_INT);
+- if (ret)
+- return ret;
+-
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+index 62958f04a2f20..5699531f87873 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+@@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context {
+ int dlc;
+ };
+
++struct kvaser_usb_busparams {
++ __le32 bitrate;
++ u8 tseg1;
++ u8 tseg2;
++ u8 sjw;
++ u8 nsamples;
++} __packed;
++
+ struct kvaser_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+@@ -104,13 +112,19 @@ struct kvaser_usb_net_priv {
+ struct can_priv can;
+ struct can_berr_counter bec;
+
++ /* subdriver-specific data */
++ void *sub_priv;
++
+ struct kvaser_usb *dev;
+ struct net_device *netdev;
+ int channel;
+
+- struct completion start_comp, stop_comp, flush_comp;
++ struct completion start_comp, stop_comp, flush_comp,
++ get_busparams_comp;
+ struct usb_anchor tx_submitted;
+
++ struct kvaser_usb_busparams busparams_nominal, busparams_data;
++
+ spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
+ int active_tx_contexts;
+ struct kvaser_usb_tx_urb_context tx_contexts[];
+@@ -120,11 +134,15 @@ struct kvaser_usb_net_priv {
+ * struct kvaser_usb_dev_ops - Device specific functions
+ * @dev_set_mode: used for can.do_set_mode
+ * @dev_set_bittiming: used for can.do_set_bittiming
++ * @dev_get_busparams: readback arbitration busparams
+ * @dev_set_data_bittiming: used for can.do_set_data_bittiming
++ * @dev_get_data_busparams: readback data busparams
+ * @dev_get_berr_counter: used for can.do_get_berr_counter
+ *
+ * @dev_setup_endpoints: setup USB in and out endpoints
+ * @dev_init_card: initialize card
++ * @dev_init_channel: initialize channel
++ * @dev_remove_channel: uninitialize channel
+ * @dev_get_software_info: get software info
+ * @dev_get_software_details: get software details
+ * @dev_get_card_info: get card info
+@@ -140,12 +158,18 @@ struct kvaser_usb_net_priv {
+ */
+ struct kvaser_usb_dev_ops {
+ int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
+- int (*dev_set_bittiming)(struct net_device *netdev);
+- int (*dev_set_data_bittiming)(struct net_device *netdev);
++ int (*dev_set_bittiming)(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams);
++ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
++ int (*dev_set_data_bittiming)(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams);
++ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
+ int (*dev_get_berr_counter)(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+ int (*dev_setup_endpoints)(struct kvaser_usb *dev);
+ int (*dev_init_card)(struct kvaser_usb *dev);
++ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
++ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
+ int (*dev_get_software_info)(struct kvaser_usb *dev);
+ int (*dev_get_software_details)(struct kvaser_usb *dev);
+ int (*dev_get_card_info)(struct kvaser_usb *dev);
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+index 7491f85e85b30..1f015b496a472 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+@@ -416,10 +416,6 @@ static int kvaser_usb_open(struct net_device *netdev)
+ if (err)
+ return err;
+
+- err = kvaser_usb_setup_rx_urbs(dev);
+- if (err)
+- goto error;
+-
+ err = ops->dev_set_opt_mode(priv);
+ if (err)
+ goto error;
+@@ -510,6 +506,93 @@ static int kvaser_usb_close(struct net_device *netdev)
+ return 0;
+ }
+
++static int kvaser_usb_set_bittiming(struct net_device *netdev)
++{
++ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
++ struct kvaser_usb *dev = priv->dev;
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
++ struct can_bittiming *bt = &priv->can.bittiming;
++
++ struct kvaser_usb_busparams busparams;
++ int tseg1 = bt->prop_seg + bt->phase_seg1;
++ int tseg2 = bt->phase_seg2;
++ int sjw = bt->sjw;
++ int err = -EOPNOTSUPP;
++
++ busparams.bitrate = cpu_to_le32(bt->bitrate);
++ busparams.sjw = (u8)sjw;
++ busparams.tseg1 = (u8)tseg1;
++ busparams.tseg2 = (u8)tseg2;
++ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
++ busparams.nsamples = 3;
++ else
++ busparams.nsamples = 1;
++
++ err = ops->dev_set_bittiming(netdev, &busparams);
++ if (err)
++ return err;
++
++ err = kvaser_usb_setup_rx_urbs(priv->dev);
++ if (err)
++ return err;
++
++ err = ops->dev_get_busparams(priv);
++ if (err) {
++ /* Treat EOPNOTSUPP as success */
++ if (err == -EOPNOTSUPP)
++ err = 0;
++ return err;
++ }
++
++ if (memcmp(&busparams, &priv->busparams_nominal,
++ sizeof(priv->busparams_nominal)) != 0)
++ err = -EINVAL;
++
++ return err;
++}
++
++static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
++{
++ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
++ struct kvaser_usb *dev = priv->dev;
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
++ struct can_bittiming *dbt = &priv->can.data_bittiming;
++
++ struct kvaser_usb_busparams busparams;
++ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
++ int tseg2 = dbt->phase_seg2;
++ int sjw = dbt->sjw;
++ int err;
++
++ if (!ops->dev_set_data_bittiming ||
++ !ops->dev_get_data_busparams)
++ return -EOPNOTSUPP;
++
++ busparams.bitrate = cpu_to_le32(dbt->bitrate);
++ busparams.sjw = (u8)sjw;
++ busparams.tseg1 = (u8)tseg1;
++ busparams.tseg2 = (u8)tseg2;
++ busparams.nsamples = 1;
++
++ err = ops->dev_set_data_bittiming(netdev, &busparams);
++ if (err)
++ return err;
++
++ err = kvaser_usb_setup_rx_urbs(priv->dev);
++ if (err)
++ return err;
++
++ err = ops->dev_get_data_busparams(priv);
++ if (err)
++ return err;
++
++ if (memcmp(&busparams, &priv->busparams_data,
++ sizeof(priv->busparams_data)) != 0)
++ err = -EINVAL;
++
++ return err;
++}
++
+ static void kvaser_usb_write_bulk_callback(struct urb *urb)
+ {
+ struct kvaser_usb_tx_urb_context *context = urb->context;
+@@ -645,6 +728,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
+
+ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+ {
++ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ int i;
+
+ for (i = 0; i < dev->nchannels; i++) {
+@@ -660,6 +744,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
+ if (!dev->nets[i])
+ continue;
+
++ if (ops->dev_remove_channel)
++ ops->dev_remove_channel(dev->nets[i]);
++
+ free_candev(dev->nets[i]->netdev);
+ }
+ }
+@@ -691,6 +778,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ init_completion(&priv->start_comp);
+ init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
++ init_completion(&priv->get_busparams_comp);
+ priv->can.ctrlmode_supported = 0;
+
+ priv->dev = dev;
+@@ -703,7 +791,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+ priv->can.state = CAN_STATE_STOPPED;
+ priv->can.clock.freq = dev->cfg->clock.freq;
+ priv->can.bittiming_const = dev->cfg->bittiming_const;
+- priv->can.do_set_bittiming = ops->dev_set_bittiming;
++ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
+ (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
+@@ -715,7 +803,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
+- priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming;
++ priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ }
+
+ netdev->flags |= IFF_ECHO;
+@@ -727,17 +815,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
+
+ dev->nets[channel] = priv;
+
++ if (ops->dev_init_channel) {
++ err = ops->dev_init_channel(priv);
++ if (err)
++ goto err;
++ }
++
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(&dev->intf->dev, "Failed to register CAN device\n");
+- free_candev(netdev);
+- dev->nets[channel] = NULL;
+- return err;
++ goto err;
+ }
+
+ netdev_dbg(netdev, "device registered\n");
+
+ return 0;
++
++err:
++ free_candev(netdev);
++ dev->nets[channel] = NULL;
++ return err;
+ }
+
+ static int kvaser_usb_probe(struct usb_interface *intf,
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+index 45d2787248839..2764fdd7e84b3 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+@@ -43,6 +43,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
+
+ /* Minihydra command IDs */
+ #define CMD_SET_BUSPARAMS_REQ 16
++#define CMD_GET_BUSPARAMS_REQ 17
++#define CMD_GET_BUSPARAMS_RESP 18
+ #define CMD_GET_CHIP_STATE_REQ 19
+ #define CMD_CHIP_STATE_EVENT 20
+ #define CMD_SET_DRIVERMODE_REQ 21
+@@ -193,21 +195,26 @@ struct kvaser_cmd_chip_state_event {
+ #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
+ #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
+ struct kvaser_cmd_set_busparams {
+- __le32 bitrate;
+- u8 tseg1;
+- u8 tseg2;
+- u8 sjw;
+- u8 nsamples;
++ struct kvaser_usb_busparams busparams_nominal;
+ u8 reserved0[4];
+- __le32 bitrate_d;
+- u8 tseg1_d;
+- u8 tseg2_d;
+- u8 sjw_d;
+- u8 nsamples_d;
++ struct kvaser_usb_busparams busparams_data;
+ u8 canfd_mode;
+ u8 reserved1[7];
+ } __packed;
+
++/* Busparam type */
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00
++#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01
++struct kvaser_cmd_get_busparams_req {
++ u8 type;
++ u8 reserved[27];
++} __packed;
++
++struct kvaser_cmd_get_busparams_res {
++ struct kvaser_usb_busparams busparams;
++ u8 reserved[20];
++} __packed;
++
+ /* Ctrl modes */
+ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
+ #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
+@@ -278,6 +285,8 @@ struct kvaser_cmd {
+ struct kvaser_cmd_error_event error_event;
+
+ struct kvaser_cmd_set_busparams set_busparams_req;
++ struct kvaser_cmd_get_busparams_req get_busparams_req;
++ struct kvaser_cmd_get_busparams_res get_busparams_res;
+
+ struct kvaser_cmd_chip_state_event chip_state_event;
+
+@@ -293,6 +302,7 @@ struct kvaser_cmd {
+ #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
+ #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
+ #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
++#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6)
+ /* CAN frame flags. Used in ext_rx_can and ext_tx_can */
+ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
+ #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
+@@ -359,6 +369,10 @@ struct kvaser_cmd_ext {
+ } __packed;
+ } __packed;
+
++struct kvaser_usb_net_hydra_priv {
++ int pending_get_busparams_type;
++};
++
+ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
+ .name = "kvaser_usb_kcan",
+ .tseg1_min = 1,
+@@ -812,6 +826,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
+ complete(&priv->flush_comp);
+ }
+
++static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ struct kvaser_usb_net_priv *priv;
++ struct kvaser_usb_net_hydra_priv *hydra;
++
++ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
++ if (!priv)
++ return;
++
++ hydra = priv->sub_priv;
++ if (!hydra)
++ return;
++
++ switch (hydra->pending_get_busparams_type) {
++ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
++ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
++ sizeof(priv->busparams_nominal));
++ break;
++ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
++ memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
++ sizeof(priv->busparams_nominal));
++ break;
++ default:
++ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
++ hydra->pending_get_busparams_type);
++ break;
++ }
++ hydra->pending_get_busparams_type = -1;
++
++ complete(&priv->get_busparams_comp);
++}
++
+ static void
+ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
+ u8 bus_status,
+@@ -1099,6 +1146,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
+ struct kvaser_usb_net_priv *priv;
+ unsigned long irq_flags;
+ bool one_shot_fail = false;
++ bool is_err_frame = false;
+ u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+@@ -1117,10 +1165,13 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
+ kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
+ one_shot_fail = true;
+ }
++
++ is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK &&
++ flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
+ }
+
+ context = &priv->tx_contexts[transid % dev->max_tx_urbs];
+- if (!one_shot_fail) {
++ if (!one_shot_fail && !is_err_frame) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+@@ -1294,6 +1345,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
+ kvaser_usb_hydra_state_event(dev, cmd);
+ break;
+
++ case CMD_GET_BUSPARAMS_RESP:
++ kvaser_usb_hydra_get_busparams_reply(dev, cmd);
++ break;
++
+ case CMD_ERROR_EVENT:
+ kvaser_usb_hydra_error_event(dev, cmd);
+ break;
+@@ -1494,15 +1549,58 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
+ return err;
+ }
+
+-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
++static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
++ int busparams_type)
++{
++ struct kvaser_usb *dev = priv->dev;
++ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
++ struct kvaser_cmd *cmd;
++ int err;
++
++ if (!hydra)
++ return -EINVAL;
++
++ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
++ if (!cmd)
++ return -ENOMEM;
++
++ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
++ kvaser_usb_hydra_set_cmd_dest_he
++ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
++ kvaser_usb_hydra_set_cmd_transid
++ (cmd, kvaser_usb_hydra_get_next_transid(dev));
++ cmd->get_busparams_req.type = busparams_type;
++ hydra->pending_get_busparams_type = busparams_type;
++
++ reinit_completion(&priv->get_busparams_comp);
++
++ err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
++ if (err)
++ return err;
++
++ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
++ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
++ return -ETIMEDOUT;
++
++ return err;
++}
++
++static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
++{
++ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
++}
++
++static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
++{
++ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
++}
++
++static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams)
+ {
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+- struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+- int tseg1 = bt->prop_seg + bt->phase_seg1;
+- int tseg2 = bt->phase_seg2;
+- int sjw = bt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+@@ -1510,11 +1608,8 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
+- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
+- cmd->set_busparams_req.sjw = (u8)sjw;
+- cmd->set_busparams_req.tseg1 = (u8)tseg1;
+- cmd->set_busparams_req.tseg2 = (u8)tseg2;
+- cmd->set_busparams_req.nsamples = 1;
++ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
++ sizeof(cmd->set_busparams_req.busparams_nominal));
+
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+@@ -1528,15 +1623,12 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+ return err;
+ }
+
+-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
++static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams)
+ {
+ struct kvaser_cmd *cmd;
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct kvaser_usb *dev = priv->dev;
+- int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+- int tseg2 = dbt->phase_seg2;
+- int sjw = dbt->sjw;
+ int err;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+@@ -1544,11 +1636,8 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
+- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
+- cmd->set_busparams_req.sjw_d = (u8)sjw;
+- cmd->set_busparams_req.tseg1_d = (u8)tseg1;
+- cmd->set_busparams_req.tseg2_d = (u8)tseg2;
+- cmd->set_busparams_req.nsamples_d = 1;
++ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
++ sizeof(cmd->set_busparams_req.busparams_data));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+@@ -1655,6 +1744,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
+ return 0;
+ }
+
++static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
++{
++ struct kvaser_usb_net_hydra_priv *hydra;
++
++ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
++ if (!hydra)
++ return -ENOMEM;
++
++ priv->sub_priv = hydra;
++
++ return 0;
++}
++
+ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
+ {
+ struct kvaser_cmd cmd;
+@@ -1997,10 +2099,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
+ .dev_set_mode = kvaser_usb_hydra_set_mode,
+ .dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
++ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
+ .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
++ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
+ .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
+ .dev_init_card = kvaser_usb_hydra_init_card,
++ .dev_init_channel = kvaser_usb_hydra_init_channel,
+ .dev_get_software_info = kvaser_usb_hydra_get_software_info,
+ .dev_get_software_details = kvaser_usb_hydra_get_software_details,
+ .dev_get_card_info = kvaser_usb_hydra_get_card_info,
+diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+index 15380cc08ee69..f06d63db9077b 100644
+--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+@@ -20,6 +20,7 @@
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/usb.h>
++#include <linux/workqueue.h>
+
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+@@ -55,6 +56,9 @@
+ #define CMD_RX_EXT_MESSAGE 14
+ #define CMD_TX_EXT_MESSAGE 15
+ #define CMD_SET_BUS_PARAMS 16
++#define CMD_GET_BUS_PARAMS 17
++#define CMD_GET_BUS_PARAMS_REPLY 18
++#define CMD_GET_CHIP_STATE 19
+ #define CMD_CHIP_STATE_EVENT 20
+ #define CMD_SET_CTRL_MODE 21
+ #define CMD_RESET_CHIP 24
+@@ -69,10 +73,13 @@
+ #define CMD_GET_CARD_INFO_REPLY 35
+ #define CMD_GET_SOFTWARE_INFO 38
+ #define CMD_GET_SOFTWARE_INFO_REPLY 39
++#define CMD_ERROR_EVENT 45
+ #define CMD_FLUSH_QUEUE 48
+ #define CMD_TX_ACKNOWLEDGE 50
+ #define CMD_CAN_ERROR_EVENT 51
+ #define CMD_FLUSH_QUEUE_REPLY 68
++#define CMD_GET_CAPABILITIES_REQ 95
++#define CMD_GET_CAPABILITIES_RESP 96
+
+ #define CMD_LEAF_LOG_MESSAGE 106
+
+@@ -82,6 +89,8 @@
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+ #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
++#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
++
+ /* error factors */
+ #define M16C_EF_ACKE BIT(0)
+ #define M16C_EF_CRCE BIT(1)
+@@ -156,11 +165,7 @@ struct usbcan_cmd_softinfo {
+ struct kvaser_cmd_busparams {
+ u8 tid;
+ u8 channel;
+- __le32 bitrate;
+- u8 tseg1;
+- u8 tseg2;
+- u8 sjw;
+- u8 no_samp;
++ struct kvaser_usb_busparams busparams;
+ } __packed;
+
+ struct kvaser_cmd_tx_can {
+@@ -229,7 +234,7 @@ struct kvaser_cmd_tx_acknowledge_header {
+ u8 tid;
+ } __packed;
+
+-struct leaf_cmd_error_event {
++struct leaf_cmd_can_error_event {
+ u8 tid;
+ u8 flags;
+ __le16 time[3];
+@@ -241,7 +246,7 @@ struct leaf_cmd_error_event {
+ u8 error_factor;
+ } __packed;
+
+-struct usbcan_cmd_error_event {
++struct usbcan_cmd_can_error_event {
+ u8 tid;
+ u8 padding;
+ u8 tx_errors_count_ch0;
+@@ -253,6 +258,28 @@ struct usbcan_cmd_error_event {
+ __le16 time;
+ } __packed;
+
++/* CMD_ERROR_EVENT error codes */
++#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
++#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
++
++struct leaf_cmd_error_event {
++ u8 tid;
++ u8 error_code;
++ __le16 timestamp[3];
++ __le16 padding;
++ __le16 info1;
++ __le16 info2;
++} __packed;
++
++struct usbcan_cmd_error_event {
++ u8 tid;
++ u8 error_code;
++ __le16 info1;
++ __le16 info2;
++ __le16 timestamp;
++ __le16 padding;
++} __packed;
++
+ struct kvaser_cmd_ctrl_mode {
+ u8 tid;
+ u8 channel;
+@@ -277,6 +304,28 @@ struct leaf_cmd_log_message {
+ u8 data[8];
+ } __packed;
+
++/* Sub commands for cap_req and cap_res */
++#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
++#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
++struct kvaser_cmd_cap_req {
++ __le16 padding0;
++ __le16 cap_cmd;
++ __le16 padding1;
++ __le16 channel;
++} __packed;
++
++/* Status codes for cap_res */
++#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
++#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
++#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
++struct kvaser_cmd_cap_res {
++ __le16 padding;
++ __le16 cap_cmd;
++ __le16 status;
++ __le32 mask;
++ __le32 value;
++} __packed;
++
+ struct kvaser_cmd {
+ u8 len;
+ u8 id;
+@@ -292,14 +341,18 @@ struct kvaser_cmd {
+ struct leaf_cmd_softinfo softinfo;
+ struct leaf_cmd_rx_can rx_can;
+ struct leaf_cmd_chip_state_event chip_state_event;
+- struct leaf_cmd_error_event error_event;
++ struct leaf_cmd_can_error_event can_error_event;
+ struct leaf_cmd_log_message log_message;
++ struct leaf_cmd_error_event error_event;
++ struct kvaser_cmd_cap_req cap_req;
++ struct kvaser_cmd_cap_res cap_res;
+ } __packed leaf;
+
+ union {
+ struct usbcan_cmd_softinfo softinfo;
+ struct usbcan_cmd_rx_can rx_can;
+ struct usbcan_cmd_chip_state_event chip_state_event;
++ struct usbcan_cmd_can_error_event can_error_event;
+ struct usbcan_cmd_error_event error_event;
+ } __packed usbcan;
+
+@@ -322,7 +375,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
++ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
++ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
++ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
++ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+ };
+@@ -336,7 +392,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+- [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
++ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
++ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ };
+@@ -364,6 +421,12 @@ struct kvaser_usb_err_summary {
+ };
+ };
+
++struct kvaser_usb_net_leaf_priv {
++ struct kvaser_usb_net_priv *net;
++
++ struct delayed_work chip_state_req_work;
++};
++
+ static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+@@ -607,6 +670,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+ dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
++ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
++ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
++
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for 16MHz
+ * clock, regardless of the actual clock
+@@ -694,6 +760,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
+ return 0;
+ }
+
++static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
++ u16 cap_cmd_req, u16 *status)
++{
++ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
++ struct kvaser_cmd *cmd;
++ u32 value = 0;
++ u32 mask = 0;
++ u16 cap_cmd_res;
++ int err;
++ int i;
++
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
++ if (!cmd)
++ return -ENOMEM;
++
++ cmd->id = CMD_GET_CAPABILITIES_REQ;
++ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
++ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
++
++ err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
++ if (err)
++ goto end;
++
++ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
++ if (err)
++ goto end;
++
++ *status = le16_to_cpu(cmd->u.leaf.cap_res.status);
++
++ if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
++ goto end;
++
++ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
++ switch (cap_cmd_res) {
++ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
++ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
++ value = le32_to_cpu(cmd->u.leaf.cap_res.value);
++ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
++ break;
++ default:
++ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
++ cap_cmd_res);
++ break;
++ }
++
++ for (i = 0; i < dev->nchannels; i++) {
++ if (BIT(i) & (value & mask)) {
++ switch (cap_cmd_res) {
++ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
++ card_data->ctrlmode_supported |=
++ CAN_CTRLMODE_LISTENONLY;
++ break;
++ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
++ card_data->capabilities |=
++ KVASER_USB_CAP_BERR_CAP;
++ break;
++ }
++ }
++ }
++
++end:
++ kfree(cmd);
++
++ return err;
++}
++
++static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
++{
++ int err;
++ u16 status;
++
++ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
++ dev_info(&dev->intf->dev,
++ "No extended capability support. Upgrade device firmware.\n");
++ return 0;
++ }
++
++ err = kvaser_usb_leaf_get_single_capability(dev,
++ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
++ &status);
++ if (err)
++ return err;
++ if (status)
++ dev_info(&dev->intf->dev,
++ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
++ status);
++
++ err = kvaser_usb_leaf_get_single_capability(dev,
++ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
++ &status);
++ if (err)
++ return err;
++ if (status)
++ dev_info(&dev->intf->dev,
++ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
++ status);
++
++ return 0;
++}
++
++static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
++{
++ int err = 0;
++
++ if (dev->driver_info->family == KVASER_LEAF)
++ err = kvaser_usb_leaf_get_capabilities_leaf(dev);
++
++ return err;
++}
++
+ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+ {
+@@ -722,7 +898,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
+ context = &priv->tx_contexts[tid % dev->max_tx_urbs];
+
+ /* Sometimes the state change doesn't come after a bus-off event */
+- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
++ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+@@ -778,6 +954,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
+ return err;
+ }
+
++static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
++{
++ struct kvaser_usb_net_leaf_priv *leaf =
++ container_of(work, struct kvaser_usb_net_leaf_priv,
++ chip_state_req_work.work);
++ struct kvaser_usb_net_priv *priv = leaf->net;
++
++ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
++}
++
+ static void
+ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ const struct kvaser_usb_err_summary *es,
+@@ -796,20 +982,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (es->status & M16C_STATE_BUS_PASSIVE) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+- } else if (es->status & M16C_STATE_BUS_ERROR) {
++ } else if ((es->status & M16C_STATE_BUS_ERROR) &&
++ cur_state >= CAN_STATE_BUS_OFF) {
+ /* Guard against spurious error events after a busoff */
+- if (cur_state < CAN_STATE_BUS_OFF) {
+- if (es->txerr >= 128 || es->rxerr >= 128)
+- new_state = CAN_STATE_ERROR_PASSIVE;
+- else if (es->txerr >= 96 || es->rxerr >= 96)
+- new_state = CAN_STATE_ERROR_WARNING;
+- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
+- new_state = CAN_STATE_ERROR_ACTIVE;
+- }
+- }
+-
+- if (!es->status)
++ } else if (es->txerr >= 128 || es->rxerr >= 128) {
++ new_state = CAN_STATE_ERROR_PASSIVE;
++ } else if (es->txerr >= 96 || es->rxerr >= 96) {
++ new_state = CAN_STATE_ERROR_WARNING;
++ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
++ }
+
+ if (new_state != cur_state) {
+ tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
+@@ -819,7 +1001,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
+ }
+
+ if (priv->can.restart_ms &&
+- cur_state >= CAN_STATE_BUS_OFF &&
++ cur_state == CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF)
+ priv->can.can_stats.restarts++;
+
+@@ -853,6 +1035,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ struct sk_buff *skb;
+ struct net_device_stats *stats;
+ struct kvaser_usb_net_priv *priv;
++ struct kvaser_usb_net_leaf_priv *leaf;
+ enum can_state old_state, new_state;
+
+ if (es->channel >= dev->nchannels) {
+@@ -862,8 +1045,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ }
+
+ priv = dev->nets[es->channel];
++ leaf = priv->sub_priv;
+ stats = &priv->netdev->stats;
+
++ /* Ignore e.g. state change to bus-off reported just after stopping */
++ if (!netif_running(priv->netdev))
++ return;
++
+ /* Update all of the CAN interface's state and error counters before
+ * trying any memory allocation that can actually fail with -ENOMEM.
+ *
+@@ -878,6 +1066,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ new_state = priv->can.state;
+
++ /* If there are errors, request status updates periodically as we do
++ * not get automatic notifications of improved state.
++ */
++ if (new_state < CAN_STATE_BUS_OFF &&
++ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE))
++ schedule_delayed_work(&leaf->chip_state_req_work,
++ msecs_to_jiffies(500));
++
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+@@ -895,7 +1091,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
+ }
+
+ if (priv->can.restart_ms &&
+- old_state >= CAN_STATE_BUS_OFF &&
++ old_state == CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+ netif_carrier_on(priv->netdev);
+@@ -995,11 +1191,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+
+ case CMD_CAN_ERROR_EVENT:
+ es.channel = 0;
+- es.status = cmd->u.usbcan.error_event.status_ch0;
+- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
+- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
++ es.status = cmd->u.usbcan.can_error_event.status_ch0;
++ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
++ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
+ es.usbcan.other_ch_status =
+- cmd->u.usbcan.error_event.status_ch1;
++ cmd->u.usbcan.can_error_event.status_ch1;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+
+ /* The USBCAN firmware supports up to 2 channels.
+@@ -1007,13 +1203,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
+ */
+ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
+ es.channel = 1;
+- es.status = cmd->u.usbcan.error_event.status_ch1;
++ es.status = cmd->u.usbcan.can_error_event.status_ch1;
+ es.txerr =
+- cmd->u.usbcan.error_event.tx_errors_count_ch1;
++ cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
+ es.rxerr =
+- cmd->u.usbcan.error_event.rx_errors_count_ch1;
++ cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
+ es.usbcan.other_ch_status =
+- cmd->u.usbcan.error_event.status_ch0;
++ cmd->u.usbcan.can_error_event.status_ch0;
+ kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
+ }
+ break;
+@@ -1030,11 +1226,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
+
+ switch (cmd->id) {
+ case CMD_CAN_ERROR_EVENT:
+- es.channel = cmd->u.leaf.error_event.channel;
+- es.status = cmd->u.leaf.error_event.status;
+- es.txerr = cmd->u.leaf.error_event.tx_errors_count;
+- es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
+- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
++ es.channel = cmd->u.leaf.can_error_event.channel;
++ es.status = cmd->u.leaf.can_error_event.status;
++ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
++ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
++ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
+ break;
+ case CMD_LEAF_LOG_MESSAGE:
+ es.channel = cmd->u.leaf.log_message.channel;
+@@ -1166,6 +1362,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
+ netif_rx(skb);
+ }
+
++static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ u16 info1 = 0;
++
++ switch (dev->driver_info->family) {
++ case KVASER_LEAF:
++ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
++ break;
++ case KVASER_USBCAN:
++ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
++ break;
++ }
++
++ /* info1 will contain the offending cmd_no */
++ switch (info1) {
++ case CMD_SET_CTRL_MODE:
++ dev_warn(&dev->intf->dev,
++ "CMD_SET_CTRL_MODE error in parameter\n");
++ break;
++
++ case CMD_SET_BUS_PARAMS:
++ dev_warn(&dev->intf->dev,
++ "CMD_SET_BUS_PARAMS error in parameter\n");
++ break;
++
++ default:
++ dev_warn(&dev->intf->dev,
++ "Unhandled parameter error event cmd_no (%u)\n",
++ info1);
++ break;
++ }
++}
++
++static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ u8 error_code = 0;
++
++ switch (dev->driver_info->family) {
++ case KVASER_LEAF:
++ error_code = cmd->u.leaf.error_event.error_code;
++ break;
++ case KVASER_USBCAN:
++ error_code = cmd->u.usbcan.error_event.error_code;
++ break;
++ }
++
++ switch (error_code) {
++ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
++ /* Received additional CAN message, when firmware TX queue is
++ * already full. Something is wrong with the driver.
++ * This should never happen!
++ */
++ dev_err(&dev->intf->dev,
++ "Received error event TX_QUEUE_FULL\n");
++ break;
++ case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
++ kvaser_usb_leaf_error_event_parameter(dev, cmd);
++ break;
++
++ default:
++ dev_warn(&dev->intf->dev,
++ "Unhandled error event (%d)\n", error_code);
++ break;
++ }
++}
++
+ static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+ {
+@@ -1206,6 +1470,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
+ complete(&priv->stop_comp);
+ }
+
++static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
++ const struct kvaser_cmd *cmd)
++{
++ struct kvaser_usb_net_priv *priv;
++ u8 channel = cmd->u.busparams.channel;
++
++ if (channel >= dev->nchannels) {
++ dev_err(&dev->intf->dev,
++ "Invalid channel number (%d)\n", channel);
++ return;
++ }
++
++ priv = dev->nets[channel];
++ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
++ sizeof(priv->busparams_nominal));
++
++ complete(&priv->get_busparams_comp);
++}
++
+ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+ {
+@@ -1244,6 +1527,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+ kvaser_usb_leaf_tx_acknowledge(dev, cmd);
+ break;
+
++ case CMD_ERROR_EVENT:
++ kvaser_usb_leaf_error_event(dev, cmd);
++ break;
++
++ case CMD_GET_BUS_PARAMS_REPLY:
++ kvaser_usb_leaf_get_busparams_reply(dev, cmd);
++ break;
++
+ /* Ignored commands */
+ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
+ if (dev->driver_info->family != KVASER_USBCAN)
+@@ -1340,10 +1631,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
+
+ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
+ {
++ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+ int err;
+
+ reinit_completion(&priv->stop_comp);
+
++ cancel_delayed_work(&leaf->chip_state_req_work);
++
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
+ priv->channel);
+ if (err)
+@@ -1390,10 +1684,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
+ return 0;
+ }
+
+-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
++static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
++{
++ struct kvaser_usb_net_leaf_priv *leaf;
++
++ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
++ if (!leaf)
++ return -ENOMEM;
++
++ leaf->net = priv;
++ INIT_DELAYED_WORK(&leaf->chip_state_req_work,
++ kvaser_usb_leaf_chip_state_req_work);
++
++ priv->sub_priv = leaf;
++
++ return 0;
++}
++
++static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
++{
++ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
++
++ if (leaf)
++ cancel_delayed_work_sync(&leaf->chip_state_req_work);
++}
++
++static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
++ const struct kvaser_usb_busparams *busparams)
+ {
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+- struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_cmd *cmd;
+ int rc;
+@@ -1406,15 +1725,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
+ cmd->u.busparams.channel = priv->channel;
+ cmd->u.busparams.tid = 0xff;
+- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
+- cmd->u.busparams.sjw = bt->sjw;
+- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
+- cmd->u.busparams.tseg2 = bt->phase_seg2;
+-
+- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+- cmd->u.busparams.no_samp = 3;
+- else
+- cmd->u.busparams.no_samp = 1;
++ memcpy(&cmd->u.busparams.busparams, busparams,
++ sizeof(cmd->u.busparams.busparams));
+
+ rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+
+@@ -1422,6 +1734,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+ return rc;
+ }
+
++static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
++{
++ int err;
++
++ if (priv->dev->driver_info->family == KVASER_USBCAN)
++ return -EOPNOTSUPP;
++
++ reinit_completion(&priv->get_busparams_comp);
++
++ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
++ priv->channel);
++ if (err)
++ return err;
++
++ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
++ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
+ static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
+ enum can_mode mode)
+ {
+@@ -1483,14 +1816,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
+ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
+ .dev_set_mode = kvaser_usb_leaf_set_mode,
+ .dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
++ .dev_get_busparams = kvaser_usb_leaf_get_busparams,
+ .dev_set_data_bittiming = NULL,
++ .dev_get_data_busparams = NULL,
+ .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
+ .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
+ .dev_init_card = kvaser_usb_leaf_init_card,
++ .dev_init_channel = kvaser_usb_leaf_init_channel,
++ .dev_remove_channel = kvaser_usb_leaf_remove_channel,
+ .dev_get_software_info = kvaser_usb_leaf_get_software_info,
+ .dev_get_software_details = NULL,
+ .dev_get_card_info = kvaser_usb_leaf_get_card_info,
+- .dev_get_capabilities = NULL,
++ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
+ .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
+ .dev_start_chip = kvaser_usb_leaf_start_chip,
+ .dev_stop_chip = kvaser_usb_leaf_stop_chip,
+diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
+index e981b0184077a..625db92792dab 100644
+--- a/drivers/net/dsa/lan9303-core.c
++++ b/drivers/net/dsa/lan9303-core.c
+@@ -1001,9 +1001,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
+ ret = lan9303_read_switch_port(
+ chip, port, lan9303_mib[u].offset, &reg);
+
+- if (ret)
++ if (ret) {
+ dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
+ port, lan9303_mib[u].offset);
++ reg = 0;
++ }
+ data[u] = reg;
+ }
+ }
+diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
+index d3d44e07afbc0..414b990827e8e 100644
+--- a/drivers/net/ethernet/amd/atarilance.c
++++ b/drivers/net/ethernet/amd/atarilance.c
+@@ -825,7 +825,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ dev->stats.tx_bytes += skb->len;
+- dev_kfree_skb( skb );
++ dev_consume_skb_irq(skb);
+ lp->cur_tx++;
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ lp->cur_tx -= TX_RING_SIZE;
+diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
+index f90b454b16429..7ba3da856105e 100644
+--- a/drivers/net/ethernet/amd/lance.c
++++ b/drivers/net/ethernet/amd/lance.c
+@@ -997,7 +997,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+- dev_kfree_skb(skb);
++ dev_consume_skb_irq(skb);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 0442d7e1cd20b..7f705483c1c57 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1139,6 +1139,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
++ tasklet_kill(&pdata->tasklet_dev);
++ tasklet_kill(&pdata->tasklet_ecc);
++
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+index 4d9062d35930f..530043742a07a 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
+ xgbe_i2c_disable(pdata);
+ xgbe_i2c_clear_all_interrupts(pdata);
+
+- if (pdata->dev_irq != pdata->i2c_irq)
++ if (pdata->dev_irq != pdata->i2c_irq) {
+ devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
++ tasklet_kill(&pdata->tasklet_i2c);
++ }
+ }
+
+ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 156a0bc8ab01d..97167fc9bebe7 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1390,8 +1390,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ /* Disable auto-negotiation */
+ xgbe_an_disable_all(pdata);
+
+- if (pdata->dev_irq != pdata->an_irq)
++ if (pdata->dev_irq != pdata->an_irq) {
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
++ tasklet_kill(&pdata->tasklet_an);
++ }
+
+ pdata->phy_if.phy_impl.stop(pdata);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index b76138cd09359..0a15c617c7029 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -188,6 +188,7 @@ enum xgbe_sfp_cable {
+ XGBE_SFP_CABLE_UNKNOWN = 0,
+ XGBE_SFP_CABLE_ACTIVE,
+ XGBE_SFP_CABLE_PASSIVE,
++ XGBE_SFP_CABLE_FIBER,
+ };
+
+ enum xgbe_sfp_base {
+@@ -235,10 +236,7 @@ enum xgbe_sfp_speed {
+
+ #define XGBE_SFP_BASE_BR 12
+ #define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+ #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
+-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
+-#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
+
+ #define XGBE_SFP_BASE_CU_CABLE_LEN 18
+
+@@ -825,29 +823,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+ enum xgbe_sfp_speed sfp_speed)
+ {
+- u8 *sfp_base, min, max;
++ u8 *sfp_base, min;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case XGBE_SFP_SPEED_1000:
+ min = XGBE_SFP_BASE_BR_1GBE_MIN;
+- max = XGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ min = XGBE_SFP_BASE_BR_10GBE_MIN;
+- if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+- XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
+- max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
+- else
+- max = XGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
+
+- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
+- (sfp_base[XGBE_SFP_BASE_BR] <= max));
++ return sfp_base[XGBE_SFP_BASE_BR] >= min;
+ }
+
+ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
+@@ -1148,16 +1139,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
+- /* Assume ACTIVE cable unless told it is PASSIVE */
++ /* Assume FIBER cable unless told otherwise */
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
+- } else {
++ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
++ } else {
++ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
+ }
+
+ /* Determine the type of SFP */
+- if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
++ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
+index 3e3711b60d01b..11d9884eb14d3 100644
+--- a/drivers/net/ethernet/apple/bmac.c
++++ b/drivers/net/ethernet/apple/bmac.c
+@@ -1511,7 +1511,7 @@ static void bmac_tx_timeout(struct timer_list *t)
+ i = bp->tx_empty;
+ ++dev->stats.tx_errors;
+ if (i != bp->tx_fill) {
+- dev_kfree_skb(bp->tx_bufs[i]);
++ dev_kfree_skb_irq(bp->tx_bufs[i]);
+ bp->tx_bufs[i] = NULL;
+ if (++i >= N_TX_RING) i = 0;
+ bp->tx_empty = i;
+diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
+index b8ba2abf5b3ab..65ed373d04f5b 100644
+--- a/drivers/net/ethernet/apple/mace.c
++++ b/drivers/net/ethernet/apple/mace.c
+@@ -841,7 +841,7 @@ static void mace_tx_timeout(struct timer_list *t)
+ if (mp->tx_bad_runt) {
+ mp->tx_bad_runt = 0;
+ } else if (i != mp->tx_fill) {
+- dev_kfree_skb(mp->tx_bufs[i]);
++ dev_kfree_skb_irq(mp->tx_bufs[i]);
+ if (++i >= N_TX_RING)
+ i = 0;
+ mp->tx_empty = i;
+diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
+index e249790109699..da9f9ec3e1230 100644
+--- a/drivers/net/ethernet/dnet.c
++++ b/drivers/net/ethernet/dnet.c
+@@ -553,11 +553,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ skb_tx_timestamp(skb);
+
++ spin_unlock_irqrestore(&bp->lock, flags);
++
+ /* free the buffer */
+ dev_kfree_skb(skb);
+
+- spin_unlock_irqrestore(&bp->lock, flags);
+-
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 403c1b9cf6ab8..48956c30d2eee 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2592,7 +2592,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+- if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
++ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
++ hdev->reset_type == HNAE3_FLR_RESET) &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index f718eadd87744..10b16c292541f 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1213,8 +1213,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ if (!q_vector) {
+ q_vector = kzalloc(size, GFP_KERNEL);
+ } else if (size > ksize(q_vector)) {
+- kfree_rcu(q_vector, rcu);
+- q_vector = kzalloc(size, GFP_KERNEL);
++ struct igb_q_vector *new_q_vector;
++
++ new_q_vector = kzalloc(size, GFP_KERNEL);
++ if (new_q_vector)
++ kfree_rcu(q_vector, rcu);
++ q_vector = new_q_vector;
+ } else {
+ memset(q_vector, 0, size);
+ }
+@@ -7133,7 +7137,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+ {
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+- u32 reg, msgbuf[3];
++ u32 reg, msgbuf[3] = {};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* process all the same items cleared in a function level reset */
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+index 2fb97967961c4..ba36be0c7eb4d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+@@ -851,9 +851,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
+ rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
+ if (rp_pdev && rp_pdev->subordinate) {
+ bus = rp_pdev->subordinate->number;
++ pci_dev_put(rp_pdev);
+ return pci_get_domain_bus_and_slot(0, bus, 0);
+ }
+
++ pci_dev_put(rp_pdev);
+ return NULL;
+ }
+
+@@ -870,6 +872,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+ struct ixgbe_adapter *adapter = hw->back;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *func0_pdev;
++ bool has_mii = false;
+
+ /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
+ * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
+@@ -880,15 +883,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
+ if (func0_pdev) {
+ if (func0_pdev == pdev)
+- return true;
+- else
+- return false;
++ has_mii = true;
++ goto out;
+ }
+ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
+ if (func0_pdev == pdev)
+- return true;
++ has_mii = true;
+
+- return false;
++out:
++ pci_dev_put(func0_pdev);
++ return has_mii;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 492ff2ef9a404..2c81ec31e0a2d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -417,8 +417,8 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+
+ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+- .name = "mlx5_p2p",
+- .max_adj = 100000000,
++ .name = "mlx5_ptp",
++ .max_adj = 50000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index c4c7160949825..5aee774768bcb 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3956,6 +3956,7 @@ abort_with_slices:
+ myri10ge_free_slices(mgp);
+
+ abort_with_firmware:
++ kfree(mgp->msix_vectors);
+ myri10ge_dummy_rdma(mgp, 0);
+
+ abort_with_ioremap:
+diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
+index 69316ddcf067f..496052a6b9b86 100644
+--- a/drivers/net/ethernet/neterion/s2io.c
++++ b/drivers/net/ethernet/neterion/s2io.c
+@@ -2375,7 +2375,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
+ skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
+ if (skb) {
+ swstats->mem_freed += skb->truesize;
+- dev_kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ cnt++;
+ }
+ }
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+index 10286215092f6..85419b8258b59 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+@@ -2525,7 +2525,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+ goto disable_mbx_intr;
+
+ qlcnic_83xx_clear_function_resources(adapter);
+- qlcnic_dcb_enable(adapter->dcb);
++
++ err = qlcnic_dcb_enable(adapter->dcb);
++ if (err) {
++ qlcnic_dcb_free(adapter->dcb);
++ goto disable_mbx_intr;
++ }
++
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ qlcnic_dcb_get_info(adapter->dcb);
+
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+index 0a9d24e86715d..eb8000d9b6d0e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+@@ -42,11 +42,6 @@ struct qlcnic_dcb {
+ unsigned long state;
+ };
+
+-static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+-{
+- kfree(dcb);
+-}
+-
+ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+ {
+ if (dcb && dcb->ops->get_hw_capability)
+@@ -113,9 +108,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+ dcb->ops->init_dcbnl_ops(dcb);
+ }
+
+-static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
++static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+ {
+- if (dcb && qlcnic_dcb_attach(dcb))
+- qlcnic_clear_dcb_ops(dcb);
++ return dcb ? qlcnic_dcb_attach(dcb) : 0;
+ }
+ #endif
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+index 3a96fd6deef72..9d5b74c804b5e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+@@ -2639,7 +2639,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ "Device does not support MSI interrupts\n");
+
+ if (qlcnic_82xx_check(adapter)) {
+- qlcnic_dcb_enable(adapter->dcb);
++ err = qlcnic_dcb_enable(adapter->dcb);
++ if (err) {
++ qlcnic_dcb_free(adapter->dcb);
++ dev_err(&pdev->dev, "Failed to enable DCB\n");
++ goto err_out_free_hw;
++ }
++
+ qlcnic_dcb_get_info(adapter->dcb);
+ err = qlcnic_setup_intr(adapter);
+
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index 400bc2c3f222e..7c782df3793dd 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -222,6 +222,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+ return 0;
+
+ qlcnic_destroy_async_wq:
++ while (i--)
++ kfree(sriov->vf_info[i].vp);
+ destroy_workqueue(bc->bc_async_wq);
+
+ qlcnic_destroy_trans_wq:
+diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
+index f158fdf3aab2c..b66689e0e6f2b 100644
+--- a/drivers/net/ethernet/rdc/r6040.c
++++ b/drivers/net/ethernet/rdc/r6040.c
+@@ -1162,10 +1162,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+- goto err_out_mdio_unregister;
++ goto err_out_phy_disconnect;
+ }
+ return 0;
+
++err_out_phy_disconnect:
++ phy_disconnect(dev->phydev);
+ err_out_mdio_unregister:
+ mdiobus_unregister(lp->mii_bus);
+ err_out_mdio:
+@@ -1189,6 +1191,7 @@ static void r6040_remove_one(struct pci_dev *pdev)
+ struct r6040_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
++ phy_disconnect(dev->phydev);
+ mdiobus_unregister(lp->mii_bus);
+ mdiobus_free(lp->mii_bus);
+ netif_napi_del(&lp->napi);
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 95fd1f2d54392..3fd5155bdd5fa 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -2216,11 +2216,11 @@ static int ravb_remove(struct platform_device *pdev)
+ priv->desc_bat_dma);
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+- pm_runtime_put_sync(&pdev->dev);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
++ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+index 8c14c99663941..79546810bb3d1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+@@ -43,7 +43,8 @@ static void config_sub_second_increment(void __iomem *ioaddr,
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
+
+- data &= PTP_SSIR_SSINC_MASK;
++ if (data > PTP_SSIR_SSINC_MAX)
++ data = PTP_SSIR_SSINC_MAX;
+
+ reg_value = data;
+ if (gmac4)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+index 7abb1d47e7dac..60e6b085e2f6d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+@@ -61,7 +61,7 @@
+ #define PTP_TCR_TSENMACADDR BIT(18)
+
+ /* SSIR defines */
+-#define PTP_SSIR_SSINC_MASK 0xff
++#define PTP_SSIR_SSINC_MAX 0xff
+ #define GMAC4_PTP_SSIR_SSINC_SHIFT 16
+
+ #endif /* __STMMAC_PTP_H__ */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+index ba03a2d774344..e65577f1da547 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+@@ -1614,12 +1614,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
+ }
+
+ ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
+- if (ret)
++ if (ret) {
++ kfree_skb(skb);
+ goto cleanup;
++ }
+
+ ret = dev_set_promiscuity(priv->dev, 1);
+- if (ret)
++ if (ret) {
++ kfree_skb(skb);
+ goto cleanup;
++ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
+index 4154c48d1ddf6..5dbb4ed1b1328 100644
+--- a/drivers/net/ethernet/ti/netcp_core.c
++++ b/drivers/net/ethernet/ti/netcp_core.c
+@@ -1262,7 +1262,7 @@ out:
+ }
+
+ /* Submit the packet */
+-static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
++static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ {
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_stats *tx_stats = &netcp->stats;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index 6e5ea68b6a7e6..951482d899f9f 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -541,7 +541,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
+ xemaclite_enable_interrupts(lp);
+
+ if (lp->deferred_skb) {
+- dev_kfree_skb(lp->deferred_skb);
++ dev_kfree_skb_irq(lp->deferred_skb);
+ lp->deferred_skb = NULL;
+ dev->stats.tx_errors++;
+ }
+diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
+index c866f58dab709..28bf530cb0053 100644
+--- a/drivers/net/fddi/defxx.c
++++ b/drivers/net/fddi/defxx.c
+@@ -3844,10 +3844,24 @@ static int dfx_init(void)
+ int status;
+
+ status = pci_register_driver(&dfx_pci_driver);
+- if (!status)
+- status = eisa_driver_register(&dfx_eisa_driver);
+- if (!status)
+- status = tc_register_driver(&dfx_tc_driver);
++ if (status)
++ goto err_pci_register;
++
++ status = eisa_driver_register(&dfx_eisa_driver);
++ if (status)
++ goto err_eisa_register;
++
++ status = tc_register_driver(&dfx_tc_driver);
++ if (status)
++ goto err_tc_register;
++
++ return 0;
++
++err_tc_register:
++ eisa_driver_unregister(&dfx_eisa_driver);
++err_eisa_register:
++ pci_unregister_driver(&dfx_pci_driver);
++err_pci_register:
+ return status;
+ }
+
+diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
+index 4476491b58f9d..c5495ca5e8e6b 100644
+--- a/drivers/net/hamradio/baycom_epp.c
++++ b/drivers/net/hamradio/baycom_epp.c
+@@ -758,7 +758,7 @@ static void epp_bh(struct work_struct *work)
+ * ===================== network driver interface =========================
+ */
+
+-static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct baycom_state *bc = netdev_priv(dev);
+
+diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
+index 6c03932d8a6bc..3dc4eb841aa1d 100644
+--- a/drivers/net/hamradio/scc.c
++++ b/drivers/net/hamradio/scc.c
+@@ -300,12 +300,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
+ spin_lock_irqsave(&scc->lock, flags);
+ if (scc->tx_buff != NULL)
+ {
+- dev_kfree_skb(scc->tx_buff);
++ dev_kfree_skb_irq(scc->tx_buff);
+ scc->tx_buff = NULL;
+ }
+
+ while (!skb_queue_empty(&scc->tx_queue))
+- dev_kfree_skb(skb_dequeue(&scc->tx_queue));
++ dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue));
+
+ spin_unlock_irqrestore(&scc->lock, flags);
+ }
+@@ -1667,7 +1667,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
+ if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
+ struct sk_buff *skb_del;
+ skb_del = skb_dequeue(&scc->tx_queue);
+- dev_kfree_skb(skb_del);
++ dev_kfree_skb_irq(skb_del);
+ }
+ skb_queue_tail(&scc->tx_queue, skb);
+ netif_trans_update(dev);
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 14545a8797a8a..7788f72c262e6 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -206,7 +206,7 @@ static __net_init int loopback_net_init(struct net *net)
+ int err;
+
+ err = -ENOMEM;
+- dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup);
++ dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup);
+ if (!dev)
+ goto out;
+
+diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
+index 1b7d588ff3c5c..b701ee83e64a8 100644
+--- a/drivers/net/ntb_netdev.c
++++ b/drivers/net/ntb_netdev.c
+@@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ enqueue_again:
+ rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+ if (rc) {
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_fifo_errors++;
+ }
+@@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+ ndev->stats.tx_aborted_errors++;
+ }
+
+- dev_kfree_skb(skb);
++ dev_kfree_skb_any(skb);
+
+ if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
+ /* Make sure anybody stopping the queue after this sees the new
+diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
+index 151c2a3f0b3a3..7a78dfdfa5bdd 100644
+--- a/drivers/net/phy/xilinx_gmii2rgmii.c
++++ b/drivers/net/phy/xilinx_gmii2rgmii.c
+@@ -82,6 +82,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
+
+ if (!priv->phy_dev->drv) {
+ dev_info(dev, "Attached phy not ready\n");
++ put_device(&priv->phy_dev->mdio.dev);
+ return -EPROBE_DEFER;
+ }
+
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index a085213dc2eaa..078c0f474f966 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1522,6 +1522,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
+ int len;
+ unsigned char *cp;
+
++ skb->dev = ppp->dev;
++
+ if (proto < 0x8000) {
+ #ifdef CONFIG_PPP_FILTER
+ /* check if we should pass this packet */
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index 1505fe3f87ed3..1ff723e15d523 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
+
+ off = le32_to_cpu(u.get_c->offset);
+ len = le32_to_cpu(u.get_c->len);
+- if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
++ if (unlikely((off > CONTROL_BUFFER_SIZE - 8) ||
++ (len > CONTROL_BUFFER_SIZE - 8 - off)))
+ goto response_error;
+
+ if (*reply_len != -1 && len != *reply_len)
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index 1901ec7948d8d..a2527351f8a7a 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -2613,6 +2613,7 @@ fst_remove_one(struct pci_dev *pdev)
+ for (i = 0; i < card->nports; i++) {
+ struct net_device *dev = port_to_dev(&card->ports[i]);
+ unregister_hdlc_device(dev);
++ free_netdev(dev);
+ }
+
+ fst_disable_intr(card);
+@@ -2633,6 +2634,7 @@ fst_remove_one(struct pci_dev *pdev)
+ card->tx_dma_handle_card);
+ }
+ fst_card_array[card->card_no] = NULL;
++ kfree(card);
+ }
+
+ static struct pci_driver fst_driver = {
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index 58e189ec672f9..5d3cf354f6cb5 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb)
+ }
+ }
+
++static void ar5523_cancel_tx_cmd(struct ar5523 *ar)
++{
++ usb_kill_urb(ar->tx_cmd.urb_tx);
++}
++
+ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+ {
+@@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ }
+
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
++ ar5523_cancel_tx_cmd(ar);
+ cmd->odata = NULL;
+ ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ code);
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 0f055e5777490..c28328c96307b 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -3769,18 +3769,22 @@ static struct pci_driver ath10k_pci_driver = {
+
+ static int __init ath10k_pci_init(void)
+ {
+- int ret;
++ int ret1, ret2;
+
+- ret = pci_register_driver(&ath10k_pci_driver);
+- if (ret)
++ ret1 = pci_register_driver(&ath10k_pci_driver);
++ if (ret1)
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+- ret);
++ ret1);
+
+- ret = ath10k_ahb_init();
+- if (ret)
+- printk(KERN_ERR "ahb init failed: %d\n", ret);
++ ret2 = ath10k_ahb_init();
++ if (ret2)
++ printk(KERN_ERR "ahb init failed: %d\n", ret2);
+
+- return ret;
++ if (ret1 && ret2)
++ return ret1;
++
++ /* registered to at least one bus */
++ return 0;
+ }
+ module_init(ath10k_pci_init);
+
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index c8c7afe0e343e..8a18a33b5b59f 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -709,14 +709,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct hif_device_usb *hif_dev = rx_buf->hif_dev;
+ struct sk_buff *skb = rx_buf->skb;
+- struct sk_buff *nskb;
+ int ret;
+
+ if (!skb)
+ return;
+
+ if (!hif_dev)
+- goto free;
++ goto free_skb;
+
+ switch (urb->status) {
+ case 0:
+@@ -725,7 +724,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+- goto free;
++ goto free_skb;
+ default:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+@@ -736,25 +735,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
+ if (likely(urb->actual_length != 0)) {
+ skb_put(skb, urb->actual_length);
+
+- /* Process the command first */
++ /*
++ * Process the command first.
++		 * skb is either freed here or passed on to
++		 * another callback function that manages it.
++ */
+ ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
+ skb->len, USB_REG_IN_PIPE);
+
+-
+- nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
+- if (!nskb) {
++ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
++ if (!skb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: REG_IN memory allocation failure\n");
+- urb->context = NULL;
+- return;
++ goto free_rx_buf;
+ }
+
+- rx_buf->skb = nskb;
++ rx_buf->skb = skb;
+
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
+ USB_REG_IN_PIPE),
+- nskb->data, MAX_REG_IN_BUF_SIZE,
++ skb->data, MAX_REG_IN_BUF_SIZE,
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
+ }
+
+@@ -763,12 +764,13 @@ resubmit:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ usb_unanchor_urb(urb);
+- goto free;
++ goto free_skb;
+ }
+
+ return;
+-free:
++free_skb:
+ kfree_skb(skb);
++free_rx_buf:
+ kfree(rx_buf);
+ urb->context = NULL;
+ }
+@@ -781,14 +783,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_buf, list) {
+- usb_get_urb(tx_buf->urb);
+- spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+- usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+- spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ }
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+@@ -1330,10 +1328,24 @@ static int send_eject_command(struct usb_interface *interface)
+ static int ath9k_hif_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+ {
++ struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out;
+ struct usb_device *udev = interface_to_usbdev(interface);
++ struct usb_host_interface *alt;
+ struct hif_device_usb *hif_dev;
+ int ret = 0;
+
++ /* Verify the expected endpoints are present */
++ alt = interface->cur_altsetting;
++ if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 ||
++ usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE ||
++ usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE ||
++ usb_endpoint_num(int_in) != USB_REG_IN_PIPE ||
++ usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) {
++ dev_err(&udev->dev,
++ "ath9k_htc: Device endpoint numbers are not the expected ones\n");
++ return -ENODEV;
++ }
++
+ if (id->driver_info == STORAGE_DEVICE)
+ return send_eject_command(interface);
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+index 544ad80629a99..47e33fe53eeb0 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+@@ -703,6 +703,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
+ u32 i, j;
+ char end = '\0';
+
++ if (chiprev >= BITS_PER_TYPE(u32)) {
++ brcmf_err("Invalid chip revision %u\n", chiprev);
++ return NULL;
++ }
++
+ for (i = 0; i < table_size; i++) {
+ if (mapping_table[i].chipid == chip &&
+ mapping_table[i].revmask & BIT(chiprev))
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index b5d2e5b9f67cc..092501eee9aa6 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -616,7 +616,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
+ }
+
+ if (!brcmf_chip_set_active(devinfo->ci, resetintr))
+- return -EINVAL;
++ return -EIO;
+ return 0;
+ }
+
+@@ -1109,6 +1109,10 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
+ BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
+ }
++ if (max_flowrings > 256) {
++ brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
++ return -EIO;
++ }
+
+ if (devinfo->dma_idx_sz != 0) {
+ bufsz = (max_submissionrings + max_completionrings) *
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index ddc999670484f..5874f56c12da7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -3367,6 +3367,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+ /* Take arm out of reset */
+ if (!brcmf_chip_set_active(bus->ci, rstvec)) {
+ brcmf_err("error getting out of ARM core reset\n");
++ bcmerror = -EIO;
+ goto err;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index d46e606b7b025..9a81ce299d0d1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -1209,6 +1209,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct sk_buff_head mpdus_skbs;
+ unsigned int payload_len;
+ int ret;
++ struct sk_buff *orig_skb = skb;
+
+ if (WARN_ON_ONCE(!mvmsta))
+ return -1;
+@@ -1241,8 +1242,17 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ if (ret) {
++ /* Free skbs created as part of TSO logic that have not yet been dequeued */
+ __skb_queue_purge(&mpdus_skbs);
+- return ret;
++			/* skb here is not necessarily the same as the skb that entered this method,
++ * so free it explicitly.
++ */
++ if (skb == orig_skb)
++ ieee80211_free_txskb(mvm->hw, skb);
++ else
++ kfree_skb(skb);
++			/* there was an error, but we consumed the skb one way or another, so return 0 */
++ return 0;
+ }
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index 6858f7de0915b..2a02d4d72dec9 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -1178,7 +1178,7 @@ struct rtl8723bu_c2h {
+ u8 dummy3_0;
+ } __packed ra_report;
+ };
+-};
++} __packed;
+
+ struct rtl8xxxu_fileops;
+
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index b472dc4c551e2..4a81e810a0ce3 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1608,18 +1608,18 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
+ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ {
+ struct device *dev = &priv->udev->dev;
+- u32 val32, bonding;
++ u32 val32, bonding, sys_cfg;
+ u16 val16;
+
+- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+- priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
++ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
++ priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >>
+ SYS_CFG_CHIP_VERSION_SHIFT;
+- if (val32 & SYS_CFG_TRP_VAUX_EN) {
++ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ return -ENOTSUPP;
+ }
+
+- if (val32 & SYS_CFG_BT_FUNC) {
++ if (sys_cfg & SYS_CFG_BT_FUNC) {
+ if (priv->chip_cut >= 3) {
+ sprintf(priv->chip_name, "8723BU");
+ priv->rtl_chip = RTL8723B;
+@@ -1641,7 +1641,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ if (val32 & MULTI_GPS_FUNC_EN)
+ priv->has_gps = 1;
+ priv->is_multi_func = 1;
+- } else if (val32 & SYS_CFG_TYPE_ID) {
++ } else if (sys_cfg & SYS_CFG_TYPE_ID) {
+ bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+ bonding &= HPON_FSM_BONDING_MASK;
+ if (priv->fops->tx_desc_size ==
+@@ -1689,7 +1689,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ case RTL8188E:
+ case RTL8192E:
+ case RTL8723B:
+- switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
++ switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) {
+ case SYS_CFG_VENDOR_ID_TSMC:
+ sprintf(priv->chip_vendor, "TSMC");
+ break;
+@@ -1706,7 +1706,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+ }
+ break;
+ default:
+- if (val32 & SYS_CFG_VENDOR_ID) {
++ if (sys_cfg & SYS_CFG_VENDOR_ID) {
+ sprintf(priv->chip_vendor, "UMC");
+ priv->vendor_umc = 1;
+ } else {
+diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
+index c6c29034b2ead..a939b552a8e47 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_core.c
++++ b/drivers/net/wireless/rsi/rsi_91x_core.c
+@@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
+ tid, 0);
+ }
+ }
+- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
++
++ if (IEEE80211_SKB_CB(skb)->control.flags &
++ IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+ q_num = MGMT_SOFT_Q;
+ skb->priority = q_num;
+ }
+diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
+index 7d0b44fd56901..062c5da741047 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
+@@ -162,12 +162,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ u8 header_size;
+ u8 vap_id = 0;
+ u8 dword_align_bytes;
++ bool tx_eapol;
+ u16 seq_num;
+
+ info = IEEE80211_SKB_CB(skb);
+ vif = info->control.vif;
+ tx_params = (struct skb_info *)info->driver_data;
+
++ tx_eapol = IEEE80211_SKB_CB(skb)->control.flags &
++ IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++
+ header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc);
+ if (header_size > skb_headroom(skb)) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+@@ -231,7 +235,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
+ }
+ }
+
+- if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
++ if (tx_eapol) {
+ rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n");
+
+ data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 44e353dd2ba19..43bd881ab3ddb 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -202,10 +202,10 @@ static int netback_remove(struct xenbus_device *dev)
+ set_backend_state(be, XenbusStateClosed);
+
+ unregister_hotplug_status_watch(be);
++ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ xen_unregister_watchers(be->vif);
+- xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ xenvif_free(be->vif);
+ be->vif = NULL;
+ }
+@@ -435,7 +435,6 @@ static void backend_disconnect(struct backend_info *be)
+ unsigned int queue_index;
+
+ xen_unregister_watchers(vif);
+- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
+ #ifdef CONFIG_DEBUG_FS
+ xenvif_debugfs_delif(vif);
+ #endif /* CONFIG_DEBUG_FS */
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index 807b7b37d9dce..1e90ff17f87db 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -1293,6 +1293,8 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
++ memset(&nfc_target, 0, sizeof(struct nfc_target));
++
+ rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+ rc = rsp->status & PN533_CMD_RET_MASK;
+@@ -1774,6 +1776,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+
+ dev_dbg(dev->dev, "Creating new target\n");
+
++ memset(&nfc_target, 0, sizeof(struct nfc_target));
++
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ nfc_target.nfcid1_len = 10;
+ memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index d7a355d053687..82e5b7dbaee9f 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+ return usb_submit_urb(phy->ack_urb, flags);
+ }
+
++struct pn533_out_arg {
++ struct pn533_usb_phy *phy;
++ struct completion done;
++};
++
+ static int pn533_usb_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+ {
+ struct pn533_usb_phy *phy = dev->phy;
++ struct pn533_out_arg arg;
++ void *cntx;
+ int rc;
+
+ if (phy->priv == NULL)
+@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+ print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
++ init_completion(&arg.done);
++ cntx = phy->out_urb->context;
++ phy->out_urb->context = &arg;
++
+ rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+ if (rc)
+ return rc;
+
++ wait_for_completion(&arg.done);
++ phy->out_urb->context = cntx;
++
+ if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+ /* request for response for sent packet directly */
+ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
+@@ -412,7 +426,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+ return arg.rc;
+ }
+
+-static void pn533_send_complete(struct urb *urb)
++static void pn533_out_complete(struct urb *urb)
++{
++ struct pn533_out_arg *arg = urb->context;
++ struct pn533_usb_phy *phy = arg->phy;
++
++ switch (urb->status) {
++ case 0:
++ break; /* success */
++ case -ECONNRESET:
++ case -ENOENT:
++ dev_dbg(&phy->udev->dev,
++ "The urb has been stopped (status %d)\n",
++ urb->status);
++ break;
++ case -ESHUTDOWN:
++ default:
++ nfc_err(&phy->udev->dev,
++ "Urb failure (status %d)\n",
++ urb->status);
++ }
++
++ complete(&arg->done);
++}
++
++static void pn533_ack_complete(struct urb *urb)
+ {
+ struct pn533_usb_phy *phy = urb->context;
+
+@@ -500,10 +538,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
+
+ usb_fill_bulk_urb(phy->out_urb, phy->udev,
+ usb_sndbulkpipe(phy->udev, out_endpoint),
+- NULL, 0, pn533_send_complete, phy);
++ NULL, 0, pn533_out_complete, phy);
+ usb_fill_bulk_urb(phy->ack_urb, phy->udev,
+ usb_sndbulkpipe(phy->udev, out_endpoint),
+- NULL, 0, pn533_send_complete, phy);
++ NULL, 0, pn533_ack_complete, phy);
+
+ switch (id->driver_info) {
+ case PN533_DEVICE_STD:
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 10fe7a7a2163c..5d62d1042c0e6 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -117,9 +117,9 @@ struct nvme_dev {
+ mempool_t *iod_mempool;
+
+ /* shadow doorbell buffer support: */
+- u32 *dbbuf_dbs;
++ __le32 *dbbuf_dbs;
+ dma_addr_t dbbuf_dbs_dma_addr;
+- u32 *dbbuf_eis;
++ __le32 *dbbuf_eis;
+ dma_addr_t dbbuf_eis_dma_addr;
+
+ /* host memory buffer support: */
+@@ -187,10 +187,10 @@ struct nvme_queue {
+ #define NVMEQ_SQ_CMB 1
+ #define NVMEQ_DELETE_ERROR 2
+ #define NVMEQ_POLLED 3
+- u32 *dbbuf_sq_db;
+- u32 *dbbuf_cq_db;
+- u32 *dbbuf_sq_ei;
+- u32 *dbbuf_cq_ei;
++ __le32 *dbbuf_sq_db;
++ __le32 *dbbuf_cq_db;
++ __le32 *dbbuf_sq_ei;
++ __le32 *dbbuf_cq_ei;
+ struct completion delete_done;
+ };
+
+@@ -311,11 +311,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
+ }
+
+ /* Update dbbuf and return true if an MMIO is required */
+-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+- volatile u32 *dbbuf_ei)
++static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
++ volatile __le32 *dbbuf_ei)
+ {
+ if (dbbuf_db) {
+- u16 old_value;
++ u16 old_value, event_idx;
+
+ /*
+ * Ensure that the queue is written before updating
+@@ -323,8 +323,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ */
+ wmb();
+
+- old_value = *dbbuf_db;
+- *dbbuf_db = value;
++ old_value = le32_to_cpu(*dbbuf_db);
++ *dbbuf_db = cpu_to_le32(value);
+
+ /*
+ * Ensure that the doorbell is updated before reading the event
+@@ -334,7 +334,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
+ */
+ mb();
+
+- if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
++ event_idx = le32_to_cpu(*dbbuf_ei);
++ if (!nvme_dbbuf_need_event(event_idx, value, old_value))
+ return false;
+ }
+
+diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
+index 8420ef42d89ea..dc298775f7620 100644
+--- a/drivers/of/overlay.c
++++ b/drivers/of/overlay.c
+@@ -547,7 +547,7 @@ static int find_dup_cset_node_entry(struct overlay_changeset *ovcs,
+
+ fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
+ fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
+- node_path_match = !strcmp(fn_1, fn_2);
++ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
+ kfree(fn_1);
+ kfree(fn_2);
+ if (node_path_match) {
+@@ -582,7 +582,7 @@ static int find_dup_cset_prop(struct overlay_changeset *ovcs,
+
+ fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np);
+ fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np);
+- node_path_match = !strcmp(fn_1, fn_2);
++ node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2);
+ kfree(fn_1);
+ kfree(fn_2);
+ if (node_path_match &&
+diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
+index 73e37bb877a4d..609c747402d5f 100644
+--- a/drivers/parisc/led.c
++++ b/drivers/parisc/led.c
+@@ -137,6 +137,9 @@ static int start_task(void)
+
+ /* Create the work queue and queue the LED task */
+ led_wq = create_singlethread_workqueue("led_wq");
++ if (!led_wq)
++ return -ENOMEM;
++
+ queue_delayed_work(led_wq, &led_task, 0);
+
+ return 0;
+diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
+index a1de501a27299..3f6a5d5202595 100644
+--- a/drivers/pci/irq.c
++++ b/drivers/pci/irq.c
+@@ -94,6 +94,8 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
+ va_start(ap, fmt);
+ devname = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
++ if (!devname)
++ return -ENOMEM;
+
+ ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
+ irqflags, devname, dev_id);
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index e401f040f1571..171988de988de 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1157,11 +1157,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+
+ sysfs_bin_attr_init(res_attr);
+ if (write_combine) {
+- pdev->res_attr_wc[num] = res_attr;
+ sprintf(res_attr_name, "resource%d_wc", num);
+ res_attr->mmap = pci_mmap_resource_wc;
+ } else {
+- pdev->res_attr[num] = res_attr;
+ sprintf(res_attr_name, "resource%d", num);
+ if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ res_attr->read = pci_read_resource_io;
+@@ -1177,10 +1175,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+ res_attr->size = pci_resource_len(pdev, num);
+ res_attr->private = (void *)(unsigned long)num;
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
+- if (retval)
++ if (retval) {
+ kfree(res_attr);
++ return retval;
++ }
+
+- return retval;
++ if (write_combine)
++ pdev->res_attr_wc[num] = res_attr;
++ else
++ pdev->res_attr[num] = res_attr;
++
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index ec741f92246d6..b7ca261f5e75b 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -6093,6 +6093,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
+ {
+ u32 v;
+
++ /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
++ pdev = pci_physfn(pdev);
+ if (pci_dev_is_disconnected(pdev))
+ return false;
+ return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
+diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
+index 4594e2ed13d59..96e76915da56c 100644
+--- a/drivers/perf/arm_dsu_pmu.c
++++ b/drivers/perf/arm_dsu_pmu.c
+@@ -816,7 +816,11 @@ static int __init dsu_pmu_init(void)
+ if (ret < 0)
+ return ret;
+ dsu_pmu_cpuhp_state = ret;
+- return platform_driver_register(&dsu_pmu_driver);
++ ret = platform_driver_register(&dsu_pmu_driver);
++ if (ret)
++ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
++
++ return ret;
+ }
+
+ static void __exit dsu_pmu_exit(void)
+diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
+index 6a3fa1f69e68a..0b6af77196418 100644
+--- a/drivers/perf/arm_smmuv3_pmu.c
++++ b/drivers/perf/arm_smmuv3_pmu.c
+@@ -872,6 +872,8 @@ static struct platform_driver smmu_pmu_driver = {
+
+ static int __init arm_smmu_pmu_init(void)
+ {
++ int ret;
++
+ cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/pmcg:online",
+ NULL,
+@@ -879,7 +881,11 @@ static int __init arm_smmu_pmu_init(void)
+ if (cpuhp_state_num < 0)
+ return cpuhp_state_num;
+
+- return platform_driver_register(&smmu_pmu_driver);
++ ret = platform_driver_register(&smmu_pmu_driver);
++ if (ret)
++ cpuhp_remove_multi_state(cpuhp_state_num);
++
++ return ret;
+ }
+ module_init(arm_smmu_pmu_init);
+
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 355bc4c748e26..02c015577cf95 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -391,8 +391,10 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
+ for_each_available_child_of_node(np_config, np) {
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps, type);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(np);
+ goto exit;
++ }
+ }
+ return 0;
+
+diff --git a/drivers/platform/x86/mxm-wmi.c b/drivers/platform/x86/mxm-wmi.c
+index 9a19fbd2f7341..9a457956025a5 100644
+--- a/drivers/platform/x86/mxm-wmi.c
++++ b/drivers/platform/x86/mxm-wmi.c
+@@ -35,13 +35,11 @@ int mxm_wmi_call_mxds(int adapter)
+ .xarg = 1,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ printk("calling mux switch %d\n", adapter);
+
+- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
+- &output);
++ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return status;
+@@ -60,13 +58,11 @@ int mxm_wmi_call_mxmx(int adapter)
+ .xarg = 1,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ printk("calling mux switch %d\n", adapter);
+
+- status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input,
+- &output);
++ status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
+
+ if (ACPI_FAILURE(status))
+ return status;
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index fb088dd8529ec..5a290fed63a51 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -1899,14 +1899,21 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ break;
+ }
+
+- ret = sony_call_snc_handle(handle, probe_base, &result);
+- if (ret)
+- return ret;
++ /*
++ * Only probe if there is a separate probe_base, otherwise the probe call
++ * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
++ * the keyboard backlight being turned off.
++ */
++ if (probe_base) {
++ ret = sony_call_snc_handle(handle, probe_base, &result);
++ if (ret)
++ return ret;
+
+- if ((handle == 0x0137 && !(result & 0x02)) ||
+- !(result & 0x01)) {
+- dprintk("no backlight keyboard found\n");
+- return 0;
++ if ((handle == 0x0137 && !(result & 0x02)) ||
++ !(result & 0x01)) {
++ dprintk("no backlight keyboard found\n");
++ return 0;
++ }
+ }
+
+ kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
+index 3bf18d7189750..131b925b820d2 100644
+--- a/drivers/pnp/core.c
++++ b/drivers/pnp/core.c
+@@ -160,14 +160,14 @@ struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id,
+ dev->dev.coherent_dma_mask = dev->dma_mask;
+ dev->dev.release = &pnp_release_device;
+
+- dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
+-
+ dev_id = pnp_add_id(dev, pnpid);
+ if (!dev_id) {
+ kfree(dev);
+ return NULL;
+ }
+
++ dev_set_name(&dev->dev, "%02x:%02x", dev->protocol->number, dev->number);
++
+ return dev;
+ }
+
+diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
+index 4684e7df833a8..2365efe2dae15 100644
+--- a/drivers/power/avs/smartreflex.c
++++ b/drivers/power/avs/smartreflex.c
+@@ -942,6 +942,7 @@ static int omap_sr_probe(struct platform_device *pdev)
+ err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+ err_list_del:
++ pm_runtime_disable(&pdev->dev);
+ list_del(&sr_info->node);
+
+ pm_runtime_put_sync(&pdev->dev);
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index a2f56a68c50d6..fd24254d90142 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -648,6 +648,11 @@ int power_supply_get_battery_info(struct power_supply *psy,
+ int i, tab_len, size;
+
+ propname = kasprintf(GFP_KERNEL, "ocv-capacity-table-%d", index);
++ if (!propname) {
++ power_supply_put_battery_info(psy, info);
++ err = -ENOMEM;
++ goto out_put_node;
++ }
+ list = of_get_property(battery_np, propname, &size);
+ if (!list || !size) {
+ dev_err(&psy->dev, "failed to get %s\n", propname);
+@@ -1104,8 +1109,8 @@ create_triggers_failed:
+ register_cooler_failed:
+ psy_unregister_thermal(psy);
+ register_thermal_failed:
+- device_del(dev);
+ wakeup_init_failed:
++ device_del(dev);
+ device_add_failed:
+ check_supplies_failed:
+ dev_set_name_failed:
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
+index cc63f9baa4819..538297ef82558 100644
+--- a/drivers/pwm/pwm-sifive.c
++++ b/drivers/pwm/pwm-sifive.c
+@@ -221,8 +221,11 @@ static int pwm_sifive_clock_notifier(struct notifier_block *nb,
+ struct pwm_sifive_ddata *ddata =
+ container_of(nb, struct pwm_sifive_ddata, notifier);
+
+- if (event == POST_RATE_CHANGE)
++ if (event == POST_RATE_CHANGE) {
++ mutex_lock(&ddata->lock);
+ pwm_sifive_update_clock(ddata, ndata->new_rate);
++ mutex_unlock(&ddata->lock);
++ }
+
+ return NOTIFY_OK;
+ }
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 2b08fdeb87c18..2371151bc8fc1 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -1807,8 +1807,11 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
+ 0, 0xffff);
+ err = rio_add_device(rdev);
+- if (err)
+- goto cleanup;
++ if (err) {
++ put_device(&rdev->dev);
++ return err;
++ }
++
+ rio_dev_get(rdev);
+
+ return 0;
+@@ -1904,10 +1907,6 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+
+ priv->md = chdev;
+
+- mutex_lock(&chdev->file_mutex);
+- list_add_tail(&priv->list, &chdev->file_list);
+- mutex_unlock(&chdev->file_mutex);
+-
+ INIT_LIST_HEAD(&priv->db_filters);
+ INIT_LIST_HEAD(&priv->pw_filters);
+ spin_lock_init(&priv->fifo_lock);
+@@ -1916,6 +1915,7 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
+ GFP_KERNEL);
+ if (ret < 0) {
++ put_device(&chdev->dev);
+ dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
+ ret = -ENOMEM;
+ goto err_fifo;
+@@ -1926,6 +1926,9 @@ static int mport_cdev_open(struct inode *inode, struct file *filp)
+ spin_lock_init(&priv->req_lock);
+ mutex_init(&priv->dma_lock);
+ #endif
++ mutex_lock(&chdev->file_mutex);
++ list_add_tail(&priv->list, &chdev->file_list);
++ mutex_unlock(&chdev->file_mutex);
+
+ filp->private_data = priv;
+ goto out;
+diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
+index 0e90c5d4bb2b8..b1cd6e028f2b5 100644
+--- a/drivers/rapidio/rio-scan.c
++++ b/drivers/rapidio/rio-scan.c
+@@ -456,8 +456,12 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
+ 0, 0xffff);
+
+ ret = rio_add_device(rdev);
+- if (ret)
+- goto cleanup;
++ if (ret) {
++ if (rswitch)
++ kfree(rswitch->route_table);
++ put_device(&rdev->dev);
++ return NULL;
++ }
+
+ rio_dev_get(rdev);
+
+diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
+index 606986c5ba2c9..fcab174e58888 100644
+--- a/drivers/rapidio/rio.c
++++ b/drivers/rapidio/rio.c
+@@ -2267,11 +2267,16 @@ int rio_register_mport(struct rio_mport *port)
+ atomic_set(&port->state, RIO_DEVICE_RUNNING);
+
+ res = device_register(&port->dev);
+- if (res)
++ if (res) {
+ dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+ port->id, res);
+- else
++ mutex_lock(&rio_mport_list_lock);
++ list_del(&port->node);
++ mutex_unlock(&rio_mport_list_lock);
++ put_device(&port->dev);
++ } else {
+ dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id);
++ }
+
+ return res;
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 7d15312d67920..0179716f74d74 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -956,7 +956,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
+ /* get input voltage */
+ input_uV = 0;
+ if (rdev->supply)
+- input_uV = regulator_get_voltage(rdev->supply);
++ input_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
+ if (input_uV <= 0)
+ input_uV = rdev->constraints->input_uV;
+ if (input_uV <= 0) {
+@@ -1387,7 +1387,13 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ if (rdev->supply_name && !rdev->supply)
+ return -EPROBE_DEFER;
+
+- if (rdev->supply) {
++ /* If supplying regulator has already been enabled,
++ * it's not intended to have use_count increment
++ * when rdev is only boot-on.
++ */
++ if (rdev->supply &&
++ (rdev->constraints->always_on ||
++ !regulator_is_enabled(rdev->supply))) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ _regulator_put(rdev->supply);
+@@ -1431,6 +1437,7 @@ static int set_supply(struct regulator_dev *rdev,
+
+ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
+ if (rdev->supply == NULL) {
++ module_put(supply_rdev->owner);
+ err = -ENOMEM;
+ return err;
+ }
+@@ -1604,7 +1611,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+
+ regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
+ if (regulator == NULL) {
+- kfree(supply_name);
++ kfree_const(supply_name);
+ return NULL;
+ }
+
+@@ -1734,6 +1741,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+ node = of_get_regulator(dev, supply);
+ if (node) {
+ r = of_find_regulator_by_node(node);
++ of_node_put(node);
+ if (r)
+ return r;
+
+@@ -5266,6 +5274,7 @@ unset_supplies:
+ regulator_remove_coupling(rdev);
+ mutex_unlock(&regulator_list_mutex);
+ wash:
++ regulator_put(rdev->supply);
+ kfree(rdev->coupling_desc.coupled_rdevs);
+ mutex_lock(&regulator_list_mutex);
+ regulator_ena_gpio_free(rdev);
+diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
+index bf80748f1ccc2..7baa6121cc66f 100644
+--- a/drivers/regulator/da9211-regulator.c
++++ b/drivers/regulator/da9211-regulator.c
+@@ -471,6 +471,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
+
+ chip->chip_irq = i2c->irq;
+
++ ret = da9211_regulator_init(chip);
++ if (ret < 0) {
++ dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
++ return ret;
++ }
++
+ if (chip->chip_irq != 0) {
+ ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
+ da9211_irq_handler,
+@@ -485,11 +491,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
+ dev_warn(chip->dev, "No IRQ configured\n");
+ }
+
+- ret = da9211_regulator_init(chip);
+-
+- if (ret < 0)
+- dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+-
+ return ret;
+ }
+
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index db4b3c4bacd77..d48f4b5c8df7b 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -230,6 +230,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
++ of_node_put(node);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
+index c231314eab667..b7d0c35c5058c 100644
+--- a/drivers/remoteproc/qcom_sysmon.c
++++ b/drivers/remoteproc/qcom_sysmon.c
+@@ -518,7 +518,9 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ if (sysmon->shutdown_irq != -ENODATA) {
+ dev_err(sysmon->dev,
+ "failed to retrieve shutdown-ack IRQ\n");
+- return ERR_PTR(sysmon->shutdown_irq);
++ ret = sysmon->shutdown_irq;
++ kfree(sysmon);
++ return ERR_PTR(ret);
+ }
+ } else {
+ ret = devm_request_threaded_irq(sysmon->dev,
+@@ -529,6 +531,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ if (ret) {
+ dev_err(sysmon->dev,
+ "failed to acquire shutdown-ack IRQ\n");
++ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+ }
+diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
+index d349cef09cb7c..48595b00ebb39 100644
+--- a/drivers/rtc/rtc-mxc_v2.c
++++ b/drivers/rtc/rtc-mxc_v2.c
+@@ -337,8 +337,10 @@ static int mxc_rtc_probe(struct platform_device *pdev)
+ }
+
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+- if (IS_ERR(pdata->rtc))
++ if (IS_ERR(pdata->rtc)) {
++ clk_disable_unprepare(pdata->clk);
+ return PTR_ERR(pdata->rtc);
++ }
+
+ pdata->rtc->ops = &mxc_rtc_ops;
+ pdata->rtc->range_max = U32_MAX;
+diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
+index 1afa6d9fa9fb2..3e7ea5244562e 100644
+--- a/drivers/rtc/rtc-pcf85063.c
++++ b/drivers/rtc/rtc-pcf85063.c
+@@ -159,10 +159,10 @@ static int pcf85063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ if (ret)
+ return ret;
+
+- alrm->time.tm_sec = bcd2bin(buf[0]);
+- alrm->time.tm_min = bcd2bin(buf[1]);
+- alrm->time.tm_hour = bcd2bin(buf[2]);
+- alrm->time.tm_mday = bcd2bin(buf[3]);
++ alrm->time.tm_sec = bcd2bin(buf[0] & 0x7f);
++ alrm->time.tm_min = bcd2bin(buf[1] & 0x7f);
++ alrm->time.tm_hour = bcd2bin(buf[2] & 0x3f);
++ alrm->time.tm_mday = bcd2bin(buf[3] & 0x3f);
+
+ ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &val);
+ if (ret)
+diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
+index 17653ed52ebbf..40f293621b01b 100644
+--- a/drivers/rtc/rtc-pic32.c
++++ b/drivers/rtc/rtc-pic32.c
+@@ -326,16 +326,16 @@ static int pic32_rtc_probe(struct platform_device *pdev)
+
+ spin_lock_init(&pdata->alarm_lock);
+
++ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(pdata->rtc))
++ return PTR_ERR(pdata->rtc);
++
+ clk_prepare_enable(pdata->clk);
+
+ pic32_rtc_enable(pdata, 1);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+- pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
+- if (IS_ERR(pdata->rtc))
+- return PTR_ERR(pdata->rtc);
+-
+ pdata->rtc->ops = &pic32_rtcops;
+ pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 757f4daa7181a..0f1e544ac8db1 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -33,6 +33,14 @@
+ #define SNVS_LPPGDR_INIT 0x41736166
+ #define CNTR_TO_SECS_SH 15
+
++/* The maximum RTC clock cycles that are allowed to pass between two
++ * consecutive clock counter register reads. If the values are corrupted a
++ * bigger difference is expected. The RTC frequency is 32kHz. With 320 cycles
++ * we end at 10ms which should be enough for most cases. If it once takes
++ * longer than expected we do a retry.
++ */
++#define MAX_RTC_READ_DIFF_CYCLES 320
++
+ struct snvs_rtc_data {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+@@ -57,6 +65,7 @@ static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
+ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ {
+ u64 read1, read2;
++ s64 diff;
+ unsigned int timeout = 100;
+
+ /* As expected, the registers might update between the read of the LSB
+@@ -67,7 +76,8 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ do {
+ read2 = read1;
+ read1 = rtc_read_lpsrt(data);
+- } while (read1 != read2 && --timeout);
++ diff = read1 - read2;
++ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
+ if (!timeout)
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+
+@@ -79,13 +89,15 @@ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
+ {
+ u32 count1, count2;
++ s32 diff;
+ unsigned int timeout = 100;
+
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ do {
+ count2 = count1;
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+- } while (count1 != count2 && --timeout);
++ diff = count1 - count2;
++ } while (((diff < 0) || (diff > MAX_RTC_READ_DIFF_CYCLES)) && --timeout);
+ if (!timeout) {
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ return -ETIMEDOUT;
+diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
+index 49474a31c66d3..27261b020f8dd 100644
+--- a/drivers/rtc/rtc-st-lpc.c
++++ b/drivers/rtc/rtc-st-lpc.c
+@@ -241,6 +241,7 @@ static int st_rtc_probe(struct platform_device *pdev)
+
+ rtc->clkrate = clk_get_rate(rtc->clk);
+ if (!rtc->clkrate) {
++ clk_disable_unprepare(rtc->clk);
+ dev_err(&pdev->dev, "Unable to fetch clock rate\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
+index 437a6d8221057..87d05b13fbd51 100644
+--- a/drivers/s390/net/ctcm_main.c
++++ b/drivers/s390/net/ctcm_main.c
+@@ -865,16 +865,9 @@ done:
+ /**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+- *
+- * skb Pointer to buffer containing the packet.
+- * dev Pointer to interface struct.
+- *
+- * returns 0 if packet consumed, !0 if packet rejected.
+- * Note: If we return !0, then the packet is free'd by
+- * the generic network layer.
+ */
+ /* first merge version - leaving both functions separated */
+-static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct ctcm_priv *priv = dev->ml_priv;
+
+@@ -917,7 +910,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ /* unmerged MPC variant of ctcm_tx */
+-static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ int len = 0;
+ struct ctcm_priv *priv = dev->ml_priv;
+diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
+index 4eec7bfb5de93..73708166b255c 100644
+--- a/drivers/s390/net/lcs.c
++++ b/drivers/s390/net/lcs.c
+@@ -1518,9 +1518,8 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+ /**
+ * Packet transmit function called by network stack
+ */
+-static int
+-__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+- struct net_device *dev)
++static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
++ struct net_device *dev)
+ {
+ struct lcs_header *header;
+ int rc = NETDEV_TX_OK;
+@@ -1581,8 +1580,7 @@ out:
+ return rc;
+ }
+
+-static int
+-lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct lcs_card *card;
+ int rc;
+diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
+index 5ce2424ca7290..e2984b54447b7 100644
+--- a/drivers/s390/net/netiucv.c
++++ b/drivers/s390/net/netiucv.c
+@@ -1344,15 +1344,8 @@ out:
+ /**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+- *
+- * @param skb Pointer to buffer containing the packet.
+- * @param dev Pointer to interface struct.
+- *
+- * @return 0 if packet consumed, !0 if packet rejected.
+- * Note: If we return !0, then the packet is free'd by
+- * the generic network layer.
+ */
+-static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ int rc;
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 25dae9f0b2055..00ddb3fd940fa 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -2506,6 +2506,7 @@ static int __init fcoe_init(void)
+
+ out_free:
+ mutex_unlock(&fcoe_config_mutex);
++ fcoe_transport_detach(&fcoe_sw_transport);
+ out_destroy:
+ destroy_workqueue(fcoe_wq);
+ return rc;
+diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
+index 2cb7a8c93a15d..b3086cf406174 100644
+--- a/drivers/scsi/fcoe/fcoe_sysfs.c
++++ b/drivers/scsi/fcoe/fcoe_sysfs.c
+@@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ error = device_register(&ctlr->dev);
+- if (error)
+- goto out_del_q2;
++ if (error) {
++ destroy_workqueue(ctlr->devloss_work_q);
++ destroy_workqueue(ctlr->work_q);
++ put_device(&ctlr->dev);
++ return NULL;
++ }
+
+ return ctlr;
+
+-out_del_q2:
+- destroy_workqueue(ctlr->devloss_work_q);
+- ctlr->devloss_work_q = NULL;
+ out_del_q:
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+@@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ fcf->selected = new_fcf->selected;
+
+ error = device_register(&fcf->dev);
+- if (error)
+- goto out_del;
++ if (error) {
++ put_device(&fcf->dev);
++ goto out;
++ }
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+ list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+ return fcf;
+
+-out_del:
+- kfree(fcf);
+ out:
+ return NULL;
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index bac705990a961..ba125ed7e06a7 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -8903,7 +8903,7 @@ clean1: /* wq/aer/h */
+ destroy_workqueue(h->monitor_ctlr_wq);
+ h->monitor_ctlr_wq = NULL;
+ }
+- kfree(h);
++ hpda_free_ctlr_info(h);
+ return rc;
+ }
+
+@@ -9763,7 +9763,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
+ return 0;
+
+ free_sas_phy:
+- hpsa_free_sas_phy(hpsa_sas_phy);
++ sas_phy_free(hpsa_sas_phy->phy);
++ kfree(hpsa_sas_phy);
+ free_sas_port:
+ hpsa_free_sas_port(hpsa_sas_port);
+ free_sas_node:
+@@ -9799,10 +9800,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
+
+ rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
+ if (rc)
+- goto free_sas_port;
++ goto free_sas_rphy;
+
+ return 0;
+
++free_sas_rphy:
++ sas_rphy_free(rphy);
+ free_sas_port:
+ hpsa_free_sas_port(hpsa_sas_port);
+ device->sas_port = NULL;
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index a163fd9331b3c..a42837340edfc 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -10843,11 +10843,19 @@ static struct notifier_block ipr_notifier = {
+ **/
+ static int __init ipr_init(void)
+ {
++ int rc;
++
+ ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
+ IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
+
+ register_reboot_notifier(&ipr_notifier);
+- return pci_register_driver(&ipr_driver);
++ rc = pci_register_driver(&ipr_driver);
++ if (rc) {
++ unregister_reboot_notifier(&ipr_notifier);
++ return rc;
++ }
++
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index 5324662751bf1..b909cf100ea48 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -712,6 +712,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
++ sas_rphy_free(rphy);
++ rphy = NULL;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 408166bd20f33..2c86ed1dc4b51 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -3139,7 +3139,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+- lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
++ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index e9ccfb97773f1..7cf871323b2c4 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -318,6 +318,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ ret);
+
+ put_device(&snic->shost->shost_gendev);
++ spin_lock_irqsave(snic->shost->host_lock, flags);
++ list_del(&tgt->list);
++ spin_unlock_irqrestore(snic->shost->host_lock, flags);
+ kfree(tgt);
+ tgt = NULL;
+
+diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
+index 661e47acc3543..31c44ea139efe 100644
+--- a/drivers/soc/qcom/Kconfig
++++ b/drivers/soc/qcom/Kconfig
+@@ -56,6 +56,7 @@ config QCOM_GSBI
+ config QCOM_LLCC
+ tristate "Qualcomm Technologies, Inc. LLCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
++ select REGMAP_MMIO
+ help
+ Qualcomm Technologies, Inc. platform specific
+ Last Level Cache Controller(LLCC) driver. This provides interfaces
+diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
+index b8210479ec997..341f1c7caa89a 100644
+--- a/drivers/soc/ti/knav_qmss_queue.c
++++ b/drivers/soc/ti/knav_qmss_queue.c
+@@ -64,7 +64,7 @@ static DEFINE_MUTEX(knav_dev_lock);
+ * Newest followed by older ones. Search is done from start of the array
+ * until a firmware file is found.
+ */
+-const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
++static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+
+ static bool device_ready;
+ bool knav_qmss_device_ready(void)
+@@ -1789,9 +1789,9 @@ static int knav_queue_probe(struct platform_device *pdev)
+ INIT_LIST_HEAD(&kdev->pdsps);
+
+ pm_runtime_enable(&pdev->dev);
+- ret = pm_runtime_get_sync(&pdev->dev);
++ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+- pm_runtime_put_noidle(&pdev->dev);
++ pm_runtime_disable(&pdev->dev);
+ dev_err(dev, "Failed to enable QMSS\n");
+ return ret;
+ }
+diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
+index d64feeb51a406..36d102230df23 100644
+--- a/drivers/soc/ux500/ux500-soc-id.c
++++ b/drivers/soc/ux500/ux500-soc-id.c
+@@ -159,20 +159,18 @@ static ssize_t ux500_get_process(struct device *dev,
+ static const char *db8500_read_soc_id(struct device_node *backupram)
+ {
+ void __iomem *base;
+- void __iomem *uid;
+ const char *retstr;
++ u32 uid[5];
+
+ base = of_iomap(backupram, 0);
+ if (!base)
+ return NULL;
+- uid = base + 0x1fc0;
++ memcpy_fromio(uid, base + 0x1fc0, sizeof(uid));
+
+ /* Throw these device-specific numbers into the entropy pool */
+- add_device_randomness(uid, 0x14);
++ add_device_randomness(uid, sizeof(uid));
+ retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
+- readl((u32 *)uid+0),
+- readl((u32 *)uid+1), readl((u32 *)uid+2),
+- readl((u32 *)uid+3), readl((u32 *)uid+4));
++ uid[0], uid[1], uid[2], uid[3], uid[4]);
+ iounmap(base);
+ return retstr;
+ }
+diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
+index e7dc1fad4a87c..282c5ee41a626 100644
+--- a/drivers/spi/spi-gpio.c
++++ b/drivers/spi/spi-gpio.c
+@@ -244,9 +244,19 @@ static int spi_gpio_set_direction(struct spi_device *spi, bool output)
+ if (output)
+ return gpiod_direction_output(spi_gpio->mosi, 1);
+
+- ret = gpiod_direction_input(spi_gpio->mosi);
+- if (ret)
+- return ret;
++ /*
++ * Only change MOSI to an input if using 3WIRE mode.
++ * Otherwise, MOSI could be left floating if there is
++ * no pull resistor connected to the I/O pin, or could
++ * be left logic high if there is a pull-up. Transmitting
++ * logic high when only clocking MISO data in can put some
++ * SPI devices in to a bad state.
++ */
++ if (spi->mode & SPI_3WIRE) {
++ ret = gpiod_direction_input(spi_gpio->mosi);
++ if (ret)
++ return ret;
++ }
+ /*
+ * Send a turnaround high impedance cycle when switching
+ * from output to input. Theoretically there should be
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index be503a0e6ef73..2478ae471f4ee 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -373,12 +373,23 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ switch (cmd) {
+ /* read requests */
+ case SPI_IOC_RD_MODE:
+- retval = put_user(spi->mode & SPI_MODE_MASK,
+- (__u8 __user *)arg);
+- break;
+ case SPI_IOC_RD_MODE32:
+- retval = put_user(spi->mode & SPI_MODE_MASK,
+- (__u32 __user *)arg);
++ tmp = spi->mode;
++
++ {
++ struct spi_controller *ctlr = spi->controller;
++
++ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
++ ctlr->cs_gpiods[spi->chip_select])
++ tmp &= ~SPI_CS_HIGH;
++ }
++
++ if (cmd == SPI_IOC_RD_MODE)
++ retval = put_user(tmp & SPI_MODE_MASK,
++ (__u8 __user *)arg);
++ else
++ retval = put_user(tmp & SPI_MODE_MASK,
++ (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_LSB_FIRST:
+ retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
+index 83c30e2d82f5f..a78f914082fe5 100644
+--- a/drivers/staging/rtl8192e/rtllib_rx.c
++++ b/drivers/staging/rtl8192e/rtllib_rx.c
+@@ -1490,9 +1490,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
+ hdrlen += 4;
+ }
+
+- rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+ ieee->stats.rx_packets++;
+ ieee->stats.rx_bytes += skb->len;
++ rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
+
+ return 1;
+ }
+diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+index 00e34c392a388..d51f734aca26b 100644
+--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
++++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+@@ -943,9 +943,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+ #endif
+
+ if (ieee->iw_mode == IW_MODE_MONITOR) {
++ unsigned int len = skb->len;
++
+ ieee80211_monitor_rx(ieee, skb, rx_stats);
+ stats->rx_packets++;
+- stats->rx_bytes += skb->len;
++ stats->rx_bytes += len;
+ return 1;
+ }
+
+diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
+index c787c5da8f2be..22a30da011e1e 100644
+--- a/drivers/staging/wilc1000/wilc_sdio.c
++++ b/drivers/staging/wilc1000/wilc_sdio.c
+@@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) },
+ { },
+ };
++MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
+
+ #define WILC_SDIO_BLOCK_SIZE 512
+
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 2d2d04c071401..7dd11b62a196e 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
+
+ static struct xencons_info *vtermno_to_xencons(int vtermno)
+ {
+- struct xencons_info *entry, *n, *ret = NULL;
++ struct xencons_info *entry, *ret = NULL;
++ unsigned long flags;
+
+- if (list_empty(&xenconsoles))
+- return NULL;
++ spin_lock_irqsave(&xencons_lock, flags);
++ if (list_empty(&xenconsoles)) {
++ spin_unlock_irqrestore(&xencons_lock, flags);
++ return NULL;
++ }
+
+- list_for_each_entry_safe(entry, n, &xenconsoles, list) {
++ list_for_each_entry(entry, &xenconsoles, list) {
+ if (entry->vtermno == vtermno) {
+ ret = entry;
+ break;
+ }
+ }
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return ret;
+ }
+@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
+ {
+ int r;
+ uint64_t v = 0;
+- unsigned long gfn;
++ unsigned long gfn, flags;
+ struct xencons_info *info;
+
+ if (!xen_hvm_domain())
+@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
+ goto err;
+ info->vtermno = HVC_COOKIE;
+
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_add_tail(&info->list, &xenconsoles);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+ err:
+@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+ static int xen_pv_console_init(void)
+ {
+ struct xencons_info *info;
++ unsigned long flags;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
+ /* already configured */
+ return 0;
+ }
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ xencons_info_pv_init(info, HVC_COOKIE);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+ }
+@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
+ static int xen_initial_domain_console_init(void)
+ {
+ struct xencons_info *info;
++ unsigned long flags;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
+ info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+ info->vtermno = HVC_COOKIE;
+
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_add_tail(&info->list, &xenconsoles);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+ }
+@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
+
+ static int xen_console_remove(struct xencons_info *info)
+ {
++ unsigned long flags;
++
+ xencons_disconnect_backend(info);
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_del(&info->list);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->xbdev != NULL)
+ xencons_free(info);
+ else {
+@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
+ {
+ int ret, devid;
+ struct xencons_info *info;
++ unsigned long flags;
+
+ devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
+ if (devid == 0)
+@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
+ ret = xencons_connect_backend(dev, info);
+ if (ret < 0)
+ goto error;
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_add_tail(&info->list, &xenconsoles);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+
+ return 0;
+
+@@ -583,10 +593,12 @@ static int __init xen_hvc_init(void)
+
+ info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
+ if (IS_ERR(info->hvc)) {
++ unsigned long flags;
++
+ r = PTR_ERR(info->hvc);
+- spin_lock(&xencons_lock);
++ spin_lock_irqsave(&xencons_lock, flags);
+ list_del(&info->list);
+- spin_unlock(&xencons_lock);
++ spin_unlock_irqrestore(&xencons_lock, flags);
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, NULL);
+ kfree(info);
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 0e487ce091ac9..d91f76b1d353e 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port,
+ */
+ }
+
+-static void altera_uart_rx_chars(struct altera_uart *pp)
++static void altera_uart_rx_chars(struct uart_port *port)
+ {
+- struct uart_port *port = &pp->port;
+ unsigned char ch, flag;
+ unsigned short status;
+
+@@ -248,9 +247,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
+ spin_lock(&port->lock);
+ }
+
+-static void altera_uart_tx_chars(struct altera_uart *pp)
++static void altera_uart_tx_chars(struct uart_port *port)
+ {
+- struct uart_port *port = &pp->port;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (port->x_char) {
+@@ -274,26 +272,25 @@ static void altera_uart_tx_chars(struct altera_uart *pp)
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+- if (xmit->head == xmit->tail) {
+- pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
+- altera_uart_update_ctrl_reg(pp);
+- }
++ if (uart_circ_empty(xmit))
++ altera_uart_stop_tx(port);
+ }
+
+ static irqreturn_t altera_uart_interrupt(int irq, void *data)
+ {
+ struct uart_port *port = data;
+ struct altera_uart *pp = container_of(port, struct altera_uart, port);
++ unsigned long flags;
+ unsigned int isr;
+
+ isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr;
+
+- spin_lock(&port->lock);
++ spin_lock_irqsave(&port->lock, flags);
+ if (isr & ALTERA_UART_STATUS_RRDY_MSK)
+- altera_uart_rx_chars(pp);
++ altera_uart_rx_chars(port);
+ if (isr & ALTERA_UART_STATUS_TRDY_MSK)
+- altera_uart_tx_chars(pp);
+- spin_unlock(&port->lock);
++ altera_uart_tx_chars(port);
++ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_RETVAL(isr);
+ }
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 52b7d559b44bf..86084090232d5 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1053,6 +1053,9 @@ static void pl011_dma_rx_callback(void *data)
+ */
+ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
+ {
++ if (!uap->using_rx_dma)
++ return;
++
+ /* FIXME. Just disable the DMA enable */
+ uap->dmacr &= ~UART011_RXDMAE;
+ pl011_write(uap->dmacr, uap, REG_DMACR);
+@@ -1768,8 +1771,17 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
+ static void pl011_unthrottle_rx(struct uart_port *port)
+ {
+ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port);
++ unsigned long flags;
+
+- pl011_enable_interrupts(uap);
++ spin_lock_irqsave(&uap->port.lock, flags);
++
++ uap->im = UART011_RTIM;
++ if (!pl011_dma_rx_running(uap))
++ uap->im |= UART011_RXIM;
++
++ pl011_write(uap->im, uap, REG_IMSC);
++
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+ }
+
+ static int pl011_startup(struct uart_port *port)
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 77f18445bb988..a8b6759140dd7 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -718,6 +718,7 @@ static void pch_request_dma(struct uart_port *port)
+ if (!chan) {
+ dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Tx)\n",
+ __func__);
++ pci_dev_put(dma_dev);
+ return;
+ }
+ priv->chan_tx = chan;
+@@ -734,6 +735,7 @@ static void pch_request_dma(struct uart_port *port)
+ __func__);
+ dma_release_channel(priv->chan_tx);
+ priv->chan_tx = NULL;
++ pci_dev_put(dma_dev);
+ return;
+ }
+
+@@ -741,6 +743,8 @@ static void pch_request_dma(struct uart_port *port)
+ priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
+ &priv->rx_buf_dma, GFP_KERNEL);
+ priv->chan_rx = chan;
++
++ pci_dev_put(dma_dev);
+ }
+
+ static void pch_dma_rx_complete(void *arg)
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index c5f43cd39664a..7930f2a81b4c1 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -141,6 +141,7 @@ struct tegra_uart_port {
+ int configured_rate;
+ bool use_rx_pio;
+ bool use_tx_pio;
++ bool rx_dma_active;
+ };
+
+ static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
+@@ -612,8 +613,9 @@ static void tegra_uart_stop_tx(struct uart_port *u)
+ if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
+ return;
+
+- dmaengine_terminate_all(tup->tx_dma_chan);
++ dmaengine_pause(tup->tx_dma_chan);
+ dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
++ dmaengine_terminate_all(tup->tx_dma_chan);
+ count = tup->tx_bytes_requested - state.residue;
+ async_tx_ack(tup->tx_dma_desc);
+ uart_xmit_advance(&tup->uport, count);
+@@ -692,11 +694,22 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
+ TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
+ }
+
++static void do_handle_rx_pio(struct tegra_uart_port *tup)
++{
++ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
++ struct tty_port *port = &tup->uport.state->port;
++
++ tegra_uart_handle_rx_pio(tup, port);
++ if (tty) {
++ tty_flip_buffer_push(port);
++ tty_kref_put(tty);
++ }
++}
++
+ static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
+ unsigned int residue)
+ {
+ struct tty_port *port = &tup->uport.state->port;
+- struct tty_struct *tty = tty_port_tty_get(port);
+ unsigned int count;
+
+ async_tx_ack(tup->rx_dma_desc);
+@@ -705,11 +718,7 @@ static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
+ /* If we are here, DMA is stopped */
+ tegra_uart_copy_rx_to_tty(tup, port, count);
+
+- tegra_uart_handle_rx_pio(tup, port);
+- if (tty) {
+- tty_flip_buffer_push(port);
+- tty_kref_put(tty);
+- }
++ do_handle_rx_pio(tup);
+ }
+
+ static void tegra_uart_rx_dma_complete(void *args)
+@@ -733,6 +742,7 @@ static void tegra_uart_rx_dma_complete(void *args)
+ if (tup->rts_active)
+ set_rts(tup, false);
+
++ tup->rx_dma_active = false;
+ tegra_uart_rx_buffer_push(tup, 0);
+ tegra_uart_start_rx_dma(tup);
+
+@@ -744,18 +754,30 @@ done:
+ spin_unlock_irqrestore(&u->lock, flags);
+ }
+
+-static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
++static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
+ {
+ struct dma_tx_state state;
+
++ if (!tup->rx_dma_active) {
++ do_handle_rx_pio(tup);
++ return;
++ }
++
++ dmaengine_pause(tup->rx_dma_chan);
++ dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
++ dmaengine_terminate_all(tup->rx_dma_chan);
++
++ tegra_uart_rx_buffer_push(tup, state.residue);
++ tup->rx_dma_active = false;
++}
++
++static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
++{
+ /* Deactivate flow control to stop sender */
+ if (tup->rts_active)
+ set_rts(tup, false);
+
+- dmaengine_terminate_all(tup->rx_dma_chan);
+- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+- tegra_uart_rx_buffer_push(tup, state.residue);
+- tegra_uart_start_rx_dma(tup);
++ tegra_uart_terminate_rx_dma(tup);
+
+ if (tup->rts_active)
+ set_rts(tup, true);
+@@ -765,6 +787,9 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
+ {
+ unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;
+
++ if (tup->rx_dma_active)
++ return 0;
++
+ tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
+ tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+@@ -773,6 +798,7 @@ static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
+ return -EIO;
+ }
+
++ tup->rx_dma_active = true;
+ tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
+ tup->rx_dma_desc->callback_param = tup;
+ dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
+@@ -804,24 +830,13 @@ static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
+ uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
+ }
+
+-static void do_handle_rx_pio(struct tegra_uart_port *tup)
+-{
+- struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+- struct tty_port *port = &tup->uport.state->port;
+-
+- tegra_uart_handle_rx_pio(tup, port);
+- if (tty) {
+- tty_flip_buffer_push(port);
+- tty_kref_put(tty);
+- }
+-}
+-
+ static irqreturn_t tegra_uart_isr(int irq, void *data)
+ {
+ struct tegra_uart_port *tup = data;
+ struct uart_port *u = &tup->uport;
+ unsigned long iir;
+ unsigned long ier;
++ bool is_rx_start = false;
+ bool is_rx_int = false;
+ unsigned long flags;
+
+@@ -834,10 +849,12 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
+ if (tup->rx_in_progress) {
+ ier = tup->ier_shadow;
+ ier |= (UART_IER_RLSI | UART_IER_RTOIE |
+- TEGRA_UART_IER_EORD);
++ TEGRA_UART_IER_EORD | UART_IER_RDI);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ }
++ } else if (is_rx_start) {
++ tegra_uart_start_rx_dma(tup);
+ }
+ spin_unlock_irqrestore(&u->lock, flags);
+ return IRQ_HANDLED;
+@@ -856,17 +873,23 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
+
+ case 4: /* End of data */
+ case 6: /* Rx timeout */
+- case 2: /* Receive */
+- if (!tup->use_rx_pio && !is_rx_int) {
+- is_rx_int = true;
++ if (!tup->use_rx_pio) {
++ is_rx_int = tup->rx_in_progress;
+ /* Disable Rx interrupts */
+ ier = tup->ier_shadow;
+- ier |= UART_IER_RDI;
+- tegra_uart_write(tup, ier, UART_IER);
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI |
+ UART_IER_RTOIE | TEGRA_UART_IER_EORD);
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
++ break;
++ }
++ /* Fall through */
++ case 2: /* Receive */
++ if (!tup->use_rx_pio) {
++ is_rx_start = tup->rx_in_progress;
++ tup->ier_shadow &= ~UART_IER_RDI;
++ tegra_uart_write(tup, tup->ier_shadow,
++ UART_IER);
+ } else {
+ do_handle_rx_pio(tup);
+ }
+@@ -888,7 +911,6 @@ static void tegra_uart_stop_rx(struct uart_port *u)
+ {
+ struct tegra_uart_port *tup = to_tegra_uport(u);
+ struct tty_port *port = &tup->uport.state->port;
+- struct dma_tx_state state;
+ unsigned long ier;
+
+ if (tup->rts_active)
+@@ -905,13 +927,11 @@ static void tegra_uart_stop_rx(struct uart_port *u)
+ tup->ier_shadow = ier;
+ tegra_uart_write(tup, ier, UART_IER);
+ tup->rx_in_progress = 0;
+- if (tup->rx_dma_chan && !tup->use_rx_pio) {
+- dmaengine_terminate_all(tup->rx_dma_chan);
+- dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
+- tegra_uart_rx_buffer_push(tup, state.residue);
+- } else {
++
++ if (!tup->use_rx_pio)
++ tegra_uart_terminate_rx_dma(tup);
++ else
+ tegra_uart_handle_rx_pio(tup, port);
+- }
+ }
+
+ static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
+@@ -1056,12 +1076,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
+ tup->fcr_shadow |= UART_FCR_DMA_SELECT;
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+-
+- ret = tegra_uart_start_rx_dma(tup);
+- if (ret < 0) {
+- dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
+- return ret;
+- }
+ } else {
+ tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
+ }
+@@ -1071,10 +1085,6 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ * Enable IE_RXS for the receive status interrupts like line errros.
+ * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+ *
+- * If using DMA mode, enable EORD instead of receive interrupt which
+- * will interrupt after the UART is done with the receive instead of
+- * the interrupt when the FIFO "threshold" is reached.
+- *
+ * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
+ * the DATA is sitting in the FIFO and couldn't be transferred to the
+ * DMA as the DMA size alignment (4 bytes) is not met. EORD will be
+@@ -1085,11 +1095,14 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
+ * then the EORD.
+ */
++ tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
++
++ /*
++ * If using DMA mode, enable EORD interrupt to notify about RX
++ * completion.
++ */
+ if (!tup->use_rx_pio)
+- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE |
+- TEGRA_UART_IER_EORD;
+- else
+- tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | UART_IER_RDI;
++ tup->ier_shadow |= TEGRA_UART_IER_EORD;
+
+ tegra_uart_write(tup, tup->ier_shadow, UART_IER);
+ return 0;
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 72131b5e132eb..beca02c304988 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -1140,7 +1140,13 @@ static int __init sunsab_init(void)
+ }
+ }
+
+- return platform_driver_register(&sab_driver);
++ err = platform_driver_register(&sab_driver);
++ if (err) {
++ kfree(sunsab_ports);
++ sunsab_ports = NULL;
++ }
++
++ return err;
+ }
+
+ static void __exit sunsab_exit(void)
+diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
+index 44858f70f5f52..39dbd8c562499 100644
+--- a/drivers/uio/uio_dmem_genirq.c
++++ b/drivers/uio/uio_dmem_genirq.c
+@@ -110,8 +110,10 @@ static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
+ * remember the state so we can allow user space to enable it later.
+ */
+
++ spin_lock(&priv->lock);
+ if (!test_and_set_bit(0, &priv->flags))
+ disable_irq_nosync(irq);
++ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+ }
+@@ -125,20 +127,19 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
+ * in the interrupt controller, but keep track of the
+ * state to prevent per-irq depth damage.
+ *
+- * Serialize this operation to support multiple tasks.
++ * Serialize this operation to support multiple tasks and concurrency
++ * with irq handler on SMP systems.
+ */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (irq_on) {
+ if (test_and_clear_bit(0, &priv->flags))
+ enable_irq(dev_info->irq);
+- spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+- if (!test_and_set_bit(0, &priv->flags)) {
+- spin_unlock_irqrestore(&priv->lock, flags);
+- disable_irq(dev_info->irq);
+- }
++ if (!test_and_set_bit(0, &priv->flags))
++ disable_irq_nosync(dev_info->irq);
+ }
++ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+ }
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index a9c49b2ce511b..db1aaed07e056 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -931,8 +931,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
+
+ if (!dwc->ulpi_ready) {
+ ret = dwc3_core_ulpi_init(dwc);
+- if (ret)
++ if (ret) {
++ if (ret == -ETIMEDOUT) {
++ dwc3_core_soft_reset(dwc);
++ ret = -EPROBE_DEFER;
++ }
+ goto err0;
++ }
+ dwc->ulpi_ready = true;
+ }
+
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index e4d71410a4b1c..c9d61d4dc9f52 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -45,12 +45,25 @@ struct f_hidg {
+ unsigned short report_desc_length;
+ char *report_desc;
+ unsigned short report_length;
++ /*
++ * use_out_ep - if true, the OUT Endpoint (interrupt out method)
++ * will be used to receive reports from the host
++ * using functions with the "intout" suffix.
++ * Otherwise, the OUT Endpoint will not be configured
++ * and the SETUP/SET_REPORT method ("ssreport" suffix)
++ * will be used to receive reports.
++ */
++ bool use_out_ep;
+
+ /* recv report */
+- struct list_head completed_out_req;
+ spinlock_t read_spinlock;
+ wait_queue_head_t read_queue;
++ /* recv report - interrupt out only (use_out_ep == 1) */
++ struct list_head completed_out_req;
+ unsigned int qlen;
++ /* recv report - setup set_report only (use_out_ep == 0) */
++ char *set_report_buf;
++ unsigned int set_report_length;
+
+ /* send report */
+ spinlock_t write_spinlock;
+@@ -58,7 +71,7 @@ struct f_hidg {
+ wait_queue_head_t write_queue;
+ struct usb_request *req;
+
+- int minor;
++ struct device dev;
+ struct cdev cdev;
+ struct usb_function func;
+
+@@ -71,6 +84,14 @@ static inline struct f_hidg *func_to_hidg(struct usb_function *f)
+ return container_of(f, struct f_hidg, func);
+ }
+
++static void hidg_release(struct device *dev)
++{
++ struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);
++
++ kfree(hidg->set_report_buf);
++ kfree(hidg);
++}
++
+ /*-------------------------------------------------------------------------*/
+ /* Static descriptors */
+
+@@ -79,7 +100,7 @@ static struct usb_interface_descriptor hidg_interface_desc = {
+ .bDescriptorType = USB_DT_INTERFACE,
+ /* .bInterfaceNumber = DYNAMIC */
+ .bAlternateSetting = 0,
+- .bNumEndpoints = 2,
++ /* .bNumEndpoints = DYNAMIC (depends on use_out_ep) */
+ .bInterfaceClass = USB_CLASS_HID,
+ /* .bInterfaceSubClass = DYNAMIC */
+ /* .bInterfaceProtocol = DYNAMIC */
+@@ -140,7 +161,7 @@ static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
+ /* .wBytesPerInterval = DYNAMIC */
+ };
+
+-static struct usb_descriptor_header *hidg_ss_descriptors[] = {
++static struct usb_descriptor_header *hidg_ss_descriptors_intout[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+@@ -150,6 +171,14 @@ static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+ NULL,
+ };
+
++static struct usb_descriptor_header *hidg_ss_descriptors_ssreport[] = {
++ (struct usb_descriptor_header *)&hidg_interface_desc,
++ (struct usb_descriptor_header *)&hidg_desc,
++ (struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
++ (struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
++ NULL,
++};
++
+ /* High-Speed Support */
+
+ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
+@@ -176,7 +205,7 @@ static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
+ */
+ };
+
+-static struct usb_descriptor_header *hidg_hs_descriptors[] = {
++static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
+@@ -184,6 +213,13 @@ static struct usb_descriptor_header *hidg_hs_descriptors[] = {
+ NULL,
+ };
+
++static struct usb_descriptor_header *hidg_hs_descriptors_ssreport[] = {
++ (struct usb_descriptor_header *)&hidg_interface_desc,
++ (struct usb_descriptor_header *)&hidg_desc,
++ (struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
++ NULL,
++};
++
+ /* Full-Speed Support */
+
+ static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
+@@ -210,7 +246,7 @@ static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
+ */
+ };
+
+-static struct usb_descriptor_header *hidg_fs_descriptors[] = {
++static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = {
+ (struct usb_descriptor_header *)&hidg_interface_desc,
+ (struct usb_descriptor_header *)&hidg_desc,
+ (struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
+@@ -218,6 +254,13 @@ static struct usb_descriptor_header *hidg_fs_descriptors[] = {
+ NULL,
+ };
+
++static struct usb_descriptor_header *hidg_fs_descriptors_ssreport[] = {
++ (struct usb_descriptor_header *)&hidg_interface_desc,
++ (struct usb_descriptor_header *)&hidg_desc,
++ (struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
++ NULL,
++};
++
+ /*-------------------------------------------------------------------------*/
+ /* Strings */
+
+@@ -241,8 +284,8 @@ static struct usb_gadget_strings *ct_func_strings[] = {
+ /*-------------------------------------------------------------------------*/
+ /* Char Device */
+
+-static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+- size_t count, loff_t *ptr)
++static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
++ size_t count, loff_t *ptr)
+ {
+ struct f_hidg *hidg = file->private_data;
+ struct f_hidg_req_list *list;
+@@ -258,15 +301,15 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+
+-#define READ_COND (!list_empty(&hidg->completed_out_req))
++#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req))
+
+ /* wait for at least one buffer to complete */
+- while (!READ_COND) {
++ while (!READ_COND_INTOUT) {
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+- if (wait_event_interruptible(hidg->read_queue, READ_COND))
++ if (wait_event_interruptible(hidg->read_queue, READ_COND_INTOUT))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
+@@ -316,6 +359,60 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+ return count;
+ }
+
++#define READ_COND_SSREPORT (hidg->set_report_buf != NULL)
++
++static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
++ size_t count, loff_t *ptr)
++{
++ struct f_hidg *hidg = file->private_data;
++ char *tmp_buf = NULL;
++ unsigned long flags;
++
++ if (!count)
++ return 0;
++
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++
++ while (!READ_COND_SSREPORT) {
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ if (wait_event_interruptible(hidg->read_queue, READ_COND_SSREPORT))
++ return -ERESTARTSYS;
++
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++ }
++
++ count = min_t(unsigned int, count, hidg->set_report_length);
++ tmp_buf = hidg->set_report_buf;
++ hidg->set_report_buf = NULL;
++
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++
++ if (tmp_buf != NULL) {
++ count -= copy_to_user(buffer, tmp_buf, count);
++ kfree(tmp_buf);
++ } else {
++ count = -ENOMEM;
++ }
++
++ wake_up(&hidg->read_queue);
++
++ return count;
++}
++
++static ssize_t f_hidg_read(struct file *file, char __user *buffer,
++ size_t count, loff_t *ptr)
++{
++ struct f_hidg *hidg = file->private_data;
++
++ if (hidg->use_out_ep)
++ return f_hidg_intout_read(file, buffer, count, ptr);
++ else
++ return f_hidg_ssreport_read(file, buffer, count, ptr);
++}
++
+ static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+ struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+@@ -439,14 +536,20 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
+ if (WRITE_COND)
+ ret |= EPOLLOUT | EPOLLWRNORM;
+
+- if (READ_COND)
+- ret |= EPOLLIN | EPOLLRDNORM;
++ if (hidg->use_out_ep) {
++ if (READ_COND_INTOUT)
++ ret |= EPOLLIN | EPOLLRDNORM;
++ } else {
++ if (READ_COND_SSREPORT)
++ ret |= EPOLLIN | EPOLLRDNORM;
++ }
+
+ return ret;
+ }
+
+ #undef WRITE_COND
+-#undef READ_COND
++#undef READ_COND_SSREPORT
++#undef READ_COND_INTOUT
+
+ static int f_hidg_release(struct inode *inode, struct file *fd)
+ {
+@@ -473,7 +576,7 @@ static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
+ return alloc_ep_req(ep, length);
+ }
+
+-static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
++static void hidg_intout_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+ struct f_hidg *hidg = (struct f_hidg *) req->context;
+ struct usb_composite_dev *cdev = hidg->func.config->cdev;
+@@ -508,6 +611,37 @@ free_req:
+ }
+ }
+
++static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct f_hidg *hidg = (struct f_hidg *)req->context;
++ struct usb_composite_dev *cdev = hidg->func.config->cdev;
++ char *new_buf = NULL;
++ unsigned long flags;
++
++ if (req->status != 0 || req->buf == NULL || req->actual == 0) {
++ ERROR(cdev,
++ "%s FAILED: status=%d, buf=%p, actual=%d\n",
++ __func__, req->status, req->buf, req->actual);
++ return;
++ }
++
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++
++ new_buf = krealloc(hidg->set_report_buf, req->actual, GFP_ATOMIC);
++ if (new_buf == NULL) {
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++ return;
++ }
++ hidg->set_report_buf = new_buf;
++
++ hidg->set_report_length = req->actual;
++ memcpy(hidg->set_report_buf, req->buf, req->actual);
++
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
++
++ wake_up(&hidg->read_queue);
++}
++
+ static int hidg_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+ {
+@@ -555,7 +689,11 @@ static int hidg_setup(struct usb_function *f,
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+ | HID_REQ_SET_REPORT):
+ VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
+- goto stall;
++ if (hidg->use_out_ep)
++ goto stall;
++ req->complete = hidg_ssreport_complete;
++ req->context = hidg;
++ goto respond;
+ break;
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+@@ -643,15 +781,18 @@ static void hidg_disable(struct usb_function *f)
+ unsigned long flags;
+
+ usb_ep_disable(hidg->in_ep);
+- usb_ep_disable(hidg->out_ep);
+
+- spin_lock_irqsave(&hidg->read_spinlock, flags);
+- list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
+- free_ep_req(hidg->out_ep, list->req);
+- list_del(&list->list);
+- kfree(list);
++ if (hidg->out_ep) {
++ usb_ep_disable(hidg->out_ep);
++
++ spin_lock_irqsave(&hidg->read_spinlock, flags);
++ list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
++ free_ep_req(hidg->out_ep, list->req);
++ list_del(&list->list);
++ kfree(list);
++ }
++ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+ }
+- spin_unlock_irqrestore(&hidg->read_spinlock, flags);
+
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ if (!hidg->write_pending) {
+@@ -697,8 +838,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ }
+ }
+
+-
+- if (hidg->out_ep != NULL) {
++ if (hidg->use_out_ep && hidg->out_ep != NULL) {
+ /* restart endpoint */
+ usb_ep_disable(hidg->out_ep);
+
+@@ -723,7 +863,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ hidg_alloc_ep_req(hidg->out_ep,
+ hidg->report_length);
+ if (req) {
+- req->complete = hidg_set_report_complete;
++ req->complete = hidg_intout_complete;
+ req->context = hidg;
+ status = usb_ep_queue(hidg->out_ep, req,
+ GFP_ATOMIC);
+@@ -749,7 +889,8 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ }
+ return 0;
+ disable_out_ep:
+- usb_ep_disable(hidg->out_ep);
++ if (hidg->out_ep)
++ usb_ep_disable(hidg->out_ep);
+ free_req_in:
+ if (req_in)
+ free_ep_req(hidg->in_ep, req_in);
+@@ -777,9 +918,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ struct usb_ep *ep;
+ struct f_hidg *hidg = func_to_hidg(f);
+ struct usb_string *us;
+- struct device *device;
+ int status;
+- dev_t dev;
+
+ /* maybe allocate device-global string IDs, and patch descriptors */
+ us = usb_gstrings_attach(c->cdev, ct_func_strings,
+@@ -801,14 +940,21 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ goto fail;
+ hidg->in_ep = ep;
+
+- ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
+- if (!ep)
+- goto fail;
+- hidg->out_ep = ep;
++ hidg->out_ep = NULL;
++ if (hidg->use_out_ep) {
++ ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
++ if (!ep)
++ goto fail;
++ hidg->out_ep = ep;
++ }
++
++ /* used only if use_out_ep == 1 */
++ hidg->set_report_buf = NULL;
+
+ /* set descriptor dynamic values */
+ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
+ hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
++ hidg_interface_desc.bNumEndpoints = hidg->use_out_ep ? 2 : 1;
+ hidg->protocol = HID_REPORT_PROTOCOL;
+ hidg->idle = 1;
+ hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+@@ -839,9 +985,19 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+ hidg_ss_out_ep_desc.bEndpointAddress =
+ hidg_fs_out_ep_desc.bEndpointAddress;
+
+- status = usb_assign_descriptors(f, hidg_fs_descriptors,
+- hidg_hs_descriptors, hidg_ss_descriptors,
+- hidg_ss_descriptors);
++ if (hidg->use_out_ep)
++ status = usb_assign_descriptors(f,
++ hidg_fs_descriptors_intout,
++ hidg_hs_descriptors_intout,
++ hidg_ss_descriptors_intout,
++ hidg_ss_descriptors_intout);
++ else
++ status = usb_assign_descriptors(f,
++ hidg_fs_descriptors_ssreport,
++ hidg_hs_descriptors_ssreport,
++ hidg_ss_descriptors_ssreport,
++ hidg_ss_descriptors_ssreport);
++
+ if (status)
+ goto fail;
+
+@@ -855,21 +1011,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
+
+ /* create char device */
+ cdev_init(&hidg->cdev, &f_hidg_fops);
+- dev = MKDEV(major, hidg->minor);
+- status = cdev_add(&hidg->cdev, dev, 1);
++ status = cdev_device_add(&hidg->cdev, &hidg->dev);
+ if (status)
+ goto fail_free_descs;
+
+- device = device_create(hidg_class, NULL, dev, NULL,
+- "%s%d", "hidg", hidg->minor);
+- if (IS_ERR(device)) {
+- status = PTR_ERR(device);
+- goto del;
+- }
+-
+ return 0;
+-del:
+- cdev_del(&hidg->cdev);
+ fail_free_descs:
+ usb_free_all_descriptors(f);
+ fail:
+@@ -956,6 +1102,7 @@ CONFIGFS_ATTR(f_hid_opts_, name)
+
+ F_HID_OPT(subclass, 8, 255);
+ F_HID_OPT(protocol, 8, 255);
++F_HID_OPT(no_out_endpoint, 8, 1);
+ F_HID_OPT(report_length, 16, 65535);
+
+ static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page)
+@@ -1015,6 +1162,7 @@ CONFIGFS_ATTR_RO(f_hid_opts_, dev);
+ static struct configfs_attribute *hid_attrs[] = {
+ &f_hid_opts_attr_subclass,
+ &f_hid_opts_attr_protocol,
++ &f_hid_opts_attr_no_out_endpoint,
+ &f_hid_opts_attr_report_length,
+ &f_hid_opts_attr_report_desc,
+ &f_hid_opts_attr_dev,
+@@ -1098,8 +1246,7 @@ static void hidg_free(struct usb_function *f)
+
+ hidg = func_to_hidg(f);
+ opts = container_of(f->fi, struct f_hid_opts, func_inst);
+- kfree(hidg->report_desc);
+- kfree(hidg);
++ put_device(&hidg->dev);
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+@@ -1109,8 +1256,7 @@ static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct f_hidg *hidg = func_to_hidg(f);
+
+- device_destroy(hidg_class, MKDEV(major, hidg->minor));
+- cdev_del(&hidg->cdev);
++ cdev_device_del(&hidg->cdev, &hidg->dev);
+
+ usb_free_all_descriptors(f);
+ }
+@@ -1119,6 +1265,7 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ {
+ struct f_hidg *hidg;
+ struct f_hid_opts *opts;
++ int ret;
+
+ /* allocate and initialize one new instance */
+ hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
+@@ -1130,21 +1277,33 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
+ mutex_lock(&opts->lock);
+ ++opts->refcnt;
+
+- hidg->minor = opts->minor;
++ device_initialize(&hidg->dev);
++ hidg->dev.release = hidg_release;
++ hidg->dev.class = hidg_class;
++ hidg->dev.devt = MKDEV(major, opts->minor);
++ ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
++ if (ret) {
++ --opts->refcnt;
++ mutex_unlock(&opts->lock);
++ return ERR_PTR(ret);
++ }
++
+ hidg->bInterfaceSubClass = opts->subclass;
+ hidg->bInterfaceProtocol = opts->protocol;
+ hidg->report_length = opts->report_length;
+ hidg->report_desc_length = opts->report_desc_length;
+ if (opts->report_desc) {
+- hidg->report_desc = kmemdup(opts->report_desc,
+- opts->report_desc_length,
+- GFP_KERNEL);
++ hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc,
++ opts->report_desc_length,
++ GFP_KERNEL);
+ if (!hidg->report_desc) {
+- kfree(hidg);
++ put_device(&hidg->dev);
++ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
++ hidg->use_out_ep = !opts->no_out_endpoint;
+
+ mutex_unlock(&opts->lock);
+
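The f_hid changes above add a second receive path: when no interrupt OUT endpoint is configured, the most recent SET_REPORT payload is kept in a single buffer that the ep0 completion handler refreshes under read_spinlock and the reader detaches before copying to user space. Below is a minimal userspace sketch of that single-buffer handoff, not part of the patch; a pthread mutex stands in for the spinlock and realloc() for krealloc(GFP_ATOMIC), so all names and types here are illustrative only.

/* Single-buffer "latest report wins" handoff, modelled in user space.
 * The writer (analogous to hidg_ssreport_complete) overwrites the shared
 * buffer; the reader (analogous to f_hidg_ssreport_read) detaches it under
 * the lock and frees it after copying, so they never touch it concurrently.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct report_box {
	pthread_mutex_t lock;
	char *buf;		/* latest report, or NULL once consumed */
	size_t len;
};

static void store_report(struct report_box *box, const void *data, size_t len)
{
	pthread_mutex_lock(&box->lock);
	char *nbuf = realloc(box->buf, len);	/* like krealloc(..., GFP_ATOMIC) */
	if (nbuf) {
		memcpy(nbuf, data, len);
		box->buf = nbuf;
		box->len = len;
	}
	pthread_mutex_unlock(&box->lock);
}

static ssize_t take_report(struct report_box *box, void *out, size_t out_len)
{
	pthread_mutex_lock(&box->lock);
	char *tmp = box->buf;			/* take ownership of the buffer */
	size_t len = box->len < out_len ? box->len : out_len;
	box->buf = NULL;
	pthread_mutex_unlock(&box->lock);

	if (!tmp)
		return -1;			/* nothing pending */
	memcpy(out, tmp, len);
	free(tmp);
	return (ssize_t)len;
}

int main(void)
{
	struct report_box box = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	char out[8];

	store_report(&box, "\x01\x02\x03", 3);
	store_report(&box, "\x09\x08", 2);	/* newer report replaces older */
	printf("read %zd bytes\n", take_report(&box, out, sizeof(out)));
	printf("read %zd bytes\n", take_report(&box, out, sizeof(out)));
	return 0;
}

Build with -pthread; the second read returns -1 because the single slot was already consumed, mirroring how a blocking reader would then wait on read_queue.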
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index c03b67aab1a85..094a88ff9a671 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -213,8 +213,9 @@ uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+
+ memset(&v4l2_event, 0, sizeof(v4l2_event));
+ v4l2_event.type = UVC_EVENT_DATA;
+- uvc_event->data.length = req->actual;
+- memcpy(&uvc_event->data.data, req->buf, req->actual);
++ uvc_event->data.length = min_t(unsigned int, req->actual,
++ sizeof(uvc_event->data.data));
++ memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length);
+ v4l2_event_queue(&uvc->vdev, &v4l2_event);
+ }
+ }
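The f_uvc.c hunk clamps the copied length to the size of the fixed destination array before the memcpy(). A small standalone sketch of the same pattern follows; the sizes are illustrative, not the real UVC event layout.

/* Clamping a copy to the destination size: never trust the
 * producer-reported length when the destination is a fixed array.
 */
#include <stdio.h>
#include <string.h>

struct event_data {
	unsigned int length;
	unsigned char data[60];	/* fixed-size payload, size chosen for the demo */
};

static void fill_event(struct event_data *ev, const void *src, size_t actual)
{
	size_t n = actual < sizeof(ev->data) ? actual : sizeof(ev->data);

	ev->length = (unsigned int)n;
	memcpy(ev->data, src, n);	/* at most sizeof(ev->data) bytes */
}

int main(void)
{
	struct event_data ev;
	unsigned char big[200] = { 0 };

	fill_event(&ev, big, sizeof(big));	/* oversized input gets truncated */
	printf("stored %u of %zu bytes\n", ev.length, sizeof(big));
	return 0;
}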
+diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
+index 1594bfa312ebf..90d8b1c0f25f9 100644
+--- a/drivers/usb/gadget/function/u_hid.h
++++ b/drivers/usb/gadget/function/u_hid.h
+@@ -20,6 +20,7 @@ struct f_hid_opts {
+ int minor;
+ unsigned char subclass;
+ unsigned char protocol;
++ unsigned char no_out_endpoint;
+ unsigned short report_length;
+ unsigned short report_desc_length;
+ unsigned char *report_desc;
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index c313d07ec16fd..30389e966e42c 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -629,10 +629,10 @@ static void fotg210_request_error(struct fotg210_udc *fotg210)
+ static void fotg210_set_address(struct fotg210_udc *fotg210,
+ struct usb_ctrlrequest *ctrl)
+ {
+- if (ctrl->wValue >= 0x0100) {
++ if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
+ fotg210_request_error(fotg210);
+ } else {
+- fotg210_set_dev_addr(fotg210, ctrl->wValue);
++ fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
+ fotg210_set_cxdone(fotg210);
+ }
+ }
+@@ -713,17 +713,17 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+- fotg210->ep0_data = 1 << USB_DEVICE_SELF_POWERED;
++ fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
+ break;
+ case USB_RECIP_INTERFACE:
+- fotg210->ep0_data = 0;
++ fotg210->ep0_data = cpu_to_le16(0);
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum)
+ fotg210->ep0_data =
+- fotg210_is_epnstall(fotg210->ep[epnum])
+- << USB_ENDPOINT_HALT;
++ cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
++ << USB_ENDPOINT_HALT);
+ else
+ fotg210_request_error(fotg210);
+ break;
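The fotg210 hunk wraps wValue and the ep0 status word in le16_to_cpu()/cpu_to_le16() because USB control-request fields are little-endian on the wire regardless of host byte order. A standalone sketch of decoding such a field portably; the helper name is hypothetical, not the driver's code.

/* Decoding a 16-bit little-endian field (such as a setup packet's wValue)
 * independent of host byte order: assemble it from bytes instead of
 * casting the raw buffer to a host-endian integer.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));	/* byte 0 is least significant */
}

int main(void)
{
	/* wValue = 0x0005 as it appears on the wire (LSB first) */
	const uint8_t setup_wvalue[2] = { 0x05, 0x00 };
	uint16_t wValue = get_le16(setup_wvalue);

	printf("wValue = 0x%04x (device address %u)\n", wValue, wValue);
	return 0;
}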
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index 8e83995fc3bd2..b8fc818c154ae 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1629,8 +1629,6 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+ {
+ struct musb *musb = gadget_to_musb(gadget);
+
+- if (!musb->xceiv->set_power)
+- return -EOPNOTSUPP;
+ return usb_phy_set_power(musb->xceiv, mA);
+ }
+
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index 97e3d75b19a33..873d89823f5b3 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -108,10 +108,13 @@ usb_role_switch_is_parent(struct fwnode_handle *fwnode)
+ struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+ struct device *dev;
+
+- if (!parent || !fwnode_property_present(parent, "usb-role-switch"))
++ if (!fwnode_property_present(parent, "usb-role-switch")) {
++ fwnode_handle_put(parent);
+ return NULL;
++ }
+
+ dev = class_find_device_by_fwnode(role_class, parent);
++ fwnode_handle_put(parent);
+ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ }
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index bc4fd79a13dbe..9992c5aa5ff47 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -196,6 +196,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
+ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
++ { USB_DEVICE(0x17A8, 0x0011) }, /* Kamstrup 444 MHz RF sniffer */
++ { USB_DEVICE(0x17A8, 0x0013) }, /* Kamstrup 870 MHz RF sniffer */
+ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
+ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
+index 43fa1f0716b7d..d257d38e37329 100644
+--- a/drivers/usb/serial/f81232.c
++++ b/drivers/usb/serial/f81232.c
+@@ -78,9 +78,6 @@ static u8 const clock_table[] = { F81232_CLK_1_846_MHZ, F81232_CLK_14_77_MHZ,
+
+ static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
+ {
+- if (!baudrate)
+- return 0;
+-
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
+ }
+
+@@ -423,9 +420,14 @@ static void f81232_set_baudrate(struct tty_struct *tty,
+ speed_t baud_list[] = { baudrate, old_baudrate, F81232_DEF_BAUDRATE };
+
+ for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+- idx = f81232_find_clk(baud_list[i]);
++ baudrate = baud_list[i];
++ if (baudrate == 0) {
++ tty_encode_baud_rate(tty, 0, 0);
++ return;
++ }
++
++ idx = f81232_find_clk(baudrate);
+ if (idx >= 0) {
+- baudrate = baud_list[i];
+ tty_encode_baud_rate(tty, baudrate, baudrate);
+ break;
+ }
+diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
+index 2b39bda035c71..1b2d2c2e8595c 100644
+--- a/drivers/usb/serial/f81534.c
++++ b/drivers/usb/serial/f81534.c
+@@ -538,9 +538,6 @@ static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
+
+ static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
+ {
+- if (!baudrate)
+- return 0;
+-
+ /* Round to nearest divisor */
+ return DIV_ROUND_CLOSEST(clockrate, baudrate);
+ }
+@@ -570,9 +567,14 @@ static int f81534_set_port_config(struct usb_serial_port *port,
+ u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE};
+
+ for (i = 0; i < ARRAY_SIZE(baud_list); ++i) {
+- idx = f81534_find_clk(baud_list[i]);
++ baudrate = baud_list[i];
++ if (baudrate == 0) {
++ tty_encode_baud_rate(tty, 0, 0);
++ return 0;
++ }
++
++ idx = f81534_find_clk(baudrate);
+ if (idx >= 0) {
+- baudrate = baud_list[i];
+ tty_encode_baud_rate(tty, baudrate, baudrate);
+ break;
+ }
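Both f81232 and f81534 derive the UART divisor with DIV_ROUND_CLOSEST(clockrate, baudrate), which divides by zero for B0 (the "hang up" rate), so the fixes bail out before the clock lookup when the requested rate is 0. A standalone sketch of that rounding arithmetic with the zero guard; the clock value is illustrative only.

/* Round-to-nearest baud divisor, with the B0 (baudrate == 0) case handled
 * before dividing, the same guard the f81232/f81534 fixes add.
 */
#include <stddef.h>
#include <stdio.h>

/* Equivalent of the kernel's DIV_ROUND_CLOSEST() for positive operands. */
static unsigned int div_round_closest(unsigned int num, unsigned int den)
{
	return (num + den / 2) / den;
}

static int calc_divisor(unsigned int baud, unsigned int clock,
			unsigned int *divisor)
{
	if (baud == 0)			/* B0 means "hang up", not a real rate */
		return -1;
	*divisor = div_round_closest(clock, baud);
	return 0;
}

int main(void)
{
	const unsigned int clock = 14770000;	/* 14.77 MHz, illustrative */
	const unsigned int rates[] = { 115200, 9600, 0 };
	unsigned int div;

	for (size_t i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (calc_divisor(rates[i], clock, &div) == 0)
			printf("%u baud -> divisor %u\n", rates[i], div);
		else
			printf("%u baud -> no divisor (hang up)\n", rates[i]);
	}
	return 0;
}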
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 263548af24838..08a368a0f7457 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -255,6 +255,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EP06 0x0306
+ #define QUECTEL_PRODUCT_EM05G 0x030a
+ #define QUECTEL_PRODUCT_EM060K 0x030b
++#define QUECTEL_PRODUCT_EM05G_SG 0x0311
+ #define QUECTEL_PRODUCT_EM12 0x0512
+ #define QUECTEL_PRODUCT_RM500Q 0x0800
+ #define QUECTEL_PRODUCT_RM520N 0x0801
+@@ -1160,6 +1161,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
+ .driver_info = RSVD(6) | ZLP },
++ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
++ .driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index ddab2cd3d2e75..de62421d96709 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -438,6 +438,8 @@ static int alauda_init_media(struct us_data *us)
+ + MEDIA_INFO(us).blockshift + MEDIA_INFO(us).pageshift);
+ MEDIA_INFO(us).pba_to_lba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
+ MEDIA_INFO(us).lba_to_pba = kcalloc(num_zones, sizeof(u16*), GFP_NOIO);
++ if (MEDIA_INFO(us).pba_to_lba == NULL || MEDIA_INFO(us).lba_to_pba == NULL)
++ return USB_STOR_TRANSPORT_ERROR;
+
+ if (alauda_reset_media(us) != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
+index c950171556d8c..0369ad92a1c8e 100644
+--- a/drivers/usb/typec/bus.c
++++ b/drivers/usb/typec/bus.c
+@@ -126,7 +126,7 @@ int typec_altmode_exit(struct typec_altmode *adev)
+ if (!adev || !adev->active)
+ return 0;
+
+- if (!pdev->ops || !pdev->ops->enter)
++ if (!pdev->ops || !pdev->ops->exit)
+ return -EOPNOTSUPP;
+
+ /* Moving to USB Safe State */
+diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
+index 6caed68ce1be7..84b23ae48aeec 100644
+--- a/drivers/usb/typec/tcpm/tcpci.c
++++ b/drivers/usb/typec/tcpm/tcpci.c
+@@ -551,8 +551,10 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
+ return ERR_PTR(err);
+
+ tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
+- if (IS_ERR(tcpci->port))
++ if (IS_ERR(tcpci->port)) {
++ fwnode_handle_put(tcpci->tcpc.fwnode);
+ return ERR_CAST(tcpci->port);
++ }
+
+ return tcpci;
+ }
+@@ -561,6 +563,7 @@ EXPORT_SYMBOL_GPL(tcpci_register_port);
+ void tcpci_unregister_port(struct tcpci *tcpci)
+ {
+ tcpm_unregister_port(tcpci->port);
++ fwnode_handle_put(tcpci->tcpc.fwnode);
+ }
+ EXPORT_SYMBOL_GPL(tcpci_unregister_port);
+
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index 6f727034679f1..46a72fe397193 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -72,12 +72,11 @@ static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
+ const char **extra_dbg)
+ {
+ #ifdef CONFIG_ACPI
+- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct device *dev = vdev->device;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_status acpi_ret;
+
+- acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
++ acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL);
+ if (ACPI_FAILURE(acpi_ret)) {
+ if (extra_dbg)
+ *extra_dbg = acpi_format_exception(acpi_ret);
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 97be299f0a8dc..fdfa399700fe9 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2050,7 +2050,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
+ struct iovec *_iov;
+- u64 s = 0;
++ u64 s = 0, last = addr + len - 1;
+ int ret = 0;
+
+ while ((u64)len > s) {
+@@ -2061,7 +2061,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ }
+
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+- addr, addr + len - 1);
++ addr, last);
+ if (node == NULL || node->start > addr) {
+ if (umem != dev->iotlb) {
+ ret = -EFAULT;
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index a7e5f12687b70..124ed0e8454e9 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2243,7 +2243,6 @@ config FB_SSD1307
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_DEFERRED_IO
+- select PWM
+ select FB_BACKLIGHT
+ help
+ This driver implements support for the Solomon SSD1307
+diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
+index 8ae010f07d7da..0ec4be2f2e8c5 100644
+--- a/drivers/video/fbdev/pm2fb.c
++++ b/drivers/video/fbdev/pm2fb.c
+@@ -1529,8 +1529,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ }
+
+ info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev);
+- if (!info)
+- return -ENOMEM;
++ if (!info) {
++ err = -ENOMEM;
++ goto err_exit_disable;
++ }
+ default_par = info->par;
+
+ switch (pdev->device) {
+@@ -1711,6 +1713,8 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ release_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len);
+ err_exit_neither:
+ framebuffer_release(info);
++ err_exit_disable:
++ pci_disable_device(pdev);
+ return retval;
+ }
+
+@@ -1737,6 +1741,7 @@ static void pm2fb_remove(struct pci_dev *pdev)
+ fb_dealloc_cmap(&info->cmap);
+ kfree(info->pixmap.addr);
+ framebuffer_release(info);
++ pci_disable_device(pdev);
+ }
+
+ static const struct pci_device_id pm2fb_id_table[] = {
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 439565cae7abb..7d3af1d19ad3f 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -1756,6 +1756,7 @@ static int uvesafb_probe(struct platform_device *dev)
+ out_unmap:
+ iounmap(info->screen_base);
+ out_mem:
++ arch_phys_wc_del(par->mtrr_handle);
+ release_mem_region(info->fix.smem_start, info->fix.smem_len);
+ out_reg:
+ release_region(0x3c0, 32);
+diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
+index 498038a964ee3..ea6671723606b 100644
+--- a/drivers/video/fbdev/vermilion/vermilion.c
++++ b/drivers/video/fbdev/vermilion/vermilion.c
+@@ -277,8 +277,10 @@ static int vmlfb_get_gpu(struct vml_par *par)
+
+ mutex_unlock(&vml_mutex);
+
+- if (pci_enable_device(par->gpu) < 0)
++ if (pci_enable_device(par->gpu) < 0) {
++ pci_dev_put(par->gpu);
+ return -ENODEV;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
+index ffa2ca2d3f5e5..ce366b80bda46 100644
+--- a/drivers/video/fbdev/via/via-core.c
++++ b/drivers/video/fbdev/via/via-core.c
+@@ -732,7 +732,14 @@ static int __init via_core_init(void)
+ return ret;
+ viafb_i2c_init();
+ viafb_gpio_init();
+- return pci_register_driver(&via_driver);
++ ret = pci_register_driver(&via_driver);
++ if (ret) {
++ viafb_gpio_exit();
++ viafb_i2c_exit();
++ return ret;
++ }
++
++ return 0;
+ }
+
+ static void __exit via_core_exit(void)
+diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
+index 6a1bc284f297c..eae78366eb028 100644
+--- a/drivers/vme/bridges/vme_fake.c
++++ b/drivers/vme/bridges/vme_fake.c
+@@ -1073,6 +1073,8 @@ static int __init fake_init(void)
+
+ /* We need a fake parent device */
+ vme_root = __root_device_register("vme", THIS_MODULE);
++ if (IS_ERR(vme_root))
++ return PTR_ERR(vme_root);
+
+ /* If we want to support more than one bridge at some point, we need to
+ * dynamically allocate this so we get one per device.
+diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
+index 7e079d39bd76f..f2da16bf14396 100644
+--- a/drivers/vme/bridges/vme_tsi148.c
++++ b/drivers/vme/bridges/vme_tsi148.c
+@@ -1771,6 +1771,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
+ return 0;
+
+ err_dma:
++ list_del(&entry->list);
+ err_dest:
+ err_source:
+ err_align:
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 87cfadd70d0db..5447111049890 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -2101,8 +2101,8 @@ void xen_callback_vector(void)
+ void xen_callback_vector(void) {}
+ #endif
+
+-static bool fifo_events = true;
+-module_param(fifo_events, bool, 0);
++bool xen_fifo_events = true;
++module_param_named(fifo_events, xen_fifo_events, bool, 0);
+
+ static int xen_evtchn_cpu_prepare(unsigned int cpu)
+ {
+@@ -2131,10 +2131,12 @@ void __init xen_init_IRQ(void)
+ int ret = -EINVAL;
+ unsigned int evtchn;
+
+- if (fifo_events)
++ if (xen_fifo_events)
+ ret = xen_evtchn_fifo_init();
+- if (ret < 0)
++ if (ret < 0) {
+ xen_evtchn_2l_init();
++ xen_fifo_events = false;
++ }
+
+ xen_cpu_init_eoi(smp_processor_id());
+
+diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
+index d4ff944cd16e1..c4b0de4a542b5 100644
+--- a/drivers/xen/privcmd.c
++++ b/drivers/xen/privcmd.c
+@@ -766,7 +766,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
+ goto out;
+ }
+
+- pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
++ pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
+ if (!pfns) {
+ rc = -ENOMEM;
+ goto out;
+diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
+index 8e8346a81723c..ace587b66904c 100644
+--- a/fs/binfmt_aout.c
++++ b/fs/binfmt_aout.c
+@@ -162,6 +162,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
+ set_personality(PER_LINUX);
+ #endif
+ setup_new_exec(bprm);
++ install_exec_creds(bprm);
+
+ current->mm->end_code = ex.a_text +
+ (current->mm->start_code = N_TXTADDR(ex));
+@@ -174,7 +175,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
+ if (retval < 0)
+ return retval;
+
+- install_exec_creds(bprm);
+
+ if (N_MAGIC(ex) == OMAGIC) {
+ unsigned long text_addr, map_size;
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index d86ebd0dcc3d3..39bcbfab386dd 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -353,6 +353,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ current->personality |= READ_IMPLIES_EXEC;
+
+ setup_new_exec(bprm);
++ install_exec_creds(bprm);
+
+ set_binfmt(&elf_fdpic_format);
+
+@@ -434,9 +435,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ current->mm->start_stack = current->mm->start_brk + stack_size;
+ #endif
+
+- install_exec_creds(bprm);
+- if (create_elf_fdpic_tables(bprm, current->mm,
+- &exec_params, &interp_params) < 0)
++ retval = create_elf_fdpic_tables(bprm, current->mm, &exec_params,
++ &interp_params);
++ if (retval < 0)
+ goto error;
+
+ kdebug("- start_code %lx", current->mm->start_code);
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index c999bc0c0691f..22a7d7547a91b 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -565,6 +565,7 @@ static int load_flat_file(struct linux_binprm *bprm,
+ /* OK, This is the point of no return */
+ set_personality(PER_LINUX_32BIT);
+ setup_new_exec(bprm);
++ install_exec_creds(bprm);
+ }
+
+ /*
+@@ -992,8 +993,6 @@ static int load_flat_binary(struct linux_binprm *bprm)
+ }
+ }
+
+- install_exec_creds(bprm);
+-
+ set_binfmt(&flat_format);
+
+ #ifdef CONFIG_MMU
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 056a68292e152..23b563ff0dd7a 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -44,10 +44,10 @@ static LIST_HEAD(entries);
+ static int enabled = 1;
+
+ enum {Enabled, Magic};
+-#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
+-#define MISC_FMT_OPEN_BINARY (1 << 30)
+-#define MISC_FMT_CREDENTIALS (1 << 29)
+-#define MISC_FMT_OPEN_FILE (1 << 28)
++#define MISC_FMT_PRESERVE_ARGV0 (1UL << 31)
++#define MISC_FMT_OPEN_BINARY (1UL << 30)
++#define MISC_FMT_CREDENTIALS (1UL << 29)
++#define MISC_FMT_OPEN_FILE (1UL << 28)
+
+ typedef struct {
+ struct list_head list;
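The binfmt_misc flag macros (like the ext4 TEST_FLAG_VALUE change further down) switch from 1 << 31 to an unsigned shift, since shifting a signed int into bit 31 overflows the type and is undefined behaviour. A small standalone example of the well-defined form; the macro names below are stand-ins, not the kernel's.

/* Using an unsigned constant for the top bit keeps the flag test well
 * defined; (1 << 31) overflows int and UBSAN reports it as
 * shift-out-of-bounds.
 */
#include <stdio.h>

#define FMT_PRESERVE_ARGV0	(1UL << 31)	/* well-defined unsigned bit */
#define FMT_OPEN_BINARY		(1UL << 30)

int main(void)
{
	unsigned long flags = 0;

	flags |= FMT_PRESERVE_ARGV0;
	printf("flags = %#lx, preserve_argv0=%d, open_binary=%d\n",
	       flags,
	       (flags & FMT_PRESERVE_ARGV0) != 0,
	       (flags & FMT_OPEN_BINARY) != 0);
	return 0;
}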
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 3cbca2ebdeb0a..89411e96dcc0c 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -431,6 +431,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ u64 wanted_disk_byte = ref->wanted_disk_byte;
+ u64 count = 0;
+ u64 data_offset;
++ u8 type;
+
+ if (level != 0) {
+ eb = path->nodes[level];
+@@ -485,6 +486,9 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ continue;
+ }
+ fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
++ type = btrfs_file_extent_type(eb, fi);
++ if (type == BTRFS_FILE_EXTENT_INLINE)
++ goto next;
+ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+ data_offset = btrfs_file_extent_offset(eb, fi);
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8553bd4361dd1..64b443aa61cae 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3299,13 +3299,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+ di_args->bytes_used = btrfs_device_get_bytes_used(dev);
+ di_args->total_bytes = btrfs_device_get_total_bytes(dev);
+ memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
+- if (dev->name) {
+- strncpy(di_args->path, rcu_str_deref(dev->name),
+- sizeof(di_args->path) - 1);
+- di_args->path[sizeof(di_args->path) - 1] = 0;
+- } else {
++ if (dev->name)
++ strscpy(di_args->path, rcu_str_deref(dev->name), sizeof(di_args->path));
++ else
+ di_args->path[0] = '\0';
+- }
+
+ out:
+ rcu_read_unlock();
+diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
+index a97dc74a4d3de..02f15321cecc9 100644
+--- a/fs/btrfs/rcu-string.h
++++ b/fs/btrfs/rcu-string.h
+@@ -18,7 +18,11 @@ static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
+ (len * sizeof(char)), mask);
+ if (!ret)
+ return ret;
+- strncpy(ret->str, src, len);
++ /* Warn if the source got unexpectedly truncated. */
++ if (WARN_ON(strscpy(ret->str, src, len) < 0)) {
++ kfree(ret);
++ return NULL;
++ }
+ return ret;
+ }
+
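The btrfs hunks replace strncpy() with strscpy(), which always NUL-terminates and reports truncation with a negative return instead of silently leaving an unterminated buffer. strscpy() is kernel-only, so the sketch below uses a simplified userspace stand-in with the same contract; it is a model of the semantics, not the kernel implementation.

/* Userspace model of the strscpy() contract: always NUL-terminate, and
 * return the copied length or a negative value when the source would have
 * been truncated.
 */
#include <stdio.h>
#include <string.h>

#define E2BIG_ERR (-7)		/* stand-in for -E2BIG */

static long strscpy_model(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size == 0)
		return E2BIG_ERR;
	if (len >= size) {			/* would not fit with the NUL */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return E2BIG_ERR;
	}
	memcpy(dst, src, len + 1);		/* includes the terminating NUL */
	return (long)len;
}

int main(void)
{
	char path[8];

	printf("fits:      ret=%ld dst=\"%s\"\n",
	       strscpy_model(path, "sda", sizeof(path)), path);
	printf("truncated: ret=%ld dst=\"%s\"\n",
	       strscpy_model(path, "/dev/sda1", sizeof(path)), path);
	return 0;
}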
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index c5e6eff5a3816..36479b72d2781 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -544,7 +544,7 @@ int cdev_device_add(struct cdev *cdev, struct device *dev)
+ }
+
+ rc = device_add(dev);
+- if (rc)
++ if (rc && dev->devt)
+ cdev_del(cdev);
+
+ return rc;
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 79a18692b84c5..aa7827da7b178 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -609,9 +609,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ seq_printf(s, ",echo_interval=%lu",
+ tcon->ses->server->echo_interval / HZ);
+
+- /* Only display max_credits if it was overridden on mount */
++ /* Only display the following if overridden on mount */
+ if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
+ seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
++ if (tcon->ses->server->tcp_nodelay)
++ seq_puts(s, ",tcpnodelay");
++ if (tcon->ses->server->noautotune)
++ seq_puts(s, ",noautotune");
++ if (tcon->ses->server->noblocksnd)
++ seq_puts(s, ",noblocksend");
+
+ if (tcon->snapshot_time)
+ seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 414936989255a..7c0eb110e2630 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -22,6 +22,8 @@
+ #include <linux/in.h>
+ #include <linux/in6.h>
+ #include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/mm.h>
+ #include <linux/mempool.h>
+ #include <linux/workqueue.h>
+ #include "cifs_fs_sb.h"
+@@ -30,6 +32,7 @@
+ #include <linux/scatterlist.h>
+ #include <uapi/linux/cifs/cifs_mount.h>
+ #include "smb2pdu.h"
++#include "smb2glob.h"
+
+ #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+
+@@ -1955,4 +1958,71 @@ extern struct smb_version_values smb302_values;
+ #define ALT_SMB311_VERSION_STRING "3.11"
+ extern struct smb_version_operations smb311_operations;
+ extern struct smb_version_values smb311_values;
++
++static inline unsigned int cifs_get_num_sgs(const struct smb_rqst *rqst,
++ int num_rqst,
++ const u8 *sig)
++{
++ unsigned int len, skip;
++ unsigned int nents = 0;
++ unsigned long addr;
++ int i, j;
++
++ /* Assumes the first rqst has a transform header as the first iov.
++ * I.e.
++ * rqst[0].rq_iov[0] is transform header
++ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++ */
++ for (i = 0; i < num_rqst; i++) {
++ /*
++ * The first rqst has a transform header where the
++ * first 20 bytes are not part of the encrypted blob.
++ */
++ for (j = 0; j < rqst[i].rq_nvec; j++) {
++ struct kvec *iov = &rqst[i].rq_iov[j];
++
++ skip = (i == 0) && (j == 0) ? 20 : 0;
++ addr = (unsigned long)iov->iov_base + skip;
++ if (unlikely(is_vmalloc_addr((void *)addr))) {
++ len = iov->iov_len - skip;
++ nents += DIV_ROUND_UP(offset_in_page(addr) + len,
++ PAGE_SIZE);
++ } else {
++ nents++;
++ }
++ }
++ nents += rqst[i].rq_npages;
++ }
++ nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE);
++ return nents;
++}
++
++/* We can not use the normal sg_set_buf() as we will sometimes pass a
++ * stack object as buf.
++ */
++static inline struct scatterlist *cifs_sg_set_buf(struct scatterlist *sg,
++ const void *buf,
++ unsigned int buflen)
++{
++ unsigned long addr = (unsigned long)buf;
++ unsigned int off = offset_in_page(addr);
++
++ addr &= PAGE_MASK;
++ if (unlikely(is_vmalloc_addr((void *)addr))) {
++ do {
++ unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off);
++
++ sg_set_page(sg++, vmalloc_to_page((void *)addr), len, off);
++
++ off = 0;
++ addr += PAGE_SIZE;
++ buflen -= len;
++ } while (buflen);
++ } else {
++ sg_set_page(sg++, virt_to_page(addr), buflen, off);
++ }
++ return sg;
++}
++
+ #endif /* _CIFS_GLOB_H */
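cifs_get_num_sgs() above needs one scatterlist entry per page for vmalloc'ed buffers, because they are only virtually contiguous; an unaligned start counts against the first page, hence DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE). A standalone sketch of that arithmetic, assuming a 4 KiB page size for the demo.

/* Number of page-sized segments a buffer spans: an unaligned start eats
 * into the first page, so the offset is added to the length before
 * rounding up.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long offset_in_page(unsigned long addr)
{
	return addr & ~PAGE_MASK;
}

static unsigned long segments_needed(unsigned long addr, unsigned long len)
{
	return (offset_in_page(addr) + len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	/* page-aligned, exactly one page: 1 segment */
	printf("%lu\n", segments_needed(0x10000, 4096));
	/* starts 8 bytes into a page, one page long: straddles 2 pages */
	printf("%lu\n", segments_needed(0x10008, 4096));
	/* small buffer entirely inside one page: 1 segment */
	printf("%lu\n", segments_needed(0x10ff0, 8));
	return 0;
}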
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index f18da99a6b558..56a4740ae93ab 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -583,8 +583,8 @@ int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
+ struct sdesc **sdesc);
+ void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
+
+-extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+- unsigned int *len, unsigned int *offset);
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++ unsigned int *len, unsigned int *offset);
+
+ void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+ int copy_path_name(char *dst, const char *src);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index f8127edb89730..25a2a98ebda8d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3235,7 +3235,7 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
+ struct cifs_ses *
+ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+ {
+- int rc = -ENOMEM;
++ int rc = 0;
+ unsigned int xid;
+ struct cifs_ses *ses;
+ struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+@@ -3277,6 +3277,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+ return ses;
+ }
+
++ rc = -ENOMEM;
++
+ cifs_dbg(FYI, "Existing smb sess not found\n");
+ ses = sesInfoAlloc();
+ if (ses == NULL)
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index a24bcbbb50337..b4b15d611deda 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -481,6 +481,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ oparms.disposition = FILE_CREATE;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
++ oparms.mode = 0644;
+
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ NULL);
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 40ca394fd5de9..f41891379de91 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -972,8 +972,8 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
+ * Input: rqst - a smb_rqst, page - a page index for rqst
+ * Output: *len - the length for this page, *offset - the offset for this page
+ */
+-void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
+- unsigned int *len, unsigned int *offset)
++void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
++ unsigned int *len, unsigned int *offset)
+ {
+ *len = rqst->rq_pagesz;
+ *offset = (page == 0) ? rqst->rq_offset : 0;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index d67676545a421..944c575a4a705 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -3625,69 +3625,82 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+ memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
+ }
+
+-/* We can not use the normal sg_set_buf() as we will sometimes pass a
+- * stack object as buf.
+- */
+-static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+- unsigned int buflen)
++static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++ int num_rqst, const u8 *sig, u8 **iv,
++ struct aead_request **req, struct scatterlist **sgl,
++ unsigned int *num_sgs)
+ {
+- void *addr;
+- /*
+- * VMAP_STACK (at least) puts stack into the vmalloc address space
+- */
+- if (is_vmalloc_addr(buf))
+- addr = vmalloc_to_page(buf);
+- else
+- addr = virt_to_page(buf);
+- sg_set_page(sg, addr, buflen, offset_in_page(buf));
++ unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
++ unsigned int iv_size = crypto_aead_ivsize(tfm);
++ unsigned int len;
++ u8 *p;
++
++ *num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
++
++ len = iv_size;
++ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
++ len = ALIGN(len, crypto_tfm_ctx_alignment());
++ len += req_size;
++ len = ALIGN(len, __alignof__(struct scatterlist));
++ len += *num_sgs * sizeof(**sgl);
++
++ p = kmalloc(len, GFP_ATOMIC);
++ if (!p)
++ return NULL;
++
++ *iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
++ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
++ crypto_tfm_ctx_alignment());
++ *sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
++ __alignof__(struct scatterlist));
++ return p;
+ }
+
+-/* Assumes the first rqst has a transform header as the first iov.
+- * I.e.
+- * rqst[0].rq_iov[0] is transform header
+- * rqst[0].rq_iov[1+] data to be encrypted/decrypted
+- * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
+- */
+-static struct scatterlist *
+-init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
++static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *rqst,
++ int num_rqst, const u8 *sig, u8 **iv,
++ struct aead_request **req, struct scatterlist **sgl)
+ {
+- unsigned int sg_len;
++ unsigned int off, len, skip;
+ struct scatterlist *sg;
+- unsigned int i;
+- unsigned int j;
+- unsigned int idx = 0;
+- int skip;
+-
+- sg_len = 1;
+- for (i = 0; i < num_rqst; i++)
+- sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;
++ unsigned int num_sgs;
++ unsigned long addr;
++ int i, j;
++ void *p;
+
+- sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
+- if (!sg)
++ p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, sgl, &num_sgs);
++ if (!p)
+ return NULL;
+
+- sg_init_table(sg, sg_len);
++ sg_init_table(*sgl, num_sgs);
++ sg = *sgl;
++
++ /* Assumes the first rqst has a transform header as the first iov.
++ * I.e.
++ * rqst[0].rq_iov[0] is transform header
++ * rqst[0].rq_iov[1+] data to be encrypted/decrypted
++ * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
++ */
+ for (i = 0; i < num_rqst; i++) {
++ /*
++ * The first rqst has a transform header where the
++ * first 20 bytes are not part of the encrypted blob.
++ */
+ for (j = 0; j < rqst[i].rq_nvec; j++) {
+- /*
+- * The first rqst has a transform header where the
+- * first 20 bytes are not part of the encrypted blob
+- */
+- skip = (i == 0) && (j == 0) ? 20 : 0;
+- smb2_sg_set_buf(&sg[idx++],
+- rqst[i].rq_iov[j].iov_base + skip,
+- rqst[i].rq_iov[j].iov_len - skip);
+- }
++ struct kvec *iov = &rqst[i].rq_iov[j];
+
++ skip = (i == 0) && (j == 0) ? 20 : 0;
++ addr = (unsigned long)iov->iov_base + skip;
++ len = iov->iov_len - skip;
++ sg = cifs_sg_set_buf(sg, (void *)addr, len);
++ }
+ for (j = 0; j < rqst[i].rq_npages; j++) {
+- unsigned int len, offset;
+-
+- rqst_page_get_length(&rqst[i], j, &len, &offset);
+- sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
++ rqst_page_get_length(&rqst[i], j, &len, &off);
++ sg_set_page(sg++, rqst[i].rq_pages[j], len, off);
+ }
+ }
+- smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
+- return sg;
++ cifs_sg_set_buf(sg, sig, SMB2_SIGNATURE_SIZE);
++
++ return p;
+ }
+
+ static int
+@@ -3729,11 +3742,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ u8 sign[SMB2_SIGNATURE_SIZE] = {};
+ u8 key[SMB3_SIGN_KEY_SIZE];
+ struct aead_request *req;
+- char *iv;
+- unsigned int iv_len;
++ u8 *iv;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct crypto_aead *tfm;
+ unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
++ void *creq;
+
+ rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
+ if (rc) {
+@@ -3762,32 +3775,15 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ return rc;
+ }
+
+- req = aead_request_alloc(tfm, GFP_KERNEL);
+- if (!req) {
+- cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
++ creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg);
++ if (unlikely(!creq))
+ return -ENOMEM;
+- }
+
+ if (!enc) {
+ memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
+ crypt_len += SMB2_SIGNATURE_SIZE;
+ }
+
+- sg = init_sg(num_rqst, rqst, sign);
+- if (!sg) {
+- cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
+- rc = -ENOMEM;
+- goto free_req;
+- }
+-
+- iv_len = crypto_aead_ivsize(tfm);
+- iv = kzalloc(iv_len, GFP_KERNEL);
+- if (!iv) {
+- cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
+- rc = -ENOMEM;
+- goto free_sg;
+- }
+-
+ if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
+ memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
+ else {
+@@ -3795,6 +3791,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
+ }
+
++ aead_request_set_tfm(req, tfm);
+ aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+ aead_request_set_ad(req, assoc_data_len);
+
+@@ -3807,11 +3804,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ if (!rc && enc)
+ memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+
+- kfree(iv);
+-free_sg:
+- kfree(sg);
+-free_req:
+- kfree(req);
++ kfree(creq);
+ return rc;
+ }
+
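smb2_get_aead_req() above folds the AEAD request, IV and scatterlist into one allocation, placing each object with PTR_ALIGN() so its alignment requirement is met and a single kfree() releases everything. A userspace sketch of the same carve-up arithmetic; the object sizes and alignments here are illustrative only.

/* Carving one allocation into several differently-aligned objects:
 * compute a worst-case length (each object plus its alignment slack),
 * then place each object by aligning the running pointer upward.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct fake_req { char opaque[72]; };
struct fake_sge { void *addr; unsigned int len; };

int main(void)
{
	const size_t iv_len = 12, num_sge = 5;

	/* worst case: padding before each object plus the objects themselves */
	size_t total = iv_len
		     + _Alignof(struct fake_req) + sizeof(struct fake_req)
		     + _Alignof(struct fake_sge) + num_sge * sizeof(struct fake_sge);

	unsigned char *p = malloc(total);
	if (!p)
		return 1;

	unsigned char   *iv  = p;
	struct fake_req *req = (void *)ALIGN_UP((uintptr_t)(iv + iv_len),
						_Alignof(struct fake_req));
	struct fake_sge *sge = (void *)ALIGN_UP((uintptr_t)(req + 1),
						_Alignof(struct fake_sge));

	printf("base=%p iv=%p req=%p sge=%p end<=%p\n",
	       (void *)p, (void *)iv, (void *)req, (void *)sge, (void *)(p + total));
	free(p);	/* one free() releases all three objects */
	return 0;
}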
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index d73d88d9c2598..bc27e3ad97ff7 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -317,6 +317,7 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ return 0;
+
+ out_remove:
++ configfs_put(dentry->d_fsdata);
+ configfs_remove_dirent(dentry);
+ return PTR_ERR(inode);
+ }
+@@ -383,6 +384,7 @@ int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
+ return 0;
+
+ out_remove:
++ configfs_put(dentry->d_fsdata);
+ configfs_remove_dirent(dentry);
+ return PTR_ERR(inode);
+ }
+diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
+index da87615ad69a7..9efc243e991ae 100644
+--- a/fs/debugfs/file.c
++++ b/fs/debugfs/file.c
+@@ -377,8 +377,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ }
+ EXPORT_SYMBOL_GPL(debugfs_attr_read);
+
+-ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+- size_t len, loff_t *ppos)
++static ssize_t debugfs_attr_write_xsigned(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos, bool is_signed)
+ {
+ struct dentry *dentry = F_DENTRY(file);
+ ssize_t ret;
+@@ -386,12 +386,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+- ret = simple_attr_write(file, buf, len, ppos);
++ if (is_signed)
++ ret = simple_attr_write_signed(file, buf, len, ppos);
++ else
++ ret = simple_attr_write(file, buf, len, ppos);
+ debugfs_file_put(dentry);
+ return ret;
+ }
++
++ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
++}
+ EXPORT_SYMBOL_GPL(debugfs_attr_write);
+
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
++}
++EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);
++
+ static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode,
+ struct dentry *parent, void *value,
+ const struct file_operations *fops,
+@@ -784,11 +800,11 @@ static int debugfs_atomic_t_get(void *data, u64 *val)
+ *val = atomic_read((atomic_t *)data);
+ return 0;
+ }
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
+ debugfs_atomic_t_set, "%lld\n");
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
+ "%lld\n");
+-DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
++DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
+ "%lld\n");
+
+ /**
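The debugfs change routes the atomic_t attributes through a signed write helper so that input such as "-1" parses as a negative value; the kernel's unsigned parser rejects the leading minus sign, while a plain C unsigned conversion wraps it, and either way the value is wrong. A standalone illustration of the difference using the userspace strtoll()/strtoull() pair.

/* "-1" under a signed versus an unsigned parse: only the signed path
 * yields what the user meant.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *input = "-1";

	long long          s = strtoll(input, NULL, 0);
	unsigned long long u = strtoull(input, NULL, 0);

	printf("signed parse:   %lld\n", s);	/* -1 */
	printf("unsigned parse: %llu\n", u);	/* wraps to ULLONG_MAX */
	return 0;
}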
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e932e84823714..946804c3c4b13 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -501,7 +501,7 @@ enum {
+ *
+ * It's not paranoia if the Murphy's Law really *is* out to get you. :-)
+ */
+-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
++#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG))
+ #define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
+
+ static inline void ext4_check_flag_values(void)
+@@ -2608,7 +2608,8 @@ int do_journal_get_write_access(handle_t *handle,
+ typedef enum {
+ EXT4_IGET_NORMAL = 0,
+ EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
+- EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
++ EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */
++ EXT4_IGET_BAD = 0x0004 /* Allow to iget a bad inode */
+ } ext4_iget_flags;
+
+ extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ace8d6145253f..27760c39f70ec 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -6022,6 +6022,14 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+ struct ext4_extent *extent;
+ ext4_lblk_t first_lblk, first_lclu, last_lclu;
+
++ /*
++ * if data can be stored inline, the logical cluster isn't
++ * mapped - no physical clusters have been allocated, and the
++ * file has no extents
++ */
++ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
++ return 0;
++
+ /* search for the extent closest to the first block in the cluster */
+ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+ if (IS_ERR(path)) {
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 43fba01da6c3d..9d0bed2e4d059 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -1354,7 +1354,7 @@ retry:
+ if (count_reserved)
+ count_rsvd(inode, lblk, orig_es.es_len - len1 - len2,
+ &orig_es, &rc);
+- goto out;
++ goto out_get_reserved;
+ }
+
+ if (len1 > 0) {
+@@ -1396,6 +1396,7 @@ retry:
+ }
+ }
+
++out_get_reserved:
+ if (count_reserved)
+ *reserved = get_rsvd(inode, end, es, &rc);
+ out:
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 36699a131168f..a131d2781342b 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ struct super_block *sb = inode->i_sb;
+ Indirect *p = chain;
+ struct buffer_head *bh;
++ unsigned int key;
+ int ret = -EIO;
+
+ *err = 0;
+@@ -156,7 +157,13 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ if (!p->key)
+ goto no_block;
+ while (--depth) {
+- bh = sb_getblk(sb, le32_to_cpu(p->key));
++ key = le32_to_cpu(p->key);
++ if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) {
++ /* the block was out of range */
++ ret = -EFSCORRUPTED;
++ goto failure;
++ }
++ bh = sb_getblk(sb, key);
+ if (unlikely(!bh)) {
+ ret = -ENOMEM;
+ goto failure;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 9f9135336a17f..cafa06114a1d1 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -207,6 +207,8 @@ void ext4_evict_inode(struct inode *inode)
+
+ trace_ext4_evict_inode(inode);
+
++ if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
++ ext4_evict_ea_inode(inode);
+ if (inode->i_nlink) {
+ /*
+ * When journalling data dirty buffers are tracked only in the
+@@ -4537,7 +4539,7 @@ int ext4_truncate(struct inode *inode)
+ trace_ext4_truncate_enter(inode);
+
+ if (!ext4_can_truncate(inode))
+- return 0;
++ goto out_trace;
+
+ ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+
+@@ -4548,16 +4550,15 @@ int ext4_truncate(struct inode *inode)
+ int has_inline = 1;
+
+ err = ext4_inline_data_truncate(inode, &has_inline);
+- if (err)
+- return err;
+- if (has_inline)
+- return 0;
++ if (err || has_inline)
++ goto out_trace;
+ }
+
+ /* If we zero-out tail of the page, we have to create jinode for jbd2 */
+ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+- if (ext4_inode_attach_jinode(inode) < 0)
+- return 0;
++ err = ext4_inode_attach_jinode(inode);
++ if (err)
++ goto out_trace;
+ }
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+@@ -4566,8 +4567,10 @@ int ext4_truncate(struct inode *inode)
+ credits = ext4_blocks_for_truncate(inode);
+
+ handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+- if (IS_ERR(handle))
+- return PTR_ERR(handle);
++ if (IS_ERR(handle)) {
++ err = PTR_ERR(handle);
++ goto out_trace;
++ }
+
+ if (inode->i_size & (inode->i_sb->s_blocksize - 1))
+ ext4_block_truncate_page(handle, mapping, inode->i_size);
+@@ -4616,6 +4619,7 @@ out_stop:
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
++out_trace:
+ trace_ext4_truncate_exit(inode);
+ return err;
+ }
+@@ -4652,9 +4656,17 @@ static int __ext4_get_inode_loc(struct inode *inode,
+ inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
+ inode_offset = ((inode->i_ino - 1) %
+ EXT4_INODES_PER_GROUP(sb));
+- block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
+ iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+
++ block = ext4_inode_table(sb, gdp);
++ if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) ||
++ (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) {
++ ext4_error(sb, "Invalid inode table block %llu in "
++ "block_group %u", block, iloc->block_group);
++ return -EFSCORRUPTED;
++ }
++ block += (inode_offset / inodes_per_block);
++
+ bh = sb_getblk(sb, block);
+ if (unlikely(!bh))
+ return -ENOMEM;
+@@ -5184,8 +5196,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
+ ext4_error_inode(inode, function, line, 0,
+ "casefold flag without casefold feature");
+- brelse(iloc.bh);
++ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
++ ext4_error_inode(inode, function, line, 0,
++ "bad inode without EXT4_IGET_BAD flag");
++ ret = -EUCLEAN;
++ goto bad_inode;
++ }
+
++ brelse(iloc.bh);
+ unlock_new_inode(inode);
+ return inode;
+
+@@ -6045,6 +6063,14 @@ static int __ext4_expand_extra_isize(struct inode *inode,
+ return 0;
+ }
+
++ /*
++ * We may need to allocate external xattr block so we need quotas
++ * initialized. Here we can be called with various locks held so we
++ * cannot affort to initialize quotas ourselves. So just bail.
++ */
++ if (dquot_initialize_needed(inode))
++ return -EAGAIN;
++
+ /* try to expand with EAs present */
+ error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
+ raw_inode, handle);
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 9fa20f9ba52b5..645186927c1f8 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -121,7 +121,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ blkcnt_t blocks;
+ unsigned short bytes;
+
+- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
++ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO,
++ EXT4_IGET_SPECIAL | EXT4_IGET_BAD);
+ if (IS_ERR(inode_bl))
+ return PTR_ERR(inode_bl);
+ ei_bl = EXT4_I(inode_bl);
+@@ -169,7 +170,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ /* Protect extent tree against block allocations via delalloc */
+ ext4_double_down_write_data_sem(inode, inode_bl);
+
+- if (inode_bl->i_nlink == 0) {
++ if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) {
+ /* this inode has never been used as a BOOT_LOADER */
+ set_nlink(inode_bl, 1);
+ i_uid_write(inode_bl, 0);
+@@ -460,6 +461,10 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
+ if (ext4_is_quota_file(inode))
+ return err;
+
++ err = dquot_initialize(inode);
++ if (err)
++ return err;
++
+ err = ext4_get_inode_loc(inode, &iloc);
+ if (err)
+ return err;
+@@ -475,10 +480,6 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
+ brelse(iloc.bh);
+ }
+
+- err = dquot_initialize(inode);
+- if (err)
+- return err;
+-
+ handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
+ EXT4_QUOTA_INIT_BLOCKS(sb) +
+ EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index c0f4703e64d54..ee9e931e2e925 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3778,6 +3778,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ return -EXDEV;
+
+ retval = dquot_initialize(old.dir);
++ if (retval)
++ return retval;
++ retval = dquot_initialize(old.inode);
+ if (retval)
+ return retval;
+ retval = dquot_initialize(new.dir);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 6db853a3ecc44..880307ba0f278 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1567,8 +1567,8 @@ exit_journal:
+ int meta_bg = ext4_has_feature_meta_bg(sb);
+ sector_t old_gdb = 0;
+
+- update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+- sizeof(struct ext4_super_block), 0);
++ update_backups(sb, ext4_group_first_block_no(sb, 0),
++ (char *)es, sizeof(struct ext4_super_block), 0);
+ for (; gdb_num <= gdb_num_end; gdb_num++) {
+ struct buffer_head *gdb_bh;
+
+@@ -1775,7 +1775,7 @@ errout:
+ if (test_opt(sb, DEBUG))
+ printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
+ "blocks\n", ext4_blocks_count(es));
+- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
++ update_backups(sb, ext4_group_first_block_no(sb, 0),
+ (char *)es, sizeof(struct ext4_super_block), 0);
+ }
+ return err;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ce575cb9bc0e0..789a9f6a2ec6e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1085,6 +1085,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+ return NULL;
+
+ inode_set_iversion(&ei->vfs_inode, 1);
++ ei->i_flags = 0;
+ spin_lock_init(&ei->i_raw_lock);
+ INIT_LIST_HEAD(&ei->i_prealloc_list);
+ spin_lock_init(&ei->i_prealloc_lock);
+@@ -4388,30 +4389,31 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ ext4_has_feature_journal_needs_recovery(sb)) {
+ ext4_msg(sb, KERN_ERR, "required journal recovery "
+ "suppressed and not mounted read-only");
+- goto failed_mount_wq;
++ goto failed_mount3a;
+ } else {
+ /* Nojournal mode, all journal mount options are illegal */
+- if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
+- ext4_msg(sb, KERN_ERR, "can't mount with "
+- "journal_checksum, fs mounted w/o journal");
+- goto failed_mount_wq;
+- }
+ if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "journal_async_commit, fs mounted w/o journal");
+- goto failed_mount_wq;
++ goto failed_mount3a;
++ }
++
++ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "journal_checksum, fs mounted w/o journal");
++ goto failed_mount3a;
+ }
+ if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "commit=%lu, fs mounted w/o journal",
+ sbi->s_commit_interval / HZ);
+- goto failed_mount_wq;
++ goto failed_mount3a;
+ }
+ if (EXT4_MOUNT_DATA_FLAGS &
+ (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "data=, fs mounted w/o journal");
+- goto failed_mount_wq;
++ goto failed_mount3a;
+ }
+ sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
+ clear_opt(sb, JOURNAL_CHECKSUM);
+@@ -4835,7 +4837,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
+
+ jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
+ journal_inode, journal_inode->i_size);
+- if (!S_ISREG(journal_inode->i_mode)) {
++ if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) {
+ ext4_msg(sb, KERN_ERR, "invalid journal inode");
+ iput(journal_inode);
+ return NULL;
+@@ -5954,6 +5956,20 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ return err;
+ }
+
++static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum)
++{
++ switch (type) {
++ case USRQUOTA:
++ return qf_inum == EXT4_USR_QUOTA_INO;
++ case GRPQUOTA:
++ return qf_inum == EXT4_GRP_QUOTA_INO;
++ case PRJQUOTA:
++ return qf_inum >= EXT4_GOOD_OLD_FIRST_INO;
++ default:
++ BUG();
++ }
++}
++
+ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ unsigned int flags)
+ {
+@@ -5970,9 +5986,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ if (!qf_inums[type])
+ return -EPERM;
+
++ if (!ext4_check_quota_inum(type, qf_inums[type])) {
++ ext4_error(sb, "Bad quota inum: %lu, type: %d",
++ qf_inums[type], type);
++ return -EUCLEAN;
++ }
++
+ qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
+ if (IS_ERR(qf_inode)) {
+- ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
++ ext4_error(sb, "Bad quota inode: %lu, type: %d",
++ qf_inums[type], type);
+ return PTR_ERR(qf_inode);
+ }
+
+@@ -6011,8 +6034,9 @@ static int ext4_enable_quotas(struct super_block *sb)
+ if (err) {
+ ext4_warning(sb,
+ "Failed to enable quota tracking "
+- "(type=%d, err=%d). Please run "
+- "e2fsck to fix.", type, err);
++ "(type=%d, err=%d, ino=%lu). "
++ "Please run e2fsck to fix.", type,
++ err, qf_inums[type]);
+ for (type--; type >= 0; type--) {
+ struct inode *inode;
+
+diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
+index 6a30e54c11282..9879ea046e5ae 100644
+--- a/fs/ext4/verity.c
++++ b/fs/ext4/verity.c
+@@ -79,8 +79,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ size_t n = min_t(size_t, count,
+ PAGE_SIZE - offset_in_page(pos));
+ struct page *page;
+- void *fsdata;
+- void *addr;
++ void *fsdata = NULL;
+ int res;
+
+ res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
+@@ -88,9 +87,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ if (res)
+ return res;
+
+- addr = kmap_atomic(page);
+- memcpy(addr + offset_in_page(pos), buf, n);
+- kunmap_atomic(addr);
++ memcpy_to_page(page, offset_in_page(pos), buf, n);
+
+ res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
+ page, fsdata);
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 4b0e2bc71d2cb..78df2d65998e6 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -434,6 +434,21 @@ error:
+ return err;
+ }
+
++/* Remove entry from mbcache when EA inode is getting evicted */
++void ext4_evict_ea_inode(struct inode *inode)
++{
++ struct mb_cache_entry *oe;
++
++ if (!EA_INODE_CACHE(inode))
++ return;
++ /* Wait for entry to get unused so that we can remove it */
++ while ((oe = mb_cache_entry_delete_or_get(EA_INODE_CACHE(inode),
++ ext4_xattr_inode_get_hash(inode), inode->i_ino))) {
++ mb_cache_entry_wait_unused(oe);
++ mb_cache_entry_put(EA_INODE_CACHE(inode), oe);
++ }
++}
++
+ static int
+ ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
+ struct ext4_xattr_entry *entry, void *buffer,
+@@ -1019,10 +1034,8 @@ static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
+ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+ int ref_change)
+ {
+- struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
+ struct ext4_iloc iloc;
+ s64 ref_count;
+- u32 hash;
+ int ret;
+
+ inode_lock(ea_inode);
+@@ -1045,14 +1058,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+
+ set_nlink(ea_inode, 1);
+ ext4_orphan_del(handle, ea_inode);
+-
+- if (ea_inode_cache) {
+- hash = ext4_xattr_inode_get_hash(ea_inode);
+- mb_cache_entry_create(ea_inode_cache,
+- GFP_NOFS, hash,
+- ea_inode->i_ino,
+- true /* reusable */);
+- }
+ }
+ } else {
+ WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
+@@ -1065,12 +1070,6 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
+
+ clear_nlink(ea_inode);
+ ext4_orphan_add(handle, ea_inode);
+-
+- if (ea_inode_cache) {
+- hash = ext4_xattr_inode_get_hash(ea_inode);
+- mb_cache_entry_delete(ea_inode_cache, hash,
+- ea_inode->i_ino);
+- }
+ }
+ }
+
+@@ -1249,6 +1248,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ if (error)
+ goto out;
+
++retry_ref:
+ lock_buffer(bh);
+ hash = le32_to_cpu(BHDR(bh)->h_hash);
+ ref = le32_to_cpu(BHDR(bh)->h_refcount);
+@@ -1258,9 +1258,18 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ * This must happen under buffer lock for
+ * ext4_xattr_block_set() to reliably detect freed block
+ */
+- if (ea_block_cache)
+- mb_cache_entry_delete(ea_block_cache, hash,
+- bh->b_blocknr);
++ if (ea_block_cache) {
++ struct mb_cache_entry *oe;
++
++ oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
++ bh->b_blocknr);
++ if (oe) {
++ unlock_buffer(bh);
++ mb_cache_entry_wait_unused(oe);
++ mb_cache_entry_put(ea_block_cache, oe);
++ goto retry_ref;
++ }
++ }
+ get_bh(bh);
+ unlock_buffer(bh);
+
+@@ -1284,7 +1293,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ ce = mb_cache_entry_get(ea_block_cache, hash,
+ bh->b_blocknr);
+ if (ce) {
+- ce->e_reusable = 1;
++ set_bit(MBE_REUSABLE_B, &ce->e_flags);
+ mb_cache_entry_put(ea_block_cache, ce);
+ }
+ }
+@@ -1442,6 +1451,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
+ if (!err)
+ err = ext4_inode_attach_jinode(ea_inode);
+ if (err) {
++ if (ext4_xattr_inode_dec_ref(handle, ea_inode))
++ ext4_warning_inode(ea_inode,
++ "cleanup dec ref error %d", err);
+ iput(ea_inode);
+ return ERR_PTR(err);
+ }
+@@ -1868,6 +1880,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ #define header(x) ((struct ext4_xattr_header *)(x))
+
+ if (s->base) {
++ int offset = (char *)s->here - bs->bh->b_data;
++
+ BUFFER_TRACE(bs->bh, "get_write_access");
+ error = ext4_journal_get_write_access(handle, bs->bh);
+ if (error)
+@@ -1882,9 +1896,20 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ * ext4_xattr_block_set() to reliably detect modified
+ * block
+ */
+- if (ea_block_cache)
+- mb_cache_entry_delete(ea_block_cache, hash,
+- bs->bh->b_blocknr);
++ if (ea_block_cache) {
++ struct mb_cache_entry *oe;
++
++ oe = mb_cache_entry_delete_or_get(ea_block_cache,
++ hash, bs->bh->b_blocknr);
++ if (oe) {
++ /*
++ * Xattr block is getting reused. Leave
++ * it alone.
++ */
++ mb_cache_entry_put(ea_block_cache, oe);
++ goto clone_block;
++ }
++ }
+ ea_bdebug(bs->bh, "modifying in-place");
+ error = ext4_xattr_set_entry(i, s, handle, inode,
+ true /* is_block */);
+@@ -1899,50 +1924,47 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ if (error)
+ goto cleanup;
+ goto inserted;
+- } else {
+- int offset = (char *)s->here - bs->bh->b_data;
++ }
++clone_block:
++ unlock_buffer(bs->bh);
++ ea_bdebug(bs->bh, "cloning");
++ s->base = kmemdup(BHDR(bs->bh), bs->bh->b_size, GFP_NOFS);
++ error = -ENOMEM;
++ if (s->base == NULL)
++ goto cleanup;
++ s->first = ENTRY(header(s->base)+1);
++ header(s->base)->h_refcount = cpu_to_le32(1);
++ s->here = ENTRY(s->base + offset);
++ s->end = s->base + bs->bh->b_size;
+
+- unlock_buffer(bs->bh);
+- ea_bdebug(bs->bh, "cloning");
+- s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
+- error = -ENOMEM;
+- if (s->base == NULL)
++ /*
++ * If existing entry points to an xattr inode, we need
++ * to prevent ext4_xattr_set_entry() from decrementing
++ * ref count on it because the reference belongs to the
++ * original block. In this case, make the entry look
++ * like it has an empty value.
++ */
++ if (!s->not_found && s->here->e_value_inum) {
++ ea_ino = le32_to_cpu(s->here->e_value_inum);
++ error = ext4_xattr_inode_iget(inode, ea_ino,
++ le32_to_cpu(s->here->e_hash),
++ &tmp_inode);
++ if (error)
+ goto cleanup;
+- memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
+- s->first = ENTRY(header(s->base)+1);
+- header(s->base)->h_refcount = cpu_to_le32(1);
+- s->here = ENTRY(s->base + offset);
+- s->end = s->base + bs->bh->b_size;
+-
+- /*
+- * If existing entry points to an xattr inode, we need
+- * to prevent ext4_xattr_set_entry() from decrementing
+- * ref count on it because the reference belongs to the
+- * original block. In this case, make the entry look
+- * like it has an empty value.
+- */
+- if (!s->not_found && s->here->e_value_inum) {
+- ea_ino = le32_to_cpu(s->here->e_value_inum);
+- error = ext4_xattr_inode_iget(inode, ea_ino,
+- le32_to_cpu(s->here->e_hash),
+- &tmp_inode);
+- if (error)
+- goto cleanup;
+
+- if (!ext4_test_inode_state(tmp_inode,
+- EXT4_STATE_LUSTRE_EA_INODE)) {
+- /*
+- * Defer quota free call for previous
+- * inode until success is guaranteed.
+- */
+- old_ea_inode_quota = le32_to_cpu(
+- s->here->e_value_size);
+- }
+- iput(tmp_inode);
+-
+- s->here->e_value_inum = 0;
+- s->here->e_value_size = 0;
++ if (!ext4_test_inode_state(tmp_inode,
++ EXT4_STATE_LUSTRE_EA_INODE)) {
++ /*
++ * Defer quota free call for previous
++ * inode until success is guaranteed.
++ */
++ old_ea_inode_quota = le32_to_cpu(
++ s->here->e_value_size);
+ }
++ iput(tmp_inode);
++
++ s->here->e_value_inum = 0;
++ s->here->e_value_size = 0;
+ }
+ } else {
+ /* Allocate a buffer where we construct the new block. */
+@@ -2009,18 +2031,13 @@ inserted:
+ lock_buffer(new_bh);
+ /*
+ * We have to be careful about races with
+- * freeing, rehashing or adding references to
+- * xattr block. Once we hold buffer lock xattr
+- * block's state is stable so we can check
+- * whether the block got freed / rehashed or
+- * not. Since we unhash mbcache entry under
+- * buffer lock when freeing / rehashing xattr
+- * block, checking whether entry is still
+- * hashed is reliable. Same rules hold for
+- * e_reusable handling.
++ * adding references to xattr block. Once we
++ * hold buffer lock xattr block's state is
++ * stable so we can check the additional
++ * reference fits.
+ */
+- if (hlist_bl_unhashed(&ce->e_hash_list) ||
+- !ce->e_reusable) {
++ ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
++ if (ref > EXT4_XATTR_REFCOUNT_MAX) {
+ /*
+ * Undo everything and check mbcache
+ * again.
+@@ -2035,10 +2052,9 @@ inserted:
+ new_bh = NULL;
+ goto inserted;
+ }
+- ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
+ BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
+- if (ref >= EXT4_XATTR_REFCOUNT_MAX)
+- ce->e_reusable = 0;
++ if (ref == EXT4_XATTR_REFCOUNT_MAX)
++ clear_bit(MBE_REUSABLE_B, &ce->e_flags);
+ ea_bdebug(new_bh, "reusing; refcount now=%d",
+ ref);
+ ext4_xattr_block_csum_set(inode, new_bh);
+@@ -2066,19 +2082,11 @@ inserted:
+
+ goal = ext4_group_first_block_no(sb,
+ EXT4_I(inode)->i_block_group);
+-
+- /* non-extent files can't have physical blocks past 2^32 */
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+- goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
+-
+ block = ext4_new_meta_blocks(handle, inode, goal, 0,
+ NULL, &error);
+ if (error)
+ goto cleanup;
+
+- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+- BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
+-
+ ea_idebug(inode, "creating block %llu",
+ (unsigned long long)block);
+
+@@ -2571,7 +2579,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+
+ is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+- buffer = kmalloc(value_size, GFP_NOFS);
++ buffer = kvmalloc(value_size, GFP_NOFS);
+ b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+ if (!is || !bs || !buffer || !b_entry_name) {
+ error = -ENOMEM;
+@@ -2623,7 +2631,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ error = 0;
+ out:
+ kfree(b_entry_name);
+- kfree(buffer);
++ kvfree(buffer);
+ if (is)
+ brelse(is->iloc.bh);
+ if (bs)
+diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
+index 990084e00374f..231ef308d10cb 100644
+--- a/fs/ext4/xattr.h
++++ b/fs/ext4/xattr.h
+@@ -190,6 +190,7 @@ extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array);
+
+ extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle);
++extern void ext4_evict_ea_inode(struct inode *inode);
+
+ extern const struct xattr_handler *ext4_xattr_handlers[];
+
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 3d3e414e29870..ef71d9cda5851 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -643,6 +643,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+ if (ofs_in_node >= max_addrs) {
+ f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
+ ofs_in_node, dni->ino, dni->nid, max_addrs);
++ f2fs_put_page(node_page, 1);
+ return false;
+ }
+
+@@ -1253,8 +1254,9 @@ freed:
+ seg_freed++;
+ migrated++;
+
+- if (__is_large_section(sbi) && segno + 1 < end_segno)
+- sbi->next_victim_seg[gc_type] = segno + 1;
++ if (__is_large_section(sbi))
++ sbi->next_victim_seg[gc_type] =
++ (segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
+ skip:
+ f2fs_put_page(sum_page, 0);
+ }
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 7759323bd7751..e43b57755a7fe 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1486,7 +1486,7 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
+ if (i + 1 < dpolicy->granularity)
+ break;
+
+- if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
++ if (i + 1 < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ return __issue_discard_cmd_orderly(sbi, dpolicy);
+
+ pend_list = &dcc->pend_list[i];
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index da243c84e93b0..ee2ea5532e69b 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -453,14 +453,16 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ /* panic? */
+ return -EIO;
+
++ res = -EIO;
++ if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
++ goto out;
+ fd.search_key->cat = HFS_I(main_inode)->cat_key;
+ if (hfs_brec_find(&fd))
+- /* panic? */
+ goto out;
+
+ if (S_ISDIR(main_inode->i_mode)) {
+ if (fd.entrylength < sizeof(struct hfs_cat_dir))
+- /* panic? */;
++ goto out;
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_dir));
+ if (rec.type != HFS_CDR_DIR ||
+@@ -473,6 +475,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_dir));
+ } else if (HFS_IS_RSRC(inode)) {
++ if (fd.entrylength < sizeof(struct hfs_cat_file))
++ goto out;
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_file));
+ hfs_inode_write_fork(inode, rec.file.RExtRec,
+@@ -481,7 +485,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ sizeof(struct hfs_cat_file));
+ } else {
+ if (fd.entrylength < sizeof(struct hfs_cat_file))
+- /* panic? */;
++ goto out;
+ hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_file));
+ if (rec.type != HFS_CDR_FIL ||
+@@ -498,9 +502,10 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
+ sizeof(struct hfs_cat_file));
+ }
++ res = 0;
+ out:
+ hfs_find_exit(&fd);
+- return 0;
++ return res;
+ }
+
+ static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
+diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
+index 39f5e343bf4d4..fdb0edb8a607d 100644
+--- a/fs/hfs/trans.c
++++ b/fs/hfs/trans.c
+@@ -109,7 +109,7 @@ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr
+ if (nls_io) {
+ wchar_t ch;
+
+- while (srclen > 0) {
++ while (srclen > 0 && dstlen > 0) {
+ size = nls_io->char2uni(src, srclen, &ch);
+ if (size < 0) {
+ ch = '?';
+diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
+index b8471bf05def9..9580179ff535a 100644
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -198,6 +198,8 @@ struct hfsplus_sb_info {
+ #define HFSPLUS_SB_HFSX 3
+ #define HFSPLUS_SB_CASEFOLD 4
+ #define HFSPLUS_SB_NOBARRIER 5
++#define HFSPLUS_SB_UID 6
++#define HFSPLUS_SB_GID 7
+
+ static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
+ {
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index d131c8ea7eb61..76501d905099b 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -187,11 +187,11 @@ static void hfsplus_get_perms(struct inode *inode,
+ mode = be16_to_cpu(perms->mode);
+
+ i_uid_write(inode, be32_to_cpu(perms->owner));
+- if (!i_uid_read(inode) && !mode)
++ if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode))
+ inode->i_uid = sbi->uid;
+
+ i_gid_write(inode, be32_to_cpu(perms->group));
+- if (!i_gid_read(inode) && !mode)
++ if ((test_bit(HFSPLUS_SB_GID, &sbi->flags)) || (!i_gid_read(inode) && !mode))
+ inode->i_gid = sbi->gid;
+
+ if (dir) {
+@@ -497,8 +497,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ if (type == HFSPLUS_FOLDER) {
+ struct hfsplus_cat_folder *folder = &entry.folder;
+
+- if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
+- /* panic? */;
++ WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
+ hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ hfsplus_get_perms(inode, &folder->permissions, 1);
+@@ -518,8 +517,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
+ } else if (type == HFSPLUS_FILE) {
+ struct hfsplus_cat_file *file = &entry.file;
+
+- if (fd->entrylength < sizeof(struct hfsplus_cat_file))
+- /* panic? */;
++ WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
+ hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
+ sizeof(struct hfsplus_cat_file));
+
+@@ -576,8 +574,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ if (S_ISDIR(main_inode->i_mode)) {
+ struct hfsplus_cat_folder *folder = &entry.folder;
+
+- if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
+- /* panic? */;
++ WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ sizeof(struct hfsplus_cat_folder));
+ /* simple node checks? */
+@@ -602,8 +599,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
+ } else {
+ struct hfsplus_cat_file *file = &entry.file;
+
+- if (fd.entrylength < sizeof(struct hfsplus_cat_file))
+- /* panic? */;
++ WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ sizeof(struct hfsplus_cat_file));
+ hfsplus_inode_write_fork(inode, &file->data_fork);
+diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
+index 047e05c575601..c94a58762ad6d 100644
+--- a/fs/hfsplus/options.c
++++ b/fs/hfsplus/options.c
+@@ -140,6 +140,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
+ if (!uid_valid(sbi->uid)) {
+ pr_err("invalid uid specified\n");
+ return 0;
++ } else {
++ set_bit(HFSPLUS_SB_UID, &sbi->flags);
+ }
+ break;
+ case opt_gid:
+@@ -151,6 +153,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
+ if (!gid_valid(sbi->gid)) {
+ pr_err("invalid gid specified\n");
+ return 0;
++ } else {
++ set_bit(HFSPLUS_SB_GID, &sbi->flags);
+ }
+ break;
+ case opt_part:
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 7d039ba5ae28b..b1d31c78fc9de 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1232,7 +1232,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_size:
+ /* memparse() will accept a K/M/G without a digit */
+- if (!isdigit(param->string[0]))
++ if (!param->string || !isdigit(param->string[0]))
+ goto bad_val;
+ ctx->max_size_opt = memparse(param->string, &rest);
+ ctx->max_val_type = SIZE_STD;
+@@ -1242,7 +1242,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_nr_inodes:
+ /* memparse() will accept a K/M/G without a digit */
+- if (!isdigit(param->string[0]))
++ if (!param->string || !isdigit(param->string[0]))
+ goto bad_val;
+ ctx->nr_inodes = memparse(param->string, &rest);
+ return 0;
+@@ -1258,7 +1258,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
+
+ case Opt_min_size:
+ /* memparse() will accept a K/M/G without a digit */
+- if (!isdigit(param->string[0]))
++ if (!param->string || !isdigit(param->string[0]))
+ goto bad_val;
+ ctx->min_size_opt = memparse(param->string, &rest);
+ ctx->min_val_type = SIZE_STD;
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index d3cb27487c706..aa4643854f947 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -155,7 +155,7 @@ int dbMount(struct inode *ipbmap)
+ struct bmap *bmp;
+ struct dbmap_disk *dbmp_le;
+ struct metapage *mp;
+- int i;
++ int i, err;
+
+ /*
+ * allocate/initialize the in-memory bmap descriptor
+@@ -170,8 +170,8 @@ int dbMount(struct inode *ipbmap)
+ BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
+ PSIZE, 0);
+ if (mp == NULL) {
+- kfree(bmp);
+- return -EIO;
++ err = -EIO;
++ goto err_kfree_bmp;
+ }
+
+ /* copy the on-disk bmap descriptor to its in-memory version. */
+@@ -181,9 +181,8 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+ if (!bmp->db_numag) {
+- release_metapage(mp);
+- kfree(bmp);
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_release_metapage;
+ }
+
+ bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
+@@ -194,6 +193,16 @@ int dbMount(struct inode *ipbmap)
+ bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
++ if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
++ if (((bmp->db_mapsize - 1) >> bmp->db_agl2size) > MAXAG) {
++ err = -EINVAL;
++ goto err_release_metapage;
++ }
++
+ for (i = 0; i < MAXAG; i++)
+ bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
+ bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
+@@ -214,6 +223,12 @@ int dbMount(struct inode *ipbmap)
+ BMAP_LOCK_INIT(bmp);
+
+ return (0);
++
++err_release_metapage:
++ release_metapage(mp);
++err_kfree_bmp:
++ kfree(bmp);
++ return err;
+ }
+
+
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 247b58a68240a..e6f986da2a65e 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -883,8 +883,8 @@ out:
+ EXPORT_SYMBOL_GPL(simple_attr_read);
+
+ /* interpret the buffer as a number to call the set function with */
+-ssize_t simple_attr_write(struct file *file, const char __user *buf,
+- size_t len, loff_t *ppos)
++static ssize_t simple_attr_write_xsigned(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos, bool is_signed)
+ {
+ struct simple_attr *attr;
+ unsigned long long val;
+@@ -905,7 +905,10 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ goto out;
+
+ attr->set_buf[size] = '\0';
+- ret = kstrtoull(attr->set_buf, 0, &val);
++ if (is_signed)
++ ret = kstrtoll(attr->set_buf, 0, &val);
++ else
++ ret = kstrtoull(attr->set_buf, 0, &val);
+ if (ret)
+ goto out;
+ ret = attr->set(attr->data, val);
+@@ -915,8 +918,21 @@ out:
+ mutex_unlock(&attr->mutex);
+ return ret;
+ }
++
++ssize_t simple_attr_write(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return simple_attr_write_xsigned(file, buf, len, ppos, false);
++}
+ EXPORT_SYMBOL_GPL(simple_attr_write);
+
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return simple_attr_write_xsigned(file, buf, len, ppos, true);
++}
++EXPORT_SYMBOL_GPL(simple_attr_write_signed);
++
+ /**
+ * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
+ * @sb: filesystem to do the file handle conversion on
+diff --git a/fs/mbcache.c b/fs/mbcache.c
+index 97c54d3a22276..95b047256d093 100644
+--- a/fs/mbcache.c
++++ b/fs/mbcache.c
+@@ -11,7 +11,7 @@
+ /*
+ * Mbcache is a simple key-value store. Keys need not be unique, however
+ * key-value pairs are expected to be unique (we use this fact in
+- * mb_cache_entry_delete()).
++ * mb_cache_entry_delete_or_get()).
+ *
+ * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
+ * Ext4 also uses it for deduplication of xattr values stored in inodes.
+@@ -90,12 +90,19 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->e_list);
+- /* One ref for hash, one ref returned */
+- atomic_set(&entry->e_refcnt, 1);
++ /*
++ * We create entry with two references. One reference is kept by the
++ * hash table, the other reference is used to protect us from
++ * mb_cache_entry_delete_or_get() until the entry is fully setup. This
++ * avoids nesting of cache->c_list_lock into hash table bit locks which
++ * is problematic for RT.
++ */
++ atomic_set(&entry->e_refcnt, 2);
+ entry->e_key = key;
+ entry->e_value = value;
+- entry->e_reusable = reusable;
+- entry->e_referenced = 0;
++ entry->e_flags = 0;
++ if (reusable)
++ set_bit(MBE_REUSABLE_B, &entry->e_flags);
+ head = mb_cache_entry_head(cache, key);
+ hlist_bl_lock(head);
+ hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
+@@ -107,24 +114,41 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
+ }
+ hlist_bl_add_head(&entry->e_hash_list, head);
+ hlist_bl_unlock(head);
+-
+ spin_lock(&cache->c_list_lock);
+ list_add_tail(&entry->e_list, &cache->c_list);
+- /* Grab ref for LRU list */
+- atomic_inc(&entry->e_refcnt);
+ cache->c_entry_count++;
+ spin_unlock(&cache->c_list_lock);
++ mb_cache_entry_put(cache, entry);
+
+ return 0;
+ }
+ EXPORT_SYMBOL(mb_cache_entry_create);
+
+-void __mb_cache_entry_free(struct mb_cache_entry *entry)
++void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
+ {
++ struct hlist_bl_head *head;
++
++ head = mb_cache_entry_head(cache, entry->e_key);
++ hlist_bl_lock(head);
++ hlist_bl_del(&entry->e_hash_list);
++ hlist_bl_unlock(head);
+ kmem_cache_free(mb_entry_cache, entry);
+ }
+ EXPORT_SYMBOL(__mb_cache_entry_free);
+
++/*
++ * mb_cache_entry_wait_unused - wait to be the last user of the entry
++ *
++ * @entry - entry to work on
++ *
++ * Wait to be the last user of the entry.
++ */
++void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
++{
++ wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
++}
++EXPORT_SYMBOL(mb_cache_entry_wait_unused);
++
+ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
+ struct mb_cache_entry *entry,
+ u32 key)
+@@ -142,10 +166,10 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
+ while (node) {
+ entry = hlist_bl_entry(node, struct mb_cache_entry,
+ e_hash_list);
+- if (entry->e_key == key && entry->e_reusable) {
+- atomic_inc(&entry->e_refcnt);
++ if (entry->e_key == key &&
++ test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
++ atomic_inc_not_zero(&entry->e_refcnt))
+ goto out;
+- }
+ node = node->next;
+ }
+ entry = NULL;
+@@ -205,10 +229,9 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
+ head = mb_cache_entry_head(cache, key);
+ hlist_bl_lock(head);
+ hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
+- if (entry->e_key == key && entry->e_value == value) {
+- atomic_inc(&entry->e_refcnt);
++ if (entry->e_key == key && entry->e_value == value &&
++ atomic_inc_not_zero(&entry->e_refcnt))
+ goto out;
+- }
+ }
+ entry = NULL;
+ out:
+@@ -217,7 +240,7 @@ out:
+ }
+ EXPORT_SYMBOL(mb_cache_entry_get);
+
+-/* mb_cache_entry_delete - remove a cache entry
++/* mb_cache_entry_delete - try to remove a cache entry
+ * @cache - cache we work with
+ * @key - key
+ * @value - value
+@@ -254,6 +277,43 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
+ }
+ EXPORT_SYMBOL(mb_cache_entry_delete);
+
++/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
++ * @cache - cache we work with
++ * @key - key
++ * @value - value
++ *
++ * Remove entry from cache @cache with key @key and value @value. The removal
++ * happens only if the entry is unused. The function returns NULL in case the
++ * entry was successfully removed or there's no entry in cache. Otherwise the
++ * function grabs reference of the entry that we failed to delete because it
++ * still has users and return it.
++ */
++struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
++ u32 key, u64 value)
++{
++ struct mb_cache_entry *entry;
++
++ entry = mb_cache_entry_get(cache, key, value);
++ if (!entry)
++ return NULL;
++
++ /*
++ * Drop the ref we got from mb_cache_entry_get() and the initial hash
++ * ref if we are the last user
++ */
++ if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
++ return entry;
++
++ spin_lock(&cache->c_list_lock);
++ if (!list_empty(&entry->e_list))
++ list_del_init(&entry->e_list);
++ cache->c_entry_count--;
++ spin_unlock(&cache->c_list_lock);
++ __mb_cache_entry_free(cache, entry);
++ return NULL;
++}
++EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
++
+ /* mb_cache_entry_touch - cache entry got used
+ * @cache - cache the entry belongs to
+ * @entry - entry that got used
+@@ -263,7 +323,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete);
+ void mb_cache_entry_touch(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
+ {
+- entry->e_referenced = 1;
++ set_bit(MBE_REFERENCED_B, &entry->e_flags);
+ }
+ EXPORT_SYMBOL(mb_cache_entry_touch);
+
+@@ -281,34 +341,24 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
+ unsigned long nr_to_scan)
+ {
+ struct mb_cache_entry *entry;
+- struct hlist_bl_head *head;
+ unsigned long shrunk = 0;
+
+ spin_lock(&cache->c_list_lock);
+ while (nr_to_scan-- && !list_empty(&cache->c_list)) {
+ entry = list_first_entry(&cache->c_list,
+ struct mb_cache_entry, e_list);
+- if (entry->e_referenced) {
+- entry->e_referenced = 0;
++ /* Drop initial hash reference if there is no user */
++ if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
++ atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
++ clear_bit(MBE_REFERENCED_B, &entry->e_flags);
+ list_move_tail(&entry->e_list, &cache->c_list);
+ continue;
+ }
+ list_del_init(&entry->e_list);
+ cache->c_entry_count--;
+- /*
+- * We keep LRU list reference so that entry doesn't go away
+- * from under us.
+- */
+ spin_unlock(&cache->c_list_lock);
+- head = mb_cache_entry_head(cache, entry->e_key);
+- hlist_bl_lock(head);
+- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
+- hlist_bl_del_init(&entry->e_hash_list);
+- atomic_dec(&entry->e_refcnt);
+- }
+- hlist_bl_unlock(head);
+- if (mb_cache_entry_put(cache, entry))
+- shrunk++;
++ __mb_cache_entry_free(cache, entry);
++ shrunk++;
+ cond_resched();
+ spin_lock(&cache->c_list_lock);
+ }
+@@ -400,11 +450,6 @@ void mb_cache_destroy(struct mb_cache *cache)
+ * point.
+ */
+ list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
+- if (!hlist_bl_unhashed(&entry->e_hash_list)) {
+- hlist_bl_del_init(&entry->e_hash_list);
+- atomic_dec(&entry->e_refcnt);
+- } else
+- WARN_ON(1);
+ list_del(&entry->e_list);
+ WARN_ON(atomic_read(&entry->e_refcnt) != 1);
+ mb_cache_entry_put(cache, entry);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 77c2c88621bee..a76550d927e7c 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -121,6 +121,11 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
+ if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
+ return NULL;
+
++ label->lfs = 0;
++ label->pi = 0;
++ label->len = 0;
++ label->label = NULL;
++
+ err = security_dentry_init_security(dentry, sattr->ia_mode,
+ &dentry->d_name, (void **)&label->label, &label->len);
+ if (err == 0)
+@@ -2080,18 +2085,18 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
+ }
+
+ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
+- fmode_t fmode)
++ fmode_t fmode)
+ {
+ struct nfs4_state *newstate;
++ struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
++ int openflags = opendata->o_arg.open_flags;
+ int ret;
+
+ if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
+ return 0;
+- opendata->o_arg.open_flags = 0;
+ opendata->o_arg.fmode = fmode;
+- opendata->o_arg.share_access = nfs4_map_atomic_open_share(
+- NFS_SB(opendata->dentry->d_sb),
+- fmode, 0);
++ opendata->o_arg.share_access =
++ nfs4_map_atomic_open_share(server, fmode, openflags);
+ memset(&opendata->o_res, 0, sizeof(opendata->o_res));
+ memset(&opendata->c_res, 0, sizeof(opendata->c_res));
+ nfs4_init_opendata_res(opendata);
+@@ -2666,10 +2671,15 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
+ struct nfs4_opendata *opendata;
+ int ret;
+
+- opendata = nfs4_open_recoverdata_alloc(ctx, state,
+- NFS4_OPEN_CLAIM_FH);
++ opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
++ /*
++ * We're not recovering a delegation, so ask for no delegation.
++ * Otherwise the recovery thread could deadlock with an outstanding
++ * delegation return.
++ */
++ opendata->o_arg.open_flags = O_DIRECT;
+ ret = nfs4_open_recover(opendata, state);
+ if (ret == -ESTALE)
+ d_drop(ctx->dentry);
+@@ -3742,7 +3752,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
+ int open_flags, struct iattr *attr, int *opened)
+ {
+ struct nfs4_state *state;
+- struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
++ struct nfs4_label l, *label;
+
+ label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
+
+@@ -4497,7 +4507,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ int flags)
+ {
+ struct nfs_server *server = NFS_SERVER(dir);
+- struct nfs4_label l, *ilabel = NULL;
++ struct nfs4_label l, *ilabel;
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
+ int status = 0;
+@@ -4850,7 +4860,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- struct nfs4_label l, *label = NULL;
++ struct nfs4_label l, *label;
+ int err;
+
+ label = nfs4_label_init_security(dir, dentry, sattr, &l);
+@@ -4891,7 +4901,7 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- struct nfs4_label l, *label = NULL;
++ struct nfs4_label l, *label;
+ int err;
+
+ label = nfs4_label_init_security(dir, dentry, sattr, &l);
+@@ -5012,7 +5022,7 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+- struct nfs4_label l, *label = NULL;
++ struct nfs4_label l, *label;
+ int err;
+
+ label = nfs4_label_init_security(dir, dentry, sattr, &l);
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index c60b3a1f6d2bc..2ee30ffeb6b97 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1224,6 +1224,8 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
+ if (IS_ERR(task)) {
+ printk(KERN_ERR "%s: kthread_run: %ld\n",
+ __func__, PTR_ERR(task));
++ if (!nfs_client_init_is_complete(clp))
++ nfs_mark_client_ready(clp, PTR_ERR(task));
+ nfs4_clear_state_manager_bit(clp);
+ nfs_put_client(clp);
+ module_put(THIS_MODULE);
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 2b7741fe42ead..a3592becae4a5 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4169,19 +4169,17 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return -EIO;
++ bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ if (len < NFS4_MAXLABELLEN) {
+- if (label) {
+- if (label->len) {
+- if (label->len < len)
+- return -ERANGE;
+- memcpy(label->label, p, len);
+- }
++ if (label && label->len) {
++ if (label->len < len)
++ return -ERANGE;
++ memcpy(label->label, p, len);
+ label->len = len;
+ label->pi = pi;
+ label->lfs = lfs;
+ status = NFS_ATTR_FATTR_V4_SECURITY_LABEL;
+ }
+- bitmap[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ } else
+ printk(KERN_WARNING "%s: label too long (%u)!\n",
+ __func__, len);
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 3c50d18fe8a9b..ffc2b838b123c 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -880,7 +880,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ } else {
+ if (!conn->cb_xprt)
+ return -EINVAL;
+- clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+ clp->cl_cb_session = ses;
+ args.bc_xprt = conn->cb_xprt;
+ args.prognumber = clp->cl_cb_session->se_cb_prog;
+@@ -900,6 +899,9 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ rpc_shutdown_client(client);
+ return -ENOMEM;
+ }
++
++ if (clp->cl_minorversion != 0)
++ clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+ clp->cl_cb_client = client;
+ clp->cl_cb_cred = cred;
+ return 0;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 228c2b0753dcf..de2c3809d15aa 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -508,15 +508,26 @@ find_any_file(struct nfs4_file *f)
+ return ret;
+ }
+
+-static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
++static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
+ {
+- struct nfsd_file *ret = NULL;
++ lockdep_assert_held(&f->fi_lock);
++
++ if (f->fi_fds[O_RDWR])
++ return f->fi_fds[O_RDWR];
++ if (f->fi_fds[O_WRONLY])
++ return f->fi_fds[O_WRONLY];
++ if (f->fi_fds[O_RDONLY])
++ return f->fi_fds[O_RDONLY];
++ return NULL;
++}
++
++static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
++{
++ lockdep_assert_held(&f->fi_lock);
+
+- spin_lock(&f->fi_lock);
+ if (f->fi_deleg_file)
+- ret = nfsd_file_get(f->fi_deleg_file);
+- spin_unlock(&f->fi_lock);
+- return ret;
++ return f->fi_deleg_file;
++ return NULL;
+ }
+
+ static atomic_long_t num_delegations;
+@@ -2402,9 +2413,11 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ ols = openlockstateid(st);
+ oo = ols->st_stateowner;
+ nf = st->sc_file;
+- file = find_any_file(nf);
++
++ spin_lock(&nf->fi_lock);
++ file = find_any_file_locked(nf);
+ if (!file)
+- return 0;
++ goto out;
+
+ seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
+
+@@ -2422,8 +2435,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
+ seq_printf(s, ", ");
+ nfs4_show_owner(s, oo);
+ seq_printf(s, " }\n");
+- nfsd_file_put(file);
+-
++out:
++ spin_unlock(&nf->fi_lock);
+ return 0;
+ }
+
+@@ -2437,9 +2450,10 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+ ols = openlockstateid(st);
+ oo = ols->st_stateowner;
+ nf = st->sc_file;
+- file = find_any_file(nf);
++ spin_lock(&nf->fi_lock);
++ file = find_any_file_locked(nf);
+ if (!file)
+- return 0;
++ goto out;
+
+ seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
+
+@@ -2455,8 +2469,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
+ seq_printf(s, ", ");
+ nfs4_show_owner(s, oo);
+ seq_printf(s, " }\n");
+- nfsd_file_put(file);
+-
++out:
++ spin_unlock(&nf->fi_lock);
+ return 0;
+ }
+
+@@ -2468,9 +2482,10 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+
+ ds = delegstateid(st);
+ nf = st->sc_file;
+- file = find_deleg_file(nf);
++ spin_lock(&nf->fi_lock);
++ file = find_deleg_file_locked(nf);
+ if (!file)
+- return 0;
++ goto out;
+
+ seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
+
+@@ -2482,8 +2497,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
+
+ nfs4_show_superblock(s, file);
+ seq_printf(s, " }\n");
+- nfsd_file_put(file);
+-
++out:
++ spin_unlock(&nf->fi_lock);
+ return 0;
+ }
+
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 95bbe9d4018aa..a4f2c0cc6a49e 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3109,6 +3109,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+ case nfserr_noent:
+ xdr_truncate_encode(xdr, start_offset);
+ goto skip_entry;
++ case nfserr_jukebox:
++ /*
++ * The pseudoroot should only display dentries that lead to
++ * exports. If we get EJUKEBOX here, then we can't tell whether
++ * this entry should be included. Just fail the whole READDIR
++ * with NFS4ERR_DELAY in that case, and hope that the situation
++ * will resolve itself by the client's next attempt.
++ */
++ if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT)
++ goto fail;
++ fallthrough;
+ default:
+ /*
+ * If the client requested the RDATTR_ERROR attribute,
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 670e97dd67f06..80c90fc231a53 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -20,8 +20,7 @@
+
+ #include "nfsd.h"
+ #include "cache.h"
+-
+-#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
++#include "trace.h"
+
+ /*
+ * We use this value to determine the number of hash buckets from the max
+@@ -324,8 +323,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
+ const struct svc_cacherep *rp, struct nfsd_net *nn)
+ {
+ if (key->c_key.k_xid == rp->c_key.k_xid &&
+- key->c_key.k_csum != rp->c_key.k_csum)
++ key->c_key.k_csum != rp->c_key.k_csum) {
+ ++nn->payload_misses;
++ trace_nfsd_drc_mismatch(nn, key, rp);
++ }
+
+ return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
+ }
+@@ -378,15 +379,22 @@ out:
+ return ret;
+ }
+
+-/*
++/**
++ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
++ * @rqstp: Incoming Call to find
++ *
+ * Try to find an entry matching the current call in the cache. When none
+ * is found, we try to grab the oldest expired entry off the LRU list. If
+ * a suitable one isn't there, then drop the cache_lock and allocate a
+ * new one, then search again in case one got inserted while this thread
+ * didn't hold the lock.
++ *
++ * Return values:
++ * %RC_DOIT: Process the request normally
++ * %RC_REPLY: Reply from cache
++ * %RC_DROPIT: Do not process the request further
+ */
+-int
+-nfsd_cache_lookup(struct svc_rqst *rqstp)
++int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ {
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct svc_cacherep *rp, *found;
+@@ -400,7 +408,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
+ rqstp->rq_cacherep = NULL;
+ if (type == RC_NOCACHE) {
+ nfsdstats.rcnocache++;
+- return rtn;
++ goto out;
+ }
+
+ csum = nfsd_cache_csum(rqstp);
+@@ -410,10 +418,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
+ * preallocate an entry.
+ */
+ rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
+- if (!rp) {
+- dprintk("nfsd: unable to allocate DRC entry!\n");
+- return rtn;
+- }
++ if (!rp)
++ goto out;
+
+ spin_lock(&b->cache_lock);
+ found = nfsd_cache_insert(b, rp, nn);
+@@ -432,8 +438,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
+
+ /* go ahead and prune the cache */
+ prune_bucket(b, nn);
+- out:
++
++out_unlock:
+ spin_unlock(&b->cache_lock);
++out:
+ return rtn;
+
+ found_entry:
+@@ -443,13 +451,13 @@ found_entry:
+
+ /* Request being processed */
+ if (rp->c_state == RC_INPROG)
+- goto out;
++ goto out_trace;
+
+ /* From the hall of fame of impractical attacks:
+ * Is this a user who tries to snoop on the cache? */
+ rtn = RC_DOIT;
+ if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
+- goto out;
++ goto out_trace;
+
+ /* Compose RPC reply header */
+ switch (rp->c_type) {
+@@ -461,20 +469,26 @@ found_entry:
+ break;
+ case RC_REPLBUFF:
+ if (!nfsd_cache_append(rqstp, &rp->c_replvec))
+- goto out; /* should not happen */
++ goto out_unlock; /* should not happen */
+ rtn = RC_REPLY;
+ break;
+ default:
+ WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
+ }
+
+- goto out;
++out_trace:
++ trace_nfsd_drc_found(nn, rqstp, rtn);
++ goto out_unlock;
+ }
+
+-/*
+- * Update a cache entry. This is called from nfsd_dispatch when
+- * the procedure has been executed and the complete reply is in
+- * rqstp->rq_res.
++/**
++ * nfsd_cache_update - Update an entry in the duplicate reply cache.
++ * @rqstp: svc_rqst with a finished Reply
++ * @cachetype: which cache to update
++ * @statp: Reply's status code
++ *
++ * This is called from nfsd_dispatch when the procedure has been
++ * executed and the complete reply is in rqstp->rq_res.
+ *
+ * We're copying around data here rather than swapping buffers because
+ * the toplevel loop requires max-sized buffers, which would be a waste
+@@ -487,8 +501,7 @@ found_entry:
+ * nfsd failed to encode a reply that otherwise would have been cached.
+ * In this case, nfsd_cache_update is called with statp == NULL.
+ */
+-void
+-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
++void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+ {
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct svc_cacherep *rp = rqstp->rq_cacherep;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index d63cdda1782d4..70684c7ae94bd 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -420,8 +420,8 @@ static void nfsd_shutdown_net(struct net *net)
+ {
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+- nfsd_file_cache_shutdown_net(net);
+ nfs4_state_shutdown_net(net);
++ nfsd_file_cache_shutdown_net(net);
+ if (nn->lockd_up) {
+ lockd_down(net);
+ nn->lockd_up = 0;
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 127db5351d016..9d37d09d7ca85 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -166,6 +166,12 @@ DEFINE_STATEID_EVENT(layout_recall_done);
+ DEFINE_STATEID_EVENT(layout_recall_fail);
+ DEFINE_STATEID_EVENT(layout_recall_release);
+
++TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
++TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
++TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
++TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_WRITE);
++TRACE_DEFINE_ENUM(NFSD_FILE_REFERENCED);
++
+ #define show_nf_flags(val) \
+ __print_flags(val, "|", \
+ { 1 << NFSD_FILE_HASHED, "HASHED" }, \
+@@ -304,6 +310,65 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
+ __entry->nlink, __entry->mode, __entry->mask)
+ );
+
++#include "cache.h"
++
++TRACE_DEFINE_ENUM(RC_DROPIT);
++TRACE_DEFINE_ENUM(RC_REPLY);
++TRACE_DEFINE_ENUM(RC_DOIT);
++
++#define show_drc_retval(x) \
++ __print_symbolic(x, \
++ { RC_DROPIT, "DROPIT" }, \
++ { RC_REPLY, "REPLY" }, \
++ { RC_DOIT, "DOIT" })
++
++TRACE_EVENT(nfsd_drc_found,
++ TP_PROTO(
++ const struct nfsd_net *nn,
++ const struct svc_rqst *rqstp,
++ int result
++ ),
++ TP_ARGS(nn, rqstp, result),
++ TP_STRUCT__entry(
++ __field(unsigned long long, boot_time)
++ __field(unsigned long, result)
++ __field(u32, xid)
++ ),
++ TP_fast_assign(
++ __entry->boot_time = nn->boot_time;
++ __entry->result = result;
++ __entry->xid = be32_to_cpu(rqstp->rq_xid);
++ ),
++ TP_printk("boot_time=%16llx xid=0x%08x result=%s",
++ __entry->boot_time, __entry->xid,
++ show_drc_retval(__entry->result))
++
++);
++
++TRACE_EVENT(nfsd_drc_mismatch,
++ TP_PROTO(
++ const struct nfsd_net *nn,
++ const struct svc_cacherep *key,
++ const struct svc_cacherep *rp
++ ),
++ TP_ARGS(nn, key, rp),
++ TP_STRUCT__entry(
++ __field(unsigned long long, boot_time)
++ __field(u32, xid)
++ __field(u32, cached)
++ __field(u32, ingress)
++ ),
++ TP_fast_assign(
++ __entry->boot_time = nn->boot_time;
++ __entry->xid = be32_to_cpu(key->c_key.k_xid);
++ __entry->cached = (__force u32)key->c_key.k_csum;
++ __entry->ingress = (__force u32)rp->c_key.k_csum;
++ ),
++ TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x",
++ __entry->boot_time, __entry->xid, __entry->cached,
++ __entry->ingress)
++);
++
+ #endif /* _NFSD_TRACE_H */
+
+ #undef TRACE_INCLUDE_PATH
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index fb61c33c60045..74ef3d313686f 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -13,6 +13,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+ #include <linux/random.h>
++#include <linux/log2.h>
+ #include <linux/crc32.h>
+ #include "nilfs.h"
+ #include "segment.h"
+@@ -448,11 +449,33 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
+ return crc == le32_to_cpu(sbp->s_sum);
+ }
+
+-static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
++/**
++ * nilfs_sb2_bad_offset - check the location of the second superblock
++ * @sbp: superblock raw data buffer
++ * @offset: byte offset of second superblock calculated from device size
++ *
++ * nilfs_sb2_bad_offset() checks if the position on the second
++ * superblock is valid or not based on the filesystem parameters
++ * stored in @sbp. If @offset points to a location within the segment
++ * area, or if the parameters themselves are not normal, it is
++ * determined to be invalid.
++ *
++ * Return Value: true if invalid, false if valid.
++ */
++static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
+ {
+- return offset < ((le64_to_cpu(sbp->s_nsegments) *
+- le32_to_cpu(sbp->s_blocks_per_segment)) <<
+- (le32_to_cpu(sbp->s_log_block_size) + 10));
++ unsigned int shift_bits = le32_to_cpu(sbp->s_log_block_size);
++ u32 blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
++ u64 nsegments = le64_to_cpu(sbp->s_nsegments);
++ u64 index;
++
++ if (blocks_per_segment < NILFS_SEG_MIN_BLOCKS ||
++ shift_bits > ilog2(NILFS_MAX_BLOCK_SIZE) - BLOCK_SIZE_BITS)
++ return true;
++
++ index = offset >> (shift_bits + BLOCK_SIZE_BITS);
++ do_div(index, blocks_per_segment);
++ return index < nsegments;
+ }
+
+ static void nilfs_release_super_block(struct the_nilfs *nilfs)
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 207ec61569ea4..bcc4b5d3e54ef 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3396,10 +3396,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
+ ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
+ ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
+
+- ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+- osb->cconn = NULL;
++ if (osb->cconn) {
++ ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
++ osb->cconn = NULL;
+
+- ocfs2_dlm_shutdown_debug(osb);
++ ocfs2_dlm_shutdown_debug(osb);
++ }
+ }
+
+ static int ocfs2_drop_lock(struct ocfs2_super *osb,
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 900e4ef686bfc..da95ed79c12e9 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -159,7 +159,7 @@ static void ocfs2_queue_replay_slots(struct ocfs2_super *osb,
+ replay_map->rm_state = REPLAY_DONE;
+ }
+
+-static void ocfs2_free_replay_slots(struct ocfs2_super *osb)
++void ocfs2_free_replay_slots(struct ocfs2_super *osb)
+ {
+ struct ocfs2_replay_map *replay_map = osb->replay_map;
+
+diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
+index bfe611ed1b1d7..eb7a21bac71ef 100644
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -152,6 +152,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb);
+ void ocfs2_recovery_exit(struct ocfs2_super *osb);
+
+ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
++void ocfs2_free_replay_slots(struct ocfs2_super *osb);
+ /*
+ * Journal Control:
+ * Initialize, Load, Shutdown, Wipe a journal.
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
+index 1880387601361..9f0326672af6f 100644
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -707,6 +707,8 @@ static struct ctl_table_header *ocfs2_table_header;
+
+ static int __init ocfs2_stack_glue_init(void)
+ {
++ int ret;
++
+ strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+
+ ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
+@@ -716,7 +718,11 @@ static int __init ocfs2_stack_glue_init(void)
+ return -ENOMEM; /* or something. */
+ }
+
+- return ocfs2_sysfs_init();
++ ret = ocfs2_sysfs_init();
++ if (ret)
++ unregister_sysctl_table(ocfs2_table_header);
++
++ return ret;
+ }
+
+ static void __exit ocfs2_stack_glue_exit(void)
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index c1cf67b24c19b..fb4104cb1c5b7 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -984,28 +984,27 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+
+ if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
+ status = -EINVAL;
+- goto read_super_error;
++ goto out;
+ }
+
+ /* probe for superblock */
+ status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
+ if (status < 0) {
+ mlog(ML_ERROR, "superblock probe failed!\n");
+- goto read_super_error;
++ goto out;
+ }
+
+ status = ocfs2_initialize_super(sb, bh, sector_size, &stats);
+- osb = OCFS2_SB(sb);
+- if (status < 0) {
+- mlog_errno(status);
+- goto read_super_error;
+- }
+ brelse(bh);
+ bh = NULL;
++ if (status < 0)
++ goto out;
++
++ osb = OCFS2_SB(sb);
+
+ if (!ocfs2_check_set_options(sb, &parsed_options)) {
+ status = -EINVAL;
+- goto read_super_error;
++ goto out_super;
+ }
+ osb->s_mount_opt = parsed_options.mount_opt;
+ osb->s_atime_quantum = parsed_options.atime_quantum;
+@@ -1022,7 +1021,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+
+ status = ocfs2_verify_userspace_stack(osb, &parsed_options);
+ if (status)
+- goto read_super_error;
++ goto out_super;
+
+ sb->s_magic = OCFS2_SUPER_MAGIC;
+
+@@ -1036,7 +1035,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ status = -EACCES;
+ mlog(ML_ERROR, "Readonly device detected but readonly "
+ "mount was not specified.\n");
+- goto read_super_error;
++ goto out_super;
+ }
+
+ /* You should not be able to start a local heartbeat
+@@ -1045,7 +1044,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ status = -EROFS;
+ mlog(ML_ERROR, "Local heartbeat specified on readonly "
+ "device.\n");
+- goto read_super_error;
++ goto out_super;
+ }
+
+ status = ocfs2_check_journals_nolocks(osb);
+@@ -1054,9 +1053,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ mlog(ML_ERROR, "Recovery required on readonly "
+ "file system, but write access is "
+ "unavailable.\n");
+- else
+- mlog_errno(status);
+- goto read_super_error;
++ goto out_super;
+ }
+
+ ocfs2_set_ro_flag(osb, 1);
+@@ -1072,10 +1069,8 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ }
+
+ status = ocfs2_verify_heartbeat(osb);
+- if (status < 0) {
+- mlog_errno(status);
+- goto read_super_error;
+- }
++ if (status < 0)
++ goto out_super;
+
+ osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
+ ocfs2_debugfs_root);
+@@ -1089,15 +1084,14 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+
+ status = ocfs2_mount_volume(sb);
+ if (status < 0)
+- goto read_super_error;
++ goto out_debugfs;
+
+ if (osb->root_inode)
+ inode = igrab(osb->root_inode);
+
+ if (!inode) {
+ status = -EIO;
+- mlog_errno(status);
+- goto read_super_error;
++ goto out_dismount;
+ }
+
+ osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL,
+@@ -1105,7 +1099,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ if (!osb->osb_dev_kset) {
+ status = -ENOMEM;
+ mlog(ML_ERROR, "Unable to create device kset %s.\n", sb->s_id);
+- goto read_super_error;
++ goto out_dismount;
+ }
+
+ /* Create filecheck sysfs related directories/files at
+@@ -1114,14 +1108,13 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+ status = -ENOMEM;
+ mlog(ML_ERROR, "Unable to create filecheck sysfs directory at "
+ "/sys/fs/ocfs2/%s/filecheck.\n", sb->s_id);
+- goto read_super_error;
++ goto out_dismount;
+ }
+
+ root = d_make_root(inode);
+ if (!root) {
+ status = -ENOMEM;
+- mlog_errno(status);
+- goto read_super_error;
++ goto out_dismount;
+ }
+
+ sb->s_root = root;
+@@ -1168,17 +1161,22 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+
+ return status;
+
+-read_super_error:
+- brelse(bh);
+-
+- if (status)
+- mlog_errno(status);
++out_dismount:
++ atomic_set(&osb->vol_state, VOLUME_DISABLED);
++ wake_up(&osb->osb_mount_event);
++ ocfs2_free_replay_slots(osb);
++ ocfs2_dismount_volume(sb, 1);
++ goto out;
+
+- if (osb) {
+- atomic_set(&osb->vol_state, VOLUME_DISABLED);
+- wake_up(&osb->osb_mount_event);
+- ocfs2_dismount_volume(sb, 1);
+- }
++out_debugfs:
++ debugfs_remove_recursive(osb->osb_debug_root);
++out_super:
++ ocfs2_release_system_inodes(osb);
++ kfree(osb->recovery_map);
++ ocfs2_delete_osb(osb);
++ kfree(osb);
++out:
++ mlog_errno(status);
+
+ return status;
+ }
+@@ -1787,11 +1785,10 @@ static int ocfs2_get_sector(struct super_block *sb,
+ static int ocfs2_mount_volume(struct super_block *sb)
+ {
+ int status = 0;
+- int unlock_super = 0;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ if (ocfs2_is_hard_readonly(osb))
+- goto leave;
++ goto out;
+
+ mutex_init(&osb->obs_trim_fs_mutex);
+
+@@ -1801,44 +1798,58 @@ static int ocfs2_mount_volume(struct super_block *sb)
+ if (status == -EBADR && ocfs2_userspace_stack(osb))
+ mlog(ML_ERROR, "couldn't mount because cluster name on"
+ " disk does not match the running cluster name.\n");
+- goto leave;
++ goto out;
+ }
+
+ status = ocfs2_super_lock(osb, 1);
+ if (status < 0) {
+ mlog_errno(status);
+- goto leave;
++ goto out_dlm;
+ }
+- unlock_super = 1;
+
+ /* This will load up the node map and add ourselves to it. */
+ status = ocfs2_find_slot(osb);
+ if (status < 0) {
+ mlog_errno(status);
+- goto leave;
++ goto out_super_lock;
+ }
+
+ /* load all node-local system inodes */
+ status = ocfs2_init_local_system_inodes(osb);
+ if (status < 0) {
+ mlog_errno(status);
+- goto leave;
++ goto out_super_lock;
+ }
+
+ status = ocfs2_check_volume(osb);
+ if (status < 0) {
+ mlog_errno(status);
+- goto leave;
++ goto out_system_inodes;
+ }
+
+ status = ocfs2_truncate_log_init(osb);
+- if (status < 0)
++ if (status < 0) {
+ mlog_errno(status);
++ goto out_check_volume;
++ }
+
+-leave:
+- if (unlock_super)
+- ocfs2_super_unlock(osb, 1);
++ ocfs2_super_unlock(osb, 1);
++ return 0;
+
++out_check_volume:
++ ocfs2_free_replay_slots(osb);
++out_system_inodes:
++ if (osb->local_alloc_state == OCFS2_LA_ENABLED)
++ ocfs2_shutdown_local_alloc(osb);
++ ocfs2_release_system_inodes(osb);
++ /* before journal shutdown, we should release slot_info */
++ ocfs2_free_slot_info(osb);
++ ocfs2_journal_shutdown(osb);
++out_super_lock:
++ ocfs2_super_unlock(osb, 1);
++out_dlm:
++ ocfs2_dlm_shutdown(osb, 0);
++out:
+ return status;
+ }
+
+@@ -1911,8 +1922,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
+ !ocfs2_is_hard_readonly(osb))
+ hangup_needed = 1;
+
+- if (osb->cconn)
+- ocfs2_dlm_shutdown(osb, hangup_needed);
++ ocfs2_dlm_shutdown(osb, hangup_needed);
+
+ ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
+ debugfs_remove_recursive(osb->osb_debug_root);
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 29eaa45443727..1b508f5433846 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -194,15 +194,10 @@ void orangefs_debugfs_init(int debug_mask)
+ */
+ static void orangefs_kernel_debug_init(void)
+ {
+- int rc = -ENOMEM;
+- char *k_buffer = NULL;
++ static char k_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
+
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+- k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+- if (!k_buffer)
+- goto out;
+-
+ if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ strcpy(k_buffer, kernel_debug_string);
+ strcat(k_buffer, "\n");
+@@ -213,15 +208,14 @@ static void orangefs_kernel_debug_init(void)
+
+ debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer,
+ &kernel_debug_fops);
+-
+-out:
+- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+ }
+
+
+ void orangefs_debugfs_cleanup(void)
+ {
+ debugfs_remove_recursive(debug_dir);
++ kfree(debug_help_string);
++ debug_help_string = NULL;
+ }
+
+ /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
+@@ -297,18 +291,13 @@ static int help_show(struct seq_file *m, void *v)
+ /*
+ * initialize the client-debug file.
+ */
+-static int orangefs_client_debug_init(void)
++static void orangefs_client_debug_init(void)
+ {
+
+- int rc = -ENOMEM;
+- char *c_buffer = NULL;
++ static char c_buffer[ORANGEFS_MAX_DEBUG_STRING_LEN] = { };
+
+ gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+- c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+- if (!c_buffer)
+- goto out;
+-
+ if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+ strcpy(c_buffer, client_debug_string);
+ strcat(c_buffer, "\n");
+@@ -322,13 +311,6 @@ static int orangefs_client_debug_init(void)
+ debug_dir,
+ c_buffer,
+ &kernel_debug_fops);
+-
+- rc = 0;
+-
+-out:
+-
+- gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+- return rc;
+ }
+
+ /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
+@@ -671,6 +653,7 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
+ memset(debug_help_string, 0, DEBUG_HELP_STRING_SIZE);
+ strlcat(debug_help_string, new, string_size);
+ mutex_unlock(&orangefs_help_file_lock);
++ kfree(new);
+ }
+
+ rc = 0;
+diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
+index c010c1fddafc1..6aa7a23b04dfd 100644
+--- a/fs/orangefs/orangefs-mod.c
++++ b/fs/orangefs/orangefs-mod.c
+@@ -141,7 +141,7 @@ static int __init orangefs_init(void)
+ gossip_err("%s: could not initialize device subsystem %d!\n",
+ __func__,
+ ret);
+- goto cleanup_device;
++ goto cleanup_sysfs;
+ }
+
+ ret = register_filesystem(&orangefs_fs_type);
+@@ -153,11 +153,11 @@ static int __init orangefs_init(void)
+ goto out;
+ }
+
+- orangefs_sysfs_exit();
+-
+-cleanup_device:
+ orangefs_dev_cleanup();
+
++cleanup_sysfs:
++ orangefs_sysfs_exit();
++
+ sysfs_init_failed:
+ orangefs_debugfs_cleanup();
+
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 8c89eaea1583d..fe88ac18ad934 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -557,28 +557,42 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
+ goto out_revert_creds;
+ }
+
+- err = -ENOMEM;
+- override_cred = prepare_creds();
+- if (override_cred) {
++ if (!attr->hardlink) {
++ err = -ENOMEM;
++ override_cred = prepare_creds();
++ if (!override_cred)
++ goto out_revert_creds;
++ /*
++ * In the creation cases(create, mkdir, mknod, symlink),
++ * ovl should transfer current's fs{u,g}id to underlying
++ * fs. Because underlying fs want to initialize its new
++ * inode owner using current's fs{u,g}id. And in this
++ * case, the @inode is a new inode that is initialized
++ * in inode_init_owner() to current's fs{u,g}id. So use
++ * the inode's i_{u,g}id to override the cred's fs{u,g}id.
++ *
++ * But in the other hardlink case, ovl_link() does not
++ * create a new inode, so just use the ovl mounter's
++ * fs{u,g}id.
++ */
+ override_cred->fsuid = inode->i_uid;
+ override_cred->fsgid = inode->i_gid;
+- if (!attr->hardlink) {
+- err = security_dentry_create_files_as(dentry,
+- attr->mode, &dentry->d_name, old_cred,
+- override_cred);
+- if (err) {
+- put_cred(override_cred);
+- goto out_revert_creds;
+- }
++ err = security_dentry_create_files_as(dentry,
++ attr->mode, &dentry->d_name, old_cred,
++ override_cred);
++ if (err) {
++ put_cred(override_cred);
++ goto out_revert_creds;
+ }
+ put_cred(override_creds(override_cred));
+ put_cred(override_cred);
+-
+- if (!ovl_dentry_is_whiteout(dentry))
+- err = ovl_create_upper(dentry, inode, attr);
+- else
+- err = ovl_create_over_whiteout(dentry, inode, attr);
+ }
++
++ if (!ovl_dentry_is_whiteout(dentry))
++ err = ovl_create_upper(dentry, inode, attr);
++ else
++ err = ovl_create_over_whiteout(dentry, inode, attr);
++
+ out_revert_creds:
+ revert_creds(old_cred);
+ return err;
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 1106137c747a3..468e4e65a615d 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -244,7 +244,7 @@ static int propagate_one(struct mount *m)
+ }
+ do {
+ struct mount *parent = last_source->mnt_parent;
+- if (last_source == first_source)
++ if (peers(last_source, first_source))
+ break;
+ done = parent->mnt_master == p;
+ if (done && peers(n, parent))
+diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
+index 8f0369aad22af..9fe46cc26403c 100644
+--- a/fs/pstore/Kconfig
++++ b/fs/pstore/Kconfig
+@@ -118,6 +118,7 @@ config PSTORE_CONSOLE
+ config PSTORE_PMSG
+ bool "Log user space messages"
+ depends on PSTORE
++ select RT_MUTEXES
+ help
+ When the option is enabled, pstore will export a character
+ interface /dev/pmsg0 to log user space messages. On reboot
+diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
+index d8542ec2f38c6..18cf94b597e05 100644
+--- a/fs/pstore/pmsg.c
++++ b/fs/pstore/pmsg.c
+@@ -7,9 +7,10 @@
+ #include <linux/device.h>
+ #include <linux/fs.h>
+ #include <linux/uaccess.h>
++#include <linux/rtmutex.h>
+ #include "internal.h"
+
+-static DEFINE_MUTEX(pmsg_lock);
++static DEFINE_RT_MUTEX(pmsg_lock);
+
+ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+@@ -28,9 +29,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ if (!access_ok(buf, count))
+ return -EFAULT;
+
+- mutex_lock(&pmsg_lock);
++ rt_mutex_lock(&pmsg_lock);
+ ret = psinfo->write_user(&record, buf);
+- mutex_unlock(&pmsg_lock);
++ rt_mutex_unlock(&pmsg_lock);
+ return ret ? ret : count;
+ }
+
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 013486b5125ed..65cbc8a60ca38 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -759,6 +759,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ /* Make sure we didn't get bogus platform data pointer. */
+ if (!pdata) {
+ pr_err("NULL platform data\n");
++ err = -EINVAL;
+ goto fail_out;
+ }
+
+@@ -766,6 +767,7 @@ static int ramoops_probe(struct platform_device *pdev)
+ !pdata->ftrace_size && !pdata->pmsg_size)) {
+ pr_err("The memory size and the record/console size must be "
+ "non-zero\n");
++ err = -EINVAL;
+ goto fail_out;
+ }
+
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 1f4d8c06f9be6..286340f312dcb 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -427,7 +427,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+- vaddr = vmap(pages, page_count, VM_MAP, prot);
++ /*
++ * VM_IOREMAP used here to bypass this region during vread()
++ * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
++ */
++ vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
+ kfree(pages);
+
+ /*
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index dc5f8654b277d..1d652af48f0b1 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2306,28 +2306,62 @@ EXPORT_SYMBOL(dquot_quota_off);
+ * Turn quotas on on a device
+ */
+
+-/*
+- * Helper function to turn quotas on when we already have the inode of
+- * quota file and no quota information is loaded.
+- */
+-static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
++static int vfs_setup_quota_inode(struct inode *inode, int type)
++{
++ struct super_block *sb = inode->i_sb;
++ struct quota_info *dqopt = sb_dqopt(sb);
++
++ if (is_bad_inode(inode))
++ return -EUCLEAN;
++ if (!S_ISREG(inode->i_mode))
++ return -EACCES;
++ if (IS_RDONLY(inode))
++ return -EROFS;
++ if (sb_has_quota_loaded(sb, type))
++ return -EBUSY;
++
++ dqopt->files[type] = igrab(inode);
++ if (!dqopt->files[type])
++ return -EIO;
++ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
++ /* We don't want quota and atime on quota files (deadlocks
++ * possible) Also nobody should write to the file - we use
++ * special IO operations which ignore the immutable bit. */
++ inode_lock(inode);
++ inode->i_flags |= S_NOQUOTA;
++ inode_unlock(inode);
++ /*
++ * When S_NOQUOTA is set, remove dquot references as no more
++ * references can be added
++ */
++ __dquot_drop(inode);
++ }
++ return 0;
++}
++
++static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
++{
++ struct quota_info *dqopt = sb_dqopt(sb);
++ struct inode *inode = dqopt->files[type];
++
++ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
++ inode_lock(inode);
++ inode->i_flags &= ~S_NOQUOTA;
++ inode_unlock(inode);
++ }
++ dqopt->files[type] = NULL;
++ iput(inode);
++}
++
++int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ unsigned int flags)
+ {
+ struct quota_format_type *fmt = find_quota_format(format_id);
+- struct super_block *sb = inode->i_sb;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int error;
+
+ if (!fmt)
+ return -ESRCH;
+- if (!S_ISREG(inode->i_mode)) {
+- error = -EACCES;
+- goto out_fmt;
+- }
+- if (IS_RDONLY(inode)) {
+- error = -EROFS;
+- goto out_fmt;
+- }
+ if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
+ (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
+ error = -EINVAL;
+@@ -2359,27 +2393,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+ invalidate_bdev(sb->s_bdev);
+ }
+
+- if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+- /* We don't want quota and atime on quota files (deadlocks
+- * possible) Also nobody should write to the file - we use
+- * special IO operations which ignore the immutable bit. */
+- inode_lock(inode);
+- inode->i_flags |= S_NOQUOTA;
+- inode_unlock(inode);
+- /*
+- * When S_NOQUOTA is set, remove dquot references as no more
+- * references can be added
+- */
+- __dquot_drop(inode);
+- }
+-
+- error = -EIO;
+- dqopt->files[type] = igrab(inode);
+- if (!dqopt->files[type])
+- goto out_file_flags;
+ error = -EINVAL;
+ if (!fmt->qf_ops->check_quota_file(sb, type))
+- goto out_file_init;
++ goto out_fmt;
+
+ dqopt->ops[type] = fmt->qf_ops;
+ dqopt->info[type].dqi_format = fmt;
+@@ -2387,7 +2403,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+ INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
+ error = dqopt->ops[type]->read_file_info(sb, type);
+ if (error < 0)
+- goto out_file_init;
++ goto out_fmt;
+ if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
+ spin_lock(&dq_data_lock);
+ dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
+@@ -2402,18 +2418,30 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+ dquot_disable(sb, type, flags);
+
+ return error;
+-out_file_init:
+- dqopt->files[type] = NULL;
+- iput(inode);
+-out_file_flags:
+- inode_lock(inode);
+- inode->i_flags &= ~S_NOQUOTA;
+- inode_unlock(inode);
+ out_fmt:
+ put_quota_format(fmt);
+
+ return error;
+ }
++EXPORT_SYMBOL(dquot_load_quota_sb);
++
++/*
++ * Helper function to turn quotas on when we already have the inode of
++ * quota file and no quota information is loaded.
++ */
++static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
++ unsigned int flags)
++{
++ int err;
++
++ err = vfs_setup_quota_inode(inode, type);
++ if (err < 0)
++ return err;
++ err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
++ if (err < 0)
++ vfs_cleanup_quota_inode(inode->i_sb, type);
++ return err;
++}
+
+ /* Reenable quotas on remount RW */
+ int dquot_resume(struct super_block *sb, int type)
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
+index 959a066b7bb07..2843b7cf4d7af 100644
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -695,6 +695,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
+
+ out_failed:
+ reiserfs_write_unlock(dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+@@ -778,6 +779,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
+
+ out_failed:
+ reiserfs_write_unlock(dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+@@ -876,6 +878,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
+ retval = journal_end(&th);
+ out_failed:
+ reiserfs_write_unlock(dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+@@ -1191,6 +1194,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
+ retval = journal_end(&th);
+ out_failed:
+ reiserfs_write_unlock(parent_dir->i_sb);
++ reiserfs_security_free(&security);
+ return retval;
+ }
+
+diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
+index 20be9a0e5870e..59d87f9f72fb4 100644
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -49,6 +49,7 @@ int reiserfs_security_init(struct inode *dir, struct inode *inode,
+ int error;
+
+ sec->name = NULL;
++ sec->value = NULL;
+
+ /* Don't add selinux attributes on xattrs - they'll never get used */
+ if (IS_PRIVATE(dir))
+@@ -94,7 +95,6 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+
+ void reiserfs_security_free(struct reiserfs_security_handle *sec)
+ {
+- kfree(sec->name);
+ kfree(sec->value);
+ sec->name = NULL;
+ sec->value = NULL;
+diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
+index bcb67b0cabe7e..31f66053e2393 100644
+--- a/fs/sysv/itree.c
++++ b/fs/sysv/itree.c
+@@ -438,7 +438,7 @@ static unsigned sysv_nblocks(struct super_block *s, loff_t size)
+ res += blocks;
+ direct = 1;
+ }
+- return blocks;
++ return res;
+ }
+
+ int sysv_getattr(const struct path *path, struct kstat *stat,
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 639aabf30eaf0..37a6bbd5a19c9 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -441,6 +441,12 @@ static int udf_get_block(struct inode *inode, sector_t block,
+ iinfo->i_next_alloc_goal++;
+ }
+
++ /*
++ * Block beyond EOF and prealloc extents? Just discard preallocation
++ * as it is not useful and complicates things.
++ */
++ if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
++ udf_discard_prealloc(inode);
+ udf_clear_extent_cache(inode);
+ phys = inode_getblk(inode, block, &err, &new);
+ if (!phys)
+@@ -490,8 +496,6 @@ static int udf_do_extend_file(struct inode *inode,
+ uint32_t add;
+ int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ struct super_block *sb = inode->i_sb;
+- struct kernel_lb_addr prealloc_loc = {};
+- uint32_t prealloc_len = 0;
+ struct udf_inode_info *iinfo;
+ int err;
+
+@@ -512,19 +516,6 @@ static int udf_do_extend_file(struct inode *inode,
+ ~(sb->s_blocksize - 1);
+ }
+
+- /* Last extent are just preallocated blocks? */
+- if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+- EXT_NOT_RECORDED_ALLOCATED) {
+- /* Save the extent so that we can reattach it to the end */
+- prealloc_loc = last_ext->extLocation;
+- prealloc_len = last_ext->extLength;
+- /* Mark the extent as a hole */
+- last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+- (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+- last_ext->extLocation.logicalBlockNum = 0;
+- last_ext->extLocation.partitionReferenceNum = 0;
+- }
+-
+ /* Can we merge with the previous extent? */
+ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+ EXT_NOT_RECORDED_NOT_ALLOCATED) {
+@@ -552,7 +543,7 @@ static int udf_do_extend_file(struct inode *inode,
+ * more extents, we may need to enter possible following
+ * empty indirect extent.
+ */
+- if (new_block_bytes || prealloc_len)
++ if (new_block_bytes)
+ udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
+ }
+
+@@ -586,17 +577,6 @@ static int udf_do_extend_file(struct inode *inode,
+ }
+
+ out:
+- /* Do we have some preallocated blocks saved? */
+- if (prealloc_len) {
+- err = udf_add_aext(inode, last_pos, &prealloc_loc,
+- prealloc_len, 1);
+- if (err)
+- return err;
+- last_ext->extLocation = prealloc_loc;
+- last_ext->extLength = prealloc_len;
+- count++;
+- }
+-
+ /* last_pos should point to the last written extent... */
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ last_pos->offset -= sizeof(struct short_ad);
+@@ -612,13 +592,17 @@ out:
+ static void udf_do_extend_final_block(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+- uint32_t final_block_len)
++ uint32_t new_elen)
+ {
+- struct super_block *sb = inode->i_sb;
+ uint32_t added_bytes;
+
+- added_bytes = final_block_len -
+- (last_ext->extLength & (sb->s_blocksize - 1));
++ /*
++ * Extent already large enough? It may be already rounded up to block
++ * size...
++ */
++ if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
++ return;
++ added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ last_ext->extLength += added_bytes;
+ UDF_I(inode)->i_lenExtents += added_bytes;
+
+@@ -635,12 +619,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ int8_t etype;
+ struct super_block *sb = inode->i_sb;
+ sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+- unsigned long partial_final_block;
++ loff_t new_elen;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct kernel_long_ad extent;
+ int err = 0;
+- int within_final_block;
++ bool within_last_ext;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+@@ -649,8 +633,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ else
+ BUG();
+
++ /*
++ * When creating hole in file, just don't bother with preserving
++ * preallocation. It likely won't be very useful anyway.
++ */
++ udf_discard_prealloc(inode);
++
+ etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+- within_final_block = (etype != -1);
++ within_last_ext = (etype != -1);
++ /* We don't expect extents past EOF... */
++ WARN_ON_ONCE(within_last_ext &&
++ elen > ((loff_t)offset + 1) << inode->i_blkbits);
+
+ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+@@ -666,19 +659,17 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ extent.extLength |= etype << 30;
+ }
+
+- partial_final_block = newsize & (sb->s_blocksize - 1);
++ new_elen = ((loff_t)offset << inode->i_blkbits) |
++ (newsize & (sb->s_blocksize - 1));
+
+ /* File has extent covering the new size (could happen when extending
+ * inside a block)?
+ */
+- if (within_final_block) {
++ if (within_last_ext) {
+ /* Extending file within the last file block */
+- udf_do_extend_final_block(inode, &epos, &extent,
+- partial_final_block);
++ udf_do_extend_final_block(inode, &epos, &extent, new_elen);
+ } else {
+- loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
+- partial_final_block;
+- err = udf_do_extend_file(inode, &epos, &extent, add);
++ err = udf_do_extend_file(inode, &epos, &extent, new_elen);
+ }
+
+ if (err < 0)
+@@ -779,10 +770,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ goto out_free;
+ }
+
+- /* Are we beyond EOF? */
++ /* Are we beyond EOF and preallocated extent? */
+ if (etype == -1) {
+ int ret;
+ loff_t hole_len;
++
+ isBeyondEOF = true;
+ if (count) {
+ if (c)
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 1f418d68e2ee5..c062b41a1e706 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1091,8 +1091,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
+ return -EINVAL;
+
+ ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
+- if (IS_ERR(ofi)) {
+- retval = PTR_ERR(ofi);
++ if (!ofi || IS_ERR(ofi)) {
++ if (IS_ERR(ofi))
++ retval = PTR_ERR(ofi);
+ goto end_rename;
+ }
+
+@@ -1101,8 +1102,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
+
+ brelse(ofibh.sbh);
+ tloc = lelb_to_cpu(ocfi.icb.extLocation);
+- if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0)
+- != old_inode->i_ino)
++ if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino)
+ goto end_rename;
+
+ nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi);
+diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
+index 63a47f1e1d529..fd3e4a8671f0f 100644
+--- a/fs/udf/truncate.c
++++ b/fs/udf/truncate.c
+@@ -120,60 +120,42 @@ void udf_truncate_tail_extent(struct inode *inode)
+
+ void udf_discard_prealloc(struct inode *inode)
+ {
+- struct extent_position epos = { NULL, 0, {0, 0} };
++ struct extent_position epos = {};
++ struct extent_position prev_epos = {};
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
+ uint64_t lbcount = 0;
+ int8_t etype = -1, netype;
+- int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
++ int bsize = 1 << inode->i_blkbits;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
+- inode->i_size == iinfo->i_lenExtents)
++ ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
+ return;
+
+- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+- adsize = sizeof(struct short_ad);
+- else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+- adsize = sizeof(struct long_ad);
+- else
+- adsize = 0;
+-
+ epos.block = iinfo->i_location;
+
+ /* Find the last extent in the file */
+- while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
+- etype = netype;
++ while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
++ brelse(prev_epos.bh);
++ prev_epos = epos;
++ if (prev_epos.bh)
++ get_bh(prev_epos.bh);
++
++ etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
+ lbcount += elen;
+ }
+ if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+- epos.offset -= adsize;
+ lbcount -= elen;
+- extent_trunc(inode, &epos, &eloc, etype, elen, 0);
+- if (!epos.bh) {
+- iinfo->i_lenAlloc =
+- epos.offset -
+- udf_file_entry_alloc_offset(inode);
+- mark_inode_dirty(inode);
+- } else {
+- struct allocExtDesc *aed =
+- (struct allocExtDesc *)(epos.bh->b_data);
+- aed->lengthAllocDescs =
+- cpu_to_le32(epos.offset -
+- sizeof(struct allocExtDesc));
+- if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
+- UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
+- udf_update_tag(epos.bh->b_data, epos.offset);
+- else
+- udf_update_tag(epos.bh->b_data,
+- sizeof(struct allocExtDesc));
+- mark_buffer_dirty_inode(epos.bh, inode);
+- }
++ udf_delete_aext(inode, prev_epos);
++ udf_free_blocks(inode->i_sb, inode, &eloc, 0,
++ DIV_ROUND_UP(elen, 1 << inode->i_blkbits));
+ }
+ /* This inode entry is in-memory only and thus we don't have to mark
+ * the inode dirty */
+ iinfo->i_lenExtents = lbcount;
+ brelse(epos.bh);
++ brelse(prev_epos.bh);
+ }
+
+ static void udf_update_alloc_ext_desc(struct inode *inode,
+diff --git a/fs/xattr.c b/fs/xattr.c
+index f2854570d4119..ee78012ec3a51 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -1013,7 +1013,7 @@ static int xattr_list_one(char **buffer, ssize_t *remaining_size,
+ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
+ char *buffer, size_t size)
+ {
+- bool trusted = capable(CAP_SYS_ADMIN);
++ bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+ struct simple_xattr *xattr;
+ ssize_t remaining_size = size;
+ int err = 0;
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index 7e4f156acc2f7..d0238d3b2f31e 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -39,7 +39,7 @@ struct debugfs_regset32 {
+
+ extern struct dentry *arch_debugfs_dir;
+
+-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
++#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
+ static int __fops ## _open(struct inode *inode, struct file *file) \
+ { \
+ __simple_attr_check_format(__fmt, 0ull); \
+@@ -50,10 +50,16 @@ static const struct file_operations __fops = { \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = debugfs_attr_read, \
+- .write = debugfs_attr_write, \
++ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
+ .llseek = no_llseek, \
+ }
+
++#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
++ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
++
++#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
++ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
++
+ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
+ #if defined(CONFIG_DEBUG_FS)
+@@ -96,6 +102,8 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
++ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos);
+
+ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, const char *new_name);
+@@ -245,6 +253,13 @@ static inline ssize_t debugfs_attr_write(struct file *file,
+ return -ENODEV;
+ }
+
++static inline ssize_t debugfs_attr_write_signed(struct file *file,
++ const char __user *buf,
++ size_t len, loff_t *ppos)
++{
++ return -ENODEV;
++}
++
+ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, char *new_name)
+ {
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index 2bae9ed3c7831..7535f860d45df 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -121,10 +121,10 @@ struct devfreq_dev_profile {
+ * devfreq.nb to the corresponding register notifier call chain.
+ * @work: delayed work for load monitoring.
+ * @previous_freq: previously configured frequency value.
+- * @data: Private data of the governor. The devfreq framework does not
+- * touch this.
+- * @min_freq: Limit minimum frequency requested by user (0: none)
+- * @max_freq: Limit maximum frequency requested by user (0: none)
++ * @data: devfreq driver pass to governors, governor should not change it.
++ * @governor_data: private data for governors, devfreq core doesn't touch it.
++ * @min_freq: Limit minimum frequency requested by user (0: none)
++ * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @scaling_min_freq: Limit minimum frequency requested by OPP interface
+ * @scaling_max_freq: Limit maximum frequency requested by OPP interface
+ * @stop_polling: devfreq polling status of a device.
+@@ -159,7 +159,8 @@ struct devfreq {
+ unsigned long previous_freq;
+ struct devfreq_dev_status last_status;
+
+- void *data; /* private data for governors */
++ void *data;
++ void *governor_data;
+
+ unsigned long min_freq;
+ unsigned long max_freq;
+diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
+index dc4fd8a6644dd..3482f9365a4db 100644
+--- a/include/linux/eventfd.h
++++ b/include/linux/eventfd.h
+@@ -61,7 +61,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+ return ERR_PTR(-ENOSYS);
+ }
+
+-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
++static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+ {
+ return -ENOSYS;
+ }
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 4ecbe12f62152..e003afcea3f3e 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3477,7 +3477,7 @@ void simple_transaction_set(struct file *file, size_t n);
+ * All attributes contain a text representation of a numeric value
+ * that are accessed with the get() and set() functions.
+ */
+-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
++#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
+ static int __fops ## _open(struct inode *inode, struct file *file) \
+ { \
+ __simple_attr_check_format(__fmt, 0ull); \
+@@ -3488,10 +3488,16 @@ static const struct file_operations __fops = { \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+- .write = simple_attr_write, \
++ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
+ .llseek = generic_file_llseek, \
+ }
+
++#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
++ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
++
++#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
++ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
++
+ static inline __printf(1, 2)
+ void __simple_attr_check_format(const char *fmt, ...)
+ {
+@@ -3506,6 +3512,8 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+ ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
++ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
++ size_t len, loff_t *ppos);
+
+ struct ctl_table;
+ int proc_nr_files(struct ctl_table *table, int write,
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index ea5cdbd8c2c32..900f224bb6401 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -276,4 +276,22 @@ static inline void copy_highpage(struct page *to, struct page *from)
+
+ #endif
+
++static inline void memcpy_from_page(char *to, struct page *page,
++ size_t offset, size_t len)
++{
++ char *from = kmap_atomic(page);
++
++ memcpy(to, from + offset, len);
++ kunmap_atomic(from);
++}
++
++static inline void memcpy_to_page(struct page *page, size_t offset,
++ const char *from, size_t len)
++{
++ char *to = kmap_atomic(page);
++
++ memcpy(to + offset, from, len);
++ kunmap_atomic(to);
++}
++
+ #endif /* _LINUX_HIGHMEM_H */
+diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
+index 20f1e3ff60130..591bc4cefe1d6 100644
+--- a/include/linux/mbcache.h
++++ b/include/linux/mbcache.h
+@@ -10,16 +10,29 @@
+
+ struct mb_cache;
+
++/* Cache entry flags */
++enum {
++ MBE_REFERENCED_B = 0,
++ MBE_REUSABLE_B
++};
++
+ struct mb_cache_entry {
+ /* List of entries in cache - protected by cache->c_list_lock */
+ struct list_head e_list;
+- /* Hash table list - protected by hash chain bitlock */
++ /*
++ * Hash table list - protected by hash chain bitlock. The entry is
++ * guaranteed to be hashed while e_refcnt > 0.
++ */
+ struct hlist_bl_node e_hash_list;
++ /*
++ * Entry refcount. Once it reaches zero, entry is unhashed and freed.
++ * While refcount > 0, the entry is guaranteed to stay in the hash and
++ * e.g. mb_cache_entry_try_delete() will fail.
++ */
+ atomic_t e_refcnt;
+ /* Key in hash - stable during lifetime of the entry */
+ u32 e_key;
+- u32 e_referenced:1;
+- u32 e_reusable:1;
++ unsigned long e_flags;
+ /* User provided value - stable during lifetime of the entry */
+ u64 e_value;
+ };
+@@ -29,16 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache);
+
+ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
+ u64 value, bool reusable);
+-void __mb_cache_entry_free(struct mb_cache_entry *entry);
+-static inline int mb_cache_entry_put(struct mb_cache *cache,
+- struct mb_cache_entry *entry)
++void __mb_cache_entry_free(struct mb_cache *cache,
++ struct mb_cache_entry *entry);
++void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
++static inline void mb_cache_entry_put(struct mb_cache *cache,
++ struct mb_cache_entry *entry)
+ {
+- if (!atomic_dec_and_test(&entry->e_refcnt))
+- return 0;
+- __mb_cache_entry_free(entry);
+- return 1;
++ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
++
++ if (cnt > 0) {
++ if (cnt <= 2)
++ wake_up_var(&entry->e_refcnt);
++ return;
++ }
++ __mb_cache_entry_free(cache, entry);
+ }
+
++struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
++ u32 key, u64 value);
+ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
+ u64 value);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c70b79dba1dc1..73bc0f53303f9 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -160,31 +160,38 @@ static inline bool dev_xmit_complete(int rc)
+ * (unsigned long) so they can be read and written atomically.
+ */
+
++#define NET_DEV_STAT(FIELD) \
++ union { \
++ unsigned long FIELD; \
++ atomic_long_t __##FIELD; \
++ }
++
+ struct net_device_stats {
+- unsigned long rx_packets;
+- unsigned long tx_packets;
+- unsigned long rx_bytes;
+- unsigned long tx_bytes;
+- unsigned long rx_errors;
+- unsigned long tx_errors;
+- unsigned long rx_dropped;
+- unsigned long tx_dropped;
+- unsigned long multicast;
+- unsigned long collisions;
+- unsigned long rx_length_errors;
+- unsigned long rx_over_errors;
+- unsigned long rx_crc_errors;
+- unsigned long rx_frame_errors;
+- unsigned long rx_fifo_errors;
+- unsigned long rx_missed_errors;
+- unsigned long tx_aborted_errors;
+- unsigned long tx_carrier_errors;
+- unsigned long tx_fifo_errors;
+- unsigned long tx_heartbeat_errors;
+- unsigned long tx_window_errors;
+- unsigned long rx_compressed;
+- unsigned long tx_compressed;
++ NET_DEV_STAT(rx_packets);
++ NET_DEV_STAT(tx_packets);
++ NET_DEV_STAT(rx_bytes);
++ NET_DEV_STAT(tx_bytes);
++ NET_DEV_STAT(rx_errors);
++ NET_DEV_STAT(tx_errors);
++ NET_DEV_STAT(rx_dropped);
++ NET_DEV_STAT(tx_dropped);
++ NET_DEV_STAT(multicast);
++ NET_DEV_STAT(collisions);
++ NET_DEV_STAT(rx_length_errors);
++ NET_DEV_STAT(rx_over_errors);
++ NET_DEV_STAT(rx_crc_errors);
++ NET_DEV_STAT(rx_frame_errors);
++ NET_DEV_STAT(rx_fifo_errors);
++ NET_DEV_STAT(rx_missed_errors);
++ NET_DEV_STAT(tx_aborted_errors);
++ NET_DEV_STAT(tx_carrier_errors);
++ NET_DEV_STAT(tx_fifo_errors);
++ NET_DEV_STAT(tx_heartbeat_errors);
++ NET_DEV_STAT(tx_window_errors);
++ NET_DEV_STAT(rx_compressed);
++ NET_DEV_STAT(tx_compressed);
+ };
++#undef NET_DEV_STAT
+
+
+ #include <linux/cache.h>
+@@ -4936,4 +4943,9 @@ do { \
+
+ extern struct net_device *blackhole_netdev;
+
++/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
++#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
++#define DEV_STATS_ADD(DEV, FIELD, VAL) \
++ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++
+ #endif /* _LINUX_NETDEVICE_H */
+diff --git a/include/linux/nvme.h b/include/linux/nvme.h
+index a260cd754f28b..ff0ee07b1e8f4 100644
+--- a/include/linux/nvme.h
++++ b/include/linux/nvme.h
+@@ -7,6 +7,7 @@
+ #ifndef _LINUX_NVME_H
+ #define _LINUX_NVME_H
+
++#include <linux/bits.h>
+ #include <linux/types.h>
+ #include <linux/uuid.h>
+
+@@ -107,8 +108,22 @@ enum {
+ NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
+ NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
+ NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
+- NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
++ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
++ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
++ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
++ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
++ * Location
++ */
++ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
++ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
++ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
++ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
++ * Buffer Size
++ */
++ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
++ * Write Throughput
++ */
+ NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
+ };
+
+@@ -295,6 +310,14 @@ enum {
+ NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
+ NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
++ NVME_CTRL_CTRATT_128_ID = 1 << 0,
++ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
++ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
++ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
++ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
++ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
++ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
++ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
+ };
+
+ struct nvme_lbaf {
+@@ -352,6 +375,9 @@ enum {
+ NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_CTRL_NS_LIST = 0x12,
+ NVME_ID_CNS_CTRL_LIST = 0x13,
++ NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
++ NVME_ID_CNS_NS_GRANULARITY = 0x16,
++ NVME_ID_CNS_UUID_LIST = 0x17,
+ };
+
+ enum {
+@@ -409,7 +435,8 @@ struct nvme_smart_log {
+ __u8 avail_spare;
+ __u8 spare_thresh;
+ __u8 percent_used;
+- __u8 rsvd6[26];
++ __u8 endu_grp_crit_warn_sumry;
++ __u8 rsvd7[25];
+ __u8 data_units_read[16];
+ __u8 data_units_written[16];
+ __u8 host_reads[16];
+@@ -423,7 +450,11 @@ struct nvme_smart_log {
+ __le32 warning_temp_time;
+ __le32 critical_comp_time;
+ __le16 temp_sensor[8];
+- __u8 rsvd216[296];
++ __le32 thm_temp1_trans_count;
++ __le32 thm_temp2_trans_count;
++ __le32 thm_temp1_total_time;
++ __le32 thm_temp2_total_time;
++ __u8 rsvd232[280];
+ };
+
+ struct nvme_fw_slot_info_log {
+@@ -439,7 +470,8 @@ enum {
+ NVME_CMD_EFFECTS_NCC = 1 << 2,
+ NVME_CMD_EFFECTS_NIC = 1 << 3,
+ NVME_CMD_EFFECTS_CCC = 1 << 4,
+- NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
++ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
++ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+ };
+
+ struct nvme_effects_log {
+@@ -563,6 +595,7 @@ enum nvme_opcode {
+ nvme_cmd_compare = 0x05,
+ nvme_cmd_write_zeroes = 0x08,
+ nvme_cmd_dsm = 0x09,
++ nvme_cmd_verify = 0x0c,
+ nvme_cmd_resv_register = 0x0d,
+ nvme_cmd_resv_report = 0x0e,
+ nvme_cmd_resv_acquire = 0x11,
+@@ -806,10 +839,14 @@ enum nvme_admin_opcode {
+ nvme_admin_ns_mgmt = 0x0d,
+ nvme_admin_activate_fw = 0x10,
+ nvme_admin_download_fw = 0x11,
++ nvme_admin_dev_self_test = 0x14,
+ nvme_admin_ns_attach = 0x15,
+ nvme_admin_keep_alive = 0x18,
+ nvme_admin_directive_send = 0x19,
+ nvme_admin_directive_recv = 0x1a,
++ nvme_admin_virtual_mgmt = 0x1c,
++ nvme_admin_nvme_mi_send = 0x1d,
++ nvme_admin_nvme_mi_recv = 0x1e,
+ nvme_admin_dbbuf = 0x7C,
+ nvme_admin_format_nvm = 0x80,
+ nvme_admin_security_send = 0x81,
+@@ -873,6 +910,7 @@ enum {
+ NVME_FEAT_PLM_CONFIG = 0x13,
+ NVME_FEAT_PLM_WINDOW = 0x14,
+ NVME_FEAT_HOST_BEHAVIOR = 0x16,
++ NVME_FEAT_SANITIZE = 0x17,
+ NVME_FEAT_SW_PROGRESS = 0x80,
+ NVME_FEAT_HOST_ID = 0x81,
+ NVME_FEAT_RESV_MASK = 0x82,
+@@ -883,6 +921,10 @@ enum {
+ NVME_LOG_FW_SLOT = 0x03,
+ NVME_LOG_CHANGED_NS = 0x04,
+ NVME_LOG_CMD_EFFECTS = 0x05,
++ NVME_LOG_DEVICE_SELF_TEST = 0x06,
++ NVME_LOG_TELEMETRY_HOST = 0x07,
++ NVME_LOG_TELEMETRY_CTRL = 0x08,
++ NVME_LOG_ENDURANCE_GROUP = 0x09,
+ NVME_LOG_ANA = 0x0c,
+ NVME_LOG_DISC = 0x70,
+ NVME_LOG_RESERVATION = 0x80,
+@@ -1290,7 +1332,11 @@ enum {
+ NVME_SC_SGL_INVALID_OFFSET = 0x16,
+ NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+
++ NVME_SC_SANITIZE_FAILED = 0x1C,
++ NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
++
+ NVME_SC_NS_WRITE_PROTECTED = 0x20,
++ NVME_SC_CMD_INTERRUPTED = 0x21,
+
+ NVME_SC_LBA_RANGE = 0x80,
+ NVME_SC_CAP_EXCEEDED = 0x81,
+@@ -1328,6 +1374,8 @@ enum {
+ NVME_SC_NS_NOT_ATTACHED = 0x11a,
+ NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
+ NVME_SC_CTRL_LIST_INVALID = 0x11c,
++ NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
++ NVME_SC_PMR_SAN_PROHIBITED = 0x123,
+
+ /*
+ * I/O Command Set Specific - NVM commands:
+diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
+index 865d02c224ada..b8d41d0e7b46e 100644
+--- a/include/linux/proc_fs.h
++++ b/include/linux/proc_fs.h
+@@ -127,8 +127,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
+ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
+
+ #define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
++#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
+ #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
+ #define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
++#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
+
+ static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
+ {
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index 91e0b7624053e..ec10897f7f60c 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -99,6 +99,8 @@ int dquot_file_open(struct inode *inode, struct file *file);
+
+ int dquot_enable(struct inode *inode, int type, int format_id,
+ unsigned int flags);
++int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
++ unsigned int flags);
+ int dquot_quota_on(struct super_block *sb, int type, int format_id,
+ const struct path *path);
+ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
+diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
+index e90b9bd99ded5..396de2ef8767e 100644
+--- a/include/linux/sunrpc/rpc_pipe_fs.h
++++ b/include/linux/sunrpc/rpc_pipe_fs.h
+@@ -94,6 +94,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
+ char __user *, size_t);
+ extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
+
++/* returns true if the msg is in-flight, i.e., already eaten by the peer */
++static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) {
++ return (msg->copied != 0 && list_empty(&msg->list));
++}
++
+ struct rpc_clnt;
+ extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
+ extern int rpc_remove_client_dir(struct rpc_clnt *);
+diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
+index 93884086f3924..adc80e29168ea 100644
+--- a/include/linux/timerqueue.h
++++ b/include/linux/timerqueue.h
+@@ -35,7 +35,7 @@ struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
+ {
+ struct rb_node *leftmost = rb_first_cached(&head->rb_root);
+
+- return rb_entry(leftmost, struct timerqueue_node, node);
++ return rb_entry_safe(leftmost, struct timerqueue_node, node);
+ }
+
+ static inline void timerqueue_init(struct timerqueue_node *node)
+diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
+index def3f6159a0ef..6c79070ec11a8 100644
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -198,8 +198,8 @@ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *ev
+ * The loop below will unmap these fields if the log is larger than
+ * one page, so save them here for reference:
+ */
+- count = READ_ONCE(event->count);
+- event_type = READ_ONCE(event->event_type);
++ count = event->count;
++ event_type = event->event_type;
+
+ /* Verify that it's the log header */
+ if (event_header->pcr_idx != 0 ||
+diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
+index 551325858de3d..73c1df584a146 100644
+--- a/include/media/dvbdev.h
++++ b/include/media/dvbdev.h
+@@ -126,6 +126,7 @@ struct dvb_adapter {
+ * struct dvb_device - represents a DVB device node
+ *
+ * @list_head: List head with all DVB devices
++ * @ref: reference counter
+ * @fops: pointer to struct file_operations
+ * @adapter: pointer to the adapter that holds this device node
+ * @type: type of the device, as defined by &enum dvb_device_type.
+@@ -156,6 +157,7 @@ struct dvb_adapter {
+ */
+ struct dvb_device {
+ struct list_head list_head;
++ struct kref ref;
+ const struct file_operations *fops;
+ struct dvb_adapter *adapter;
+ enum dvb_device_type type;
+@@ -187,6 +189,20 @@ struct dvb_device {
+ void *priv;
+ };
+
++/**
++ * dvb_device_get - Increase dvb_device reference
++ *
++ * @dvbdev: pointer to struct dvb_device
++ */
++struct dvb_device *dvb_device_get(struct dvb_device *dvbdev);
++
++/**
++ * dvb_device_put - Decrease dvb_device reference
++ *
++ * @dvbdev: pointer to struct dvb_device
++ */
++void dvb_device_put(struct dvb_device *dvbdev);
++
+ /**
+ * dvb_register_adapter - Registers a new DVB adapter
+ *
+@@ -231,29 +247,17 @@ int dvb_register_device(struct dvb_adapter *adap,
+ /**
+ * dvb_remove_device - Remove a registered DVB device
+ *
+- * This does not free memory. To do that, call dvb_free_device().
++ * This does not free memory. dvb_free_device() will do that when
++ * reference counter is empty
+ *
+ * @dvbdev: pointer to struct dvb_device
+ */
+ void dvb_remove_device(struct dvb_device *dvbdev);
+
+-/**
+- * dvb_free_device - Free memory occupied by a DVB device.
+- *
+- * Call dvb_unregister_device() before calling this function.
+- *
+- * @dvbdev: pointer to struct dvb_device
+- */
+-void dvb_free_device(struct dvb_device *dvbdev);
+
+ /**
+ * dvb_unregister_device - Unregisters a DVB device
+ *
+- * This is a combination of dvb_remove_device() and dvb_free_device().
+- * Using this function is usually a mistake, and is often an indicator
+- * for a use-after-free bug (when a userspace process keeps a file
+- * handle to a detached device).
+- *
+ * @dvbdev: pointer to struct dvb_device
+ */
+ void dvb_unregister_device(struct dvb_device *dvbdev);
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 1bee8fdff7db0..69ceb5b4a8d68 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -205,7 +205,7 @@ struct bonding {
+ struct slave __rcu *curr_active_slave;
+ struct slave __rcu *current_arp_slave;
+ struct slave __rcu *primary_slave;
+- struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
++ struct bond_up_slave __rcu *usable_slaves; /* Array of usable slaves */
+ bool force_primary;
+ s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 433f7c1ce8a9d..34185e5277267 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -357,9 +357,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
+ {
+- /* TODO : stats should be SMP safe */
+- dev->stats.rx_packets++;
+- dev->stats.rx_bytes += skb->len;
++ DEV_STATS_INC(dev, rx_packets);
++ DEV_STATS_ADD(dev, rx_bytes, skb->len);
+ __skb_tunnel_rx(skb, dev, net);
+ }
+
+diff --git a/include/net/mrp.h b/include/net/mrp.h
+index ef58b4a071900..c6c53370e390f 100644
+--- a/include/net/mrp.h
++++ b/include/net/mrp.h
+@@ -120,6 +120,7 @@ struct mrp_applicant {
+ struct sk_buff *pdu;
+ struct rb_root mad;
+ struct rcu_head rcu;
++ bool active;
+ };
+
+ struct mrp_port {
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index 44e57bcc4a579..749b1bce9fc62 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -555,6 +555,8 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
+ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
+ void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
+ void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
++void snd_hdac_stop_streams(struct hdac_bus *bus);
++void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
+ void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
+ void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
+ unsigned int streams, unsigned int reg);
+diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
+index 23dc8deac344d..91440476319a7 100644
+--- a/include/sound/hdaudio_ext.h
++++ b/include/sound/hdaudio_ext.h
+@@ -92,7 +92,6 @@ void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
+ struct hdac_ext_stream *azx_dev, bool decouple);
+ void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
+ struct hdac_ext_stream *azx_dev, bool decouple);
+-void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
+
+ int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
+ struct hdac_ext_stream *stream, u32 value);
+diff --git a/include/sound/pcm.h b/include/sound/pcm.h
+index f0045f842a604..299e354588636 100644
+--- a/include/sound/pcm.h
++++ b/include/sound/pcm.h
+@@ -104,24 +104,24 @@ struct snd_pcm_ops {
+ #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
+
+ /* If you change this don't forget to change rates[] table in pcm_native.c */
+-#define SNDRV_PCM_RATE_5512 (1<<0) /* 5512Hz */
+-#define SNDRV_PCM_RATE_8000 (1<<1) /* 8000Hz */
+-#define SNDRV_PCM_RATE_11025 (1<<2) /* 11025Hz */
+-#define SNDRV_PCM_RATE_16000 (1<<3) /* 16000Hz */
+-#define SNDRV_PCM_RATE_22050 (1<<4) /* 22050Hz */
+-#define SNDRV_PCM_RATE_32000 (1<<5) /* 32000Hz */
+-#define SNDRV_PCM_RATE_44100 (1<<6) /* 44100Hz */
+-#define SNDRV_PCM_RATE_48000 (1<<7) /* 48000Hz */
+-#define SNDRV_PCM_RATE_64000 (1<<8) /* 64000Hz */
+-#define SNDRV_PCM_RATE_88200 (1<<9) /* 88200Hz */
+-#define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */
+-#define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */
+-#define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */
+-#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */
+-#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */
+-
+-#define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */
+-#define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */
++#define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */
++#define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */
++#define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */
++#define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */
++#define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */
++#define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */
++#define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */
++#define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */
++#define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */
++#define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */
++#define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */
++#define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */
++#define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */
++#define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */
++#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
++
++#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
++#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */
+
+ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
+ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
+diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
+index 2310b259329fb..c9fb7b987a3a4 100644
+--- a/include/trace/events/jbd2.h
++++ b/include/trace/events/jbd2.h
+@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+- __field( int, transaction )
++ __field( tid_t, transaction )
+ ),
+
+ TP_fast_assign(
+@@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit,
+ __entry->transaction = commit_transaction->t_tid;
+ ),
+
+- TP_printk("dev %d,%d transaction %d sync %d",
++ TP_printk("dev %d,%d transaction %u sync %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->transaction, __entry->sync_commit)
+ );
+@@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit,
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+- __field( int, transaction )
+- __field( int, head )
++ __field( tid_t, transaction )
++ __field( tid_t, head )
+ ),
+
+ TP_fast_assign(
+@@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit,
+ __entry->head = journal->j_tail_sequence;
+ ),
+
+- TP_printk("dev %d,%d transaction %d sync %d head %d",
++ TP_printk("dev %d,%d transaction %u sync %d head %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->transaction, __entry->sync_commit, __entry->head)
+ );
+@@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data,
+ );
+
+ TRACE_EVENT(jbd2_handle_start,
+- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ unsigned int line_no, int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, requested_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+- __field( unsigned long, tid )
++ __field( tid_t, tid )
+ __field( unsigned int, type )
+ __field( unsigned int, line_no )
+ __field( int, requested_blocks)
+@@ -155,14 +155,14 @@ TRACE_EVENT(jbd2_handle_start,
+ __entry->requested_blocks = requested_blocks;
+ ),
+
+- TP_printk("dev %d,%d tid %lu type %u line_no %u "
++ TP_printk("dev %d,%d tid %u type %u line_no %u "
+ "requested_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ __entry->type, __entry->line_no, __entry->requested_blocks)
+ );
+
+ TRACE_EVENT(jbd2_handle_extend,
+- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ unsigned int line_no, int buffer_credits,
+ int requested_blocks),
+
+@@ -170,7 +170,7 @@ TRACE_EVENT(jbd2_handle_extend,
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+- __field( unsigned long, tid )
++ __field( tid_t, tid )
+ __field( unsigned int, type )
+ __field( unsigned int, line_no )
+ __field( int, buffer_credits )
+@@ -186,7 +186,7 @@ TRACE_EVENT(jbd2_handle_extend,
+ __entry->requested_blocks = requested_blocks;
+ ),
+
+- TP_printk("dev %d,%d tid %lu type %u line_no %u "
++ TP_printk("dev %d,%d tid %u type %u line_no %u "
+ "buffer_credits %d requested_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ __entry->type, __entry->line_no, __entry->buffer_credits,
+@@ -194,7 +194,7 @@ TRACE_EVENT(jbd2_handle_extend,
+ );
+
+ TRACE_EVENT(jbd2_handle_stats,
+- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type,
+ unsigned int line_no, int interval, int sync,
+ int requested_blocks, int dirtied_blocks),
+
+@@ -203,7 +203,7 @@ TRACE_EVENT(jbd2_handle_stats,
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+- __field( unsigned long, tid )
++ __field( tid_t, tid )
+ __field( unsigned int, type )
+ __field( unsigned int, line_no )
+ __field( int, interval )
+@@ -223,7 +223,7 @@ TRACE_EVENT(jbd2_handle_stats,
+ __entry->dirtied_blocks = dirtied_blocks;
+ ),
+
+- TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d "
++ TP_printk("dev %d,%d tid %u type %u line_no %u interval %d "
+ "sync %d requested_blocks %d dirtied_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ __entry->type, __entry->line_no, __entry->interval,
+@@ -232,14 +232,14 @@ TRACE_EVENT(jbd2_handle_stats,
+ );
+
+ TRACE_EVENT(jbd2_run_stats,
+- TP_PROTO(dev_t dev, unsigned long tid,
++ TP_PROTO(dev_t dev, tid_t tid,
+ struct transaction_run_stats_s *stats),
+
+ TP_ARGS(dev, tid, stats),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+- __field( unsigned long, tid )
++ __field( tid_t, tid )
+ __field( unsigned long, wait )
+ __field( unsigned long, request_delay )
+ __field( unsigned long, running )
+@@ -265,7 +265,7 @@ TRACE_EVENT(jbd2_run_stats,
+ __entry->blocks_logged = stats->rs_blocks_logged;
+ ),
+
+- TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u "
++ TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u "
+ "locked %u flushing %u logging %u handle_count %u "
+ "blocks %u blocks_logged %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+@@ -280,14 +280,14 @@ TRACE_EVENT(jbd2_run_stats,
+ );
+
+ TRACE_EVENT(jbd2_checkpoint_stats,
+- TP_PROTO(dev_t dev, unsigned long tid,
++ TP_PROTO(dev_t dev, tid_t tid,
+ struct transaction_chp_stats_s *stats),
+
+ TP_ARGS(dev, tid, stats),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+- __field( unsigned long, tid )
++ __field( tid_t, tid )
+ __field( unsigned long, chp_time )
+ __field( __u32, forced_to_close )
+ __field( __u32, written )
+@@ -303,7 +303,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
+ __entry->dropped = stats->cs_dropped;
+ ),
+
+- TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
++ TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u "
+ "written %u dropped %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
+ jiffies_to_msecs(__entry->chp_time),
+diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
+index 7272f85d6d6ab..3736f2fe15418 100644
+--- a/include/uapi/linux/swab.h
++++ b/include/uapi/linux/swab.h
+@@ -3,7 +3,7 @@
+ #define _UAPI_LINUX_SWAB_H
+
+ #include <linux/types.h>
+-#include <linux/compiler.h>
++#include <linux/stddef.h>
+ #include <asm/bitsperlong.h>
+ #include <asm/swab.h>
+
+diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
+index a75e14edc957e..dbd60f48b4b01 100644
+--- a/include/uapi/sound/asequencer.h
++++ b/include/uapi/sound/asequencer.h
+@@ -344,10 +344,10 @@ typedef int __bitwise snd_seq_client_type_t;
+ #define KERNEL_CLIENT ((__force snd_seq_client_type_t) 2)
+
+ /* event filter flags */
+-#define SNDRV_SEQ_FILTER_BROADCAST (1<<0) /* accept broadcast messages */
+-#define SNDRV_SEQ_FILTER_MULTICAST (1<<1) /* accept multicast messages */
+-#define SNDRV_SEQ_FILTER_BOUNCE (1<<2) /* accept bounce event in error */
+-#define SNDRV_SEQ_FILTER_USE_EVENT (1<<31) /* use event filter */
++#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */
++#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */
++#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */
++#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */
+
+ struct snd_seq_client_info {
+ int client; /* client number to inquire */
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 81f9831a78592..6d98aed403bac 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -331,6 +331,8 @@ static comp_t encode_comp_t(unsigned long value)
+ exp++;
+ }
+
++ if (exp > (((comp_t) ~0U) >> MANTSIZE))
++ return (comp_t) ~0U;
+ /*
+ * Clean it up and polish it off.
+ */
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index a28bbec8c59fc..8fd65a0eb7f3e 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -2849,6 +2849,11 @@ static int btf_func_proto_check(struct btf_verifier_env *env,
+ break;
+ }
+
++ if (btf_type_is_resolve_source_only(arg_type)) {
++ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
++ return -EINVAL;
++ }
++
+ if (args[i].name_off &&
+ (!btf_name_offset_valid(btf, args[i].name_off) ||
+ !btf_name_valid_identifier(btf, args[i].name_off))) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index f705d3752fe0d..32b32ecad770d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5140,6 +5140,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+ return err;
+ return adjust_ptr_min_max_vals(env, insn,
+ dst_reg, src_reg);
++ } else if (dst_reg->precise) {
++ /* if dst_reg is precise, src_reg should be precise as well */
++ err = mark_chain_precision(env, insn->src_reg);
++ if (err)
++ return err;
+ }
+ } else {
+ /* Pretend the src is a reg with a known value, since we only
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0a54780e0942d..a1c89b675b0b9 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -10035,13 +10035,15 @@ static int pmu_dev_alloc(struct pmu *pmu)
+
+ pmu->dev->groups = pmu->attr_groups;
+ device_initialize(pmu->dev);
+- ret = dev_set_name(pmu->dev, "%s", pmu->name);
+- if (ret)
+- goto free_dev;
+
+ dev_set_drvdata(pmu->dev, pmu);
+ pmu->dev->bus = &pmu_bus;
+ pmu->dev->release = pmu_dev_release;
++
++ ret = dev_set_name(pmu->dev, "%s", pmu->name);
++ if (ret)
++ goto free_dev;
++
+ ret = device_add(pmu->dev);
+ if (ret)
+ goto free_dev;
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index bd5e918c85700..0f8a7af5d5918 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -85,6 +85,7 @@ struct gcov_fn_info {
+ * @version: gcov version magic indicating the gcc version used for compilation
+ * @next: list head for a singly-linked list
+ * @stamp: uniquifying time stamp
++ * @checksum: unique object checksum
+ * @filename: name of the associated gcov data file
+ * @merge: merge functions (null for unused counter type)
+ * @n_functions: number of instrumented functions
+@@ -97,6 +98,10 @@ struct gcov_info {
+ unsigned int version;
+ struct gcov_info *next;
+ unsigned int stamp;
++ /* Since GCC 12.1 a checksum field is added. */
++#if (__GNUC__ >= 12)
++ unsigned int checksum;
++#endif
+ const char *filename;
+ void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
+ unsigned int n_functions;
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index ba4d742c1c655..7057b60afabe7 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -52,6 +52,7 @@ enum {
+ * IRQS_PENDING - irq is pending and replayed later
+ * IRQS_SUSPENDED - irq is suspended
+ * IRQS_NMI - irq line is used to deliver NMIs
++ * IRQS_SYSFS - descriptor has been added to sysfs
+ */
+ enum {
+ IRQS_AUTODETECT = 0x00000001,
+@@ -64,6 +65,7 @@ enum {
+ IRQS_SUSPENDED = 0x00000800,
+ IRQS_TIMINGS = 0x00001000,
+ IRQS_NMI = 0x00002000,
++ IRQS_SYSFS = 0x00004000,
+ };
+
+ #include "debug.h"
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 172b5e6bc4c2f..0272a2e36ae69 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -288,22 +288,25 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
+ if (irq_kobj_base) {
+ /*
+ * Continue even in case of failure as this is nothing
+- * crucial.
++ * crucial and failures in the late irq_sysfs_init()
++ * cannot be rolled back.
+ */
+ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
+ pr_warn("Failed to add kobject for irq %d\n", irq);
++ else
++ desc->istate |= IRQS_SYSFS;
+ }
+ }
+
+ static void irq_sysfs_del(struct irq_desc *desc)
+ {
+ /*
+- * If irq_sysfs_init() has not yet been invoked (early boot), then
+- * irq_kobj_base is NULL and the descriptor was never added.
+- * kobject_del() complains about a object with no parent, so make
+- * it conditional.
++ * Only invoke kobject_del() when kobject_add() was successfully
++ * invoked for the descriptor. This covers both early boot, where
++ * sysfs is not initialized yet, and the case of a failed
++ * kobject_add() invocation.
+ */
+- if (irq_kobj_base)
++ if (desc->istate & IRQS_SYSFS)
+ kobject_del(&desc->kobj);
+ }
+
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 46455aa7951ec..5092b8bfa1dba 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1680,8 +1680,8 @@ static unsigned long minimum_image_size(unsigned long saveable)
+ * /sys/power/reserved_size, respectively). To make this happen, we compute the
+ * total number of available page frames and allocate at least
+ *
+- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
+- * + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
++ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
++ * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+ *
+ * of them, which corresponds to the maximum size of a hibernation image.
+ *
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 5797cf2909b00..615283404d9dc 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -2317,7 +2317,7 @@ void rcu_force_quiescent_state(void)
+ struct rcu_node *rnp_old = NULL;
+
+ /* Funnel through hierarchy to reduce memory contention. */
+- rnp = __this_cpu_read(rcu_data.mynode);
++ rnp = raw_cpu_read(rcu_data.mynode);
+ for (; rnp != NULL; rnp = rnp->parent) {
+ ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
+ !raw_spin_trylock(&rnp->fqslock);
+diff --git a/kernel/relay.c b/kernel/relay.c
+index d3940becf2fc3..9b1cfcd8dc6b1 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -163,13 +163,13 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
+ {
+ struct rchan_buf *buf;
+
+- if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
++ if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t))
+ return NULL;
+
+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+- buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
++ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t),
+ GFP_KERNEL);
+ if (!buf->padding)
+ goto free_buf;
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 749b27851f45d..abf5cbbb743b2 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1589,7 +1589,8 @@ blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
+
+ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
+ {
+- if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
++ if ((iter->ent->type != TRACE_BLK) ||
++ !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ return TRACE_TYPE_UNHANDLED;
+
+ return print_one_line(iter, true);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 176d858903bdb..11e8189dd8ae9 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -727,6 +727,7 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ work = &buffer->irq_work;
++ full = 0;
+ } else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -EINVAL;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 42f45665e0597..1090b24041104 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6101,7 +6101,20 @@ waitagain:
+
+ ret = print_trace_line(iter);
+ if (ret == TRACE_TYPE_PARTIAL_LINE) {
+- /* don't print partial lines */
++ /*
++ * If one print_trace_line() fills entire trace_seq in one shot,
++ * trace_seq_to_user() will returns -EBUSY because save_len == 0,
++ * In this case, we need to consume it, otherwise, loop will peek
++ * this event next time, resulting in an infinite loop.
++ */
++ if (save_len == 0) {
++ iter->seq.full = 0;
++ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
++ trace_consume(iter);
++ break;
++ }
++
++ /* In other cases, don't print partial lines */
+ iter->seq.seq.len = save_len;
+ break;
+ }
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index b8f1f0eadd2ef..7b648fb9ff115 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -479,7 +479,7 @@ struct action_data {
+ * event param, and is passed to the synthetic event
+ * invocation.
+ */
+- unsigned int var_ref_idx[TRACING_MAP_VARS_MAX];
++ unsigned int var_ref_idx[SYNTH_FIELDS_MAX];
+ struct synth_event *synth_event;
+ bool use_trace_keyword;
+ char *synth_event_name;
+@@ -2752,7 +2752,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
+ return ref_field;
+ }
+ }
+-
++ /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
++ if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
++ return NULL;
+ ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
+ if (ref_field) {
+ if (init_var_ref(ref_field, var_field, system, event_name)) {
+@@ -4014,6 +4016,7 @@ static int parse_action_params(struct trace_array *tr, char *params,
+ while (params) {
+ if (data->n_params >= SYNTH_FIELDS_MAX) {
+ hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -4338,6 +4341,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
+
+ lockdep_assert_held(&event_mutex);
+
++ /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
++ if (data->n_params > SYNTH_FIELDS_MAX)
++ return -EINVAL;
++
+ if (data->use_trace_keyword)
+ synth_event_name = data->synth_event_name;
+ else
+@@ -6433,7 +6440,7 @@ enable:
+ /* Just return zero, not the number of registered triggers */
+ ret = 0;
+ out:
+- if (ret == 0)
++ if (ret == 0 && glob[0])
+ hist_err_clear();
+
+ return ret;
+diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
+index e7258d8c252b2..4da9707ad33df 100644
+--- a/lib/fonts/fonts.c
++++ b/lib/fonts/fonts.c
+@@ -132,8 +132,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
+ if (res > 20)
+ c += 20 - res;
+
+- if ((font_w & (1 << (f->width - 1))) &&
+- (font_h & (1 << (f->height - 1))))
++ if ((font_w & (1U << (f->width - 1))) &&
++ (font_h & (1U << (f->height - 1))))
+ c += 1000;
+
+ if (c > cc) {
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 9d3bda3d49fed..5c6a0b8a2adbb 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -455,20 +455,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
+ }
+ EXPORT_SYMBOL(iov_iter_init);
+
+-static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
+-{
+- char *from = kmap_atomic(page);
+- memcpy(to, from + offset, len);
+- kunmap_atomic(from);
+-}
+-
+-static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
+-{
+- char *to = kmap_atomic(page);
+- memcpy(to + offset, from, len);
+- kunmap_atomic(to);
+-}
+-
+ static void memzero_page(struct page *page, size_t offset, size_t len)
+ {
+ char *addr = kmap_atomic(page);
+diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
+index 21016b32d3131..2b24ea6c94979 100644
+--- a/lib/notifier-error-inject.c
++++ b/lib/notifier-error-inject.c
+@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
+ return 0;
+ }
+
+-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
++DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
+ "%lld\n");
+
+ static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 251213c872b57..0169073ec2b9a 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -940,6 +940,7 @@ static int __init test_firmware_init(void)
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
++ __test_firmware_config_free();
+ kfree(test_fw_config);
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 0758afd6325da..7a2675dbf3ccb 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1219,7 +1219,7 @@ move_freelist_tail(struct list_head *freelist, struct page *freepage)
+ }
+
+ static void
+-fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
++fast_isolate_around(struct compact_control *cc, unsigned long pfn)
+ {
+ unsigned long start_pfn, end_pfn;
+ struct page *page = pfn_to_page(pfn);
+@@ -1236,21 +1236,13 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
+ start_pfn = pageblock_start_pfn(pfn);
+ end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
+
+- /* Scan before */
+- if (start_pfn != pfn) {
+- isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
+- if (cc->nr_freepages >= cc->nr_migratepages)
+- return;
+- }
+-
+- /* Scan after */
+- start_pfn = pfn + nr_isolated;
+- if (start_pfn < end_pfn)
+- isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
++ isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+
+ /* Skip this pageblock in the future as it's full or nearly full */
+ if (cc->nr_freepages < cc->nr_migratepages)
+ set_pageblock_skip(page);
++
++ return;
+ }
+
+ /* Search orders in round-robin fashion */
+@@ -1422,7 +1414,7 @@ fast_isolate_freepages(struct compact_control *cc)
+ return cc->free_pfn;
+
+ low_pfn = page_to_pfn(page);
+- fast_isolate_around(cc, low_pfn, nr_isolated);
++ fast_isolate_around(cc, low_pfn);
+ return low_pfn;
+ }
+
+diff --git a/mm/memblock.c b/mm/memblock.c
+index a75cc65f03307..84a14b63e9129 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1546,7 +1546,13 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
+ end = PFN_DOWN(base + size);
+
+ for (; cursor < end; cursor++) {
+- memblock_free_pages(pfn_to_page(cursor), cursor, 0);
++ /*
++ * Reserved pages are always initialized by the end of
++ * memblock_free_all() (by memmap_init() and, if deferred
++ * initialization is enabled, memmap_init_reserved_pages()), so
++ * these pages can be released directly to the buddy allocator.
++ */
++ __free_pages_core(pfn_to_page(cursor), 0);
+ totalram_pages_inc();
+ }
+ }
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index 5b804dbe2d08f..486becf6c78d4 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -606,7 +606,10 @@ static void mrp_join_timer(struct timer_list *t)
+ spin_unlock(&app->lock);
+
+ mrp_queue_xmit(app);
+- mrp_join_timer_arm(app);
++ spin_lock(&app->lock);
++ if (likely(app->active))
++ mrp_join_timer_arm(app);
++ spin_unlock(&app->lock);
+ }
+
+ static void mrp_periodic_timer_arm(struct mrp_applicant *app)
+@@ -620,11 +623,12 @@ static void mrp_periodic_timer(struct timer_list *t)
+ struct mrp_applicant *app = from_timer(app, t, periodic_timer);
+
+ spin_lock(&app->lock);
+- mrp_mad_event(app, MRP_EVENT_PERIODIC);
+- mrp_pdu_queue(app);
++ if (likely(app->active)) {
++ mrp_mad_event(app, MRP_EVENT_PERIODIC);
++ mrp_pdu_queue(app);
++ mrp_periodic_timer_arm(app);
++ }
+ spin_unlock(&app->lock);
+-
+- mrp_periodic_timer_arm(app);
+ }
+
+ static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
+@@ -872,6 +876,7 @@ int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
+ app->dev = dev;
+ app->app = appl;
+ app->mad = RB_ROOT;
++ app->active = true;
+ spin_lock_init(&app->lock);
+ skb_queue_head_init(&app->queue);
+ rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
+@@ -900,6 +905,9 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
+
++ spin_lock_bh(&app->lock);
++ app->active = false;
++ spin_unlock_bh(&app->lock);
+ /* Delete timer and generate a final TX event to flush out
+ * all pending messages before the applicant is gone.
+ */
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 2ebb6480b6ecb..e5e1c139f2118 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -4455,7 +4455,7 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ else
+ *req_complete = bt_cb(skb)->hci.req_complete;
+- kfree_skb(skb);
++ dev_kfree_skb_irq(skb);
+ }
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+ }
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index fb2abd0e979a9..0e51ed3412ef3 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4210,7 +4210,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+
+ chan->ident = cmd->ident;
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
+- chan->num_conf_rsp++;
++ if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
++ chan->num_conf_rsp++;
+
+ /* Reset config buffer. */
+ chan->conf_len = 0;
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 83a8c48dfaa8b..2db9e285215c5 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -594,7 +594,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
+
+ ret = rfcomm_dlc_send_frag(d, frag);
+ if (ret < 0) {
+- kfree_skb(frag);
++ dev_kfree_skb_irq(frag);
+ goto unlock;
+ }
+
+diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
+index d78c4cc30a288..591d146a53084 100644
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -201,9 +201,6 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
+ {
+ struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
+
+- if (!skb->len)
+- return -EINVAL;
+-
+ if (!__skb)
+ return 0;
+
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index 2809cbd6b7f74..d8cb4b2a076b4 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer,
+ default:
+ pr_warn("Request setup of bad link type = %d\n",
+ param->linktype);
++ cfpkt_destroy(pkt);
+ return -EINVAL;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+- if (!req)
++ if (!req) {
++ cfpkt_destroy(pkt);
+ return -ENOMEM;
++ }
++
+ req->client_layer = user_layer;
+ req->cmd = CFCTRL_CMD_LINK_SETUP;
+ req->param = *param;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 84bc6d0e8560b..296bed9431f3b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -9461,24 +9461,16 @@ void netdev_run_todo(void)
+ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats)
+ {
+-#if BITS_PER_LONG == 64
+- BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
+- memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
+- /* zero out counters that only exist in rtnl_link_stats64 */
+- memset((char *)stats64 + sizeof(*netdev_stats), 0,
+- sizeof(*stats64) - sizeof(*netdev_stats));
+-#else
+- size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
+- const unsigned long *src = (const unsigned long *)netdev_stats;
++ size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
++ const atomic_long_t *src = (atomic_long_t *)netdev_stats;
+ u64 *dst = (u64 *)stats64;
+
+ BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+ for (i = 0; i < n; i++)
+- dst[i] = src[i];
++ dst[i] = atomic_long_read(&src[i]);
+ /* zero out counters that only exist in rtnl_link_stats64 */
+ memset((char *)stats64 + n * sizeof(u64), 0,
+ sizeof(*stats64) - n * sizeof(u64));
+-#endif
+ }
+ EXPORT_SYMBOL(netdev_stats_to_stats64);
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index e81f7772161a9..71fcb4e7edae4 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2071,8 +2071,17 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
+ {
+ unsigned int mlen = skb_network_offset(skb);
+
++ if (unlikely(skb->len <= mlen)) {
++ kfree_skb(skb);
++ return -ERANGE;
++ }
++
+ if (mlen) {
+ __skb_pull(skb, mlen);
++ if (unlikely(!skb->len)) {
++ kfree_skb(skb);
++ return -ERANGE;
++ }
+
+ /* At ingress, the mac header has already been pulled once.
+ * At egress, skb_pospull_rcsum has to be done in case that
+@@ -2092,7 +2101,7 @@ static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
+ u32 flags)
+ {
+ /* Verify that a link layer header is carried */
+- if (unlikely(skb->mac_header >= skb->network_header)) {
++ if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) {
+ kfree_skb(skb);
+ return -ERANGE;
+ }
+@@ -2786,15 +2795,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
+
+ static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
+ {
++ void *old_data;
++
+ /* skb_ensure_writable() is not needed here, as we're
+ * already working on an uncloned skb.
+ */
+ if (unlikely(!pskb_may_pull(skb, off + len)))
+ return -ENOMEM;
+
+- skb_postpull_rcsum(skb, skb->data + off, len);
+- memmove(skb->data + len, skb->data, off);
++ old_data = skb->data;
+ __skb_pull(skb, len);
++ skb_postpull_rcsum(skb, old_data + off, len);
++ memmove(skb->data, old_data, off);
+
+ return 0;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e9c796e2944ef..0547aa2c8b131 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2115,6 +2115,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
+ insp = list;
+ } else {
+ /* Eaten partially. */
++ if (skb_is_gso(skb) && !list->head_frag &&
++ skb_headlen(list))
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+
+ if (skb_shared(list)) {
+ /* Sucks! We need to fork list. :-( */
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 2646e8f98f67d..5bce6d4d20573 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -279,11 +279,13 @@ static void sock_map_free(struct bpf_map *map)
+
+ sk = xchg(psk, NULL);
+ if (sk) {
++ sock_hold(sk);
+ lock_sock(sk);
+ rcu_read_lock();
+ sock_map_unref(sk, psk);
+ rcu_read_unlock();
+ release_sock(sk);
++ sock_put(sk);
+ }
+ }
+
+diff --git a/net/core/stream.c b/net/core/stream.c
+index a61130504827a..d7c5413d16d57 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -196,6 +196,12 @@ void sk_stream_kill_queues(struct sock *sk)
+ /* First the read buffer. */
+ __skb_queue_purge(&sk->sk_receive_queue);
+
++ /* Next, the error queue.
++ * We need to use queue lock, because other threads might
++ * add packets to the queue without socket lock being held.
++ */
++ skb_queue_purge(&sk->sk_error_queue);
++
+ /* Next, the write queue. */
+ WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
+
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 4a9200729a326..783e741491ec3 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -269,9 +269,12 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
+ node_real->addr_B_port = port_rcv->type;
+
+ spin_lock_bh(&hsr->list_lock);
+- list_del_rcu(&node_curr->mac_list);
++ if (!node_curr->removed) {
++ list_del_rcu(&node_curr->mac_list);
++ node_curr->removed = true;
++ kfree_rcu(node_curr, rcu_head);
++ }
+ spin_unlock_bh(&hsr->list_lock);
+- kfree_rcu(node_curr, rcu_head);
+
+ done:
+ skb_push(skb, sizeof(struct hsrv1_ethhdr_sp));
+@@ -436,9 +439,12 @@ void hsr_prune_nodes(struct timer_list *t)
+ if (time_is_before_jiffies(timestamp +
+ msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
+ hsr_nl_nodedown(hsr, node->macaddress_A);
+- list_del_rcu(&node->mac_list);
+- /* Note that we need to free this entry later: */
+- kfree_rcu(node, rcu_head);
++ if (!node->removed) {
++ list_del_rcu(&node->mac_list);
++ node->removed = true;
++ /* Note that we need to free this entry later: */
++ kfree_rcu(node, rcu_head);
++ }
+ }
+ }
+ spin_unlock_bh(&hsr->list_lock);
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index 0f0fa12b43293..01f4ef4ae494b 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -56,6 +56,7 @@ struct hsr_node {
+ unsigned long time_in[HSR_PT_PORTS];
+ bool time_in_stale[HSR_PT_PORTS];
+ u16 seq_out[HSR_PT_PORTS];
++ bool removed;
+ struct rcu_head rcu_head;
+ };
+
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 6cbf0db57ad06..9ef69c975b152 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -902,11 +902,25 @@ void inet_csk_prepare_forced_close(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_prepare_forced_close);
+
++static int inet_ulp_can_listen(const struct sock *sk)
++{
++ const struct inet_connection_sock *icsk = inet_csk(sk);
++
++ if (icsk->icsk_ulp_ops)
++ return -EINVAL;
++
++ return 0;
++}
++
+ int inet_csk_listen_start(struct sock *sk, int backlog)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+- int err = -EADDRINUSE;
++ int err;
++
++ err = inet_ulp_can_listen(sk);
++ if (unlikely(err))
++ return err;
+
+ reqsk_queue_alloc(&icsk->icsk_accept_queue);
+
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index f69dcd3c7797a..ca49d68a0e048 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -203,8 +203,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ tmp->sg.end = i;
+ if (apply) {
+ apply_bytes -= size;
+- if (!apply_bytes)
++ if (!apply_bytes) {
++ if (sge->length)
++ sk_msg_iter_var_prev(i);
+ break;
++ }
+ }
+ } while (i != msg->sg.end);
+
+@@ -312,7 +315,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ bool cork = false, enospc = sk_msg_full(msg);
+ struct sock *sk_redir;
+ u32 tosend, origsize, sent, delta = 0;
+- u32 eval = __SK_NONE;
++ u32 eval;
+ int ret;
+
+ more_data:
+@@ -343,6 +346,7 @@ more_data:
+ tosend = msg->sg.size;
+ if (psock->apply_bytes && psock->apply_bytes < tosend)
+ tosend = psock->apply_bytes;
++ eval = __SK_NONE;
+
+ switch (psock->eval) {
+ case __SK_PASS:
+diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
+index 150e6f0fdbf59..bbe4eca42d361 100644
+--- a/net/ipv4/udp_tunnel.c
++++ b/net/ipv4/udp_tunnel.c
+@@ -196,6 +196,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
+ void udp_tunnel_sock_release(struct socket *sock)
+ {
+ rcu_assign_sk_user_data(sock->sk, NULL);
++ synchronize_rcu();
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sock_release(sock);
+ }
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 828dd95840b47..3d5be1c2fafda 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -539,6 +539,7 @@ csum_copy_err:
+ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ struct raw6_sock *rp)
+ {
++ struct ipv6_txoptions *opt;
+ struct sk_buff *skb;
+ int err = 0;
+ int offset;
+@@ -556,6 +557,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+
+ offset = rp->offset;
+ total_len = inet_sk(sk)->cork.base.length;
++ opt = inet6_sk(sk)->cork.opt;
++ total_len -= opt ? opt->opt_flen : 0;
++
+ if (offset >= total_len - 1) {
+ err = -EINVAL;
+ ip6_flush_pending_frames(sk);
+diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
+index 6e3cf4d19ce88..194d775ce4cc1 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
++++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
+@@ -296,8 +296,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+- hosts = 2 << (32 - netmask - 1);
+- elements = 2 << (netmask - mask_bits - 1);
++ hosts = 2U << (32 - netmask - 1);
++ elements = 2UL << (netmask - mask_bits - 1);
+ }
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
+index 6f9144e1f1c13..ee45dbf1b0350 100644
+--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
++++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
+@@ -128,6 +128,56 @@ static void icmpv6_error_log(const struct sk_buff *skb,
+ IPPROTO_ICMPV6, "%s", msg);
+ }
+
++static noinline_for_stack int
++nf_conntrack_icmpv6_redirect(struct nf_conn *tmpl, struct sk_buff *skb,
++ unsigned int dataoff,
++ const struct nf_hook_state *state)
++{
++ u8 hl = ipv6_hdr(skb)->hop_limit;
++ union nf_inet_addr outer_daddr;
++ union {
++ struct nd_opt_hdr nd_opt;
++ struct rd_msg rd_msg;
++ } tmp;
++ const struct nd_opt_hdr *nd_opt;
++ const struct rd_msg *rd_msg;
++
++ rd_msg = skb_header_pointer(skb, dataoff, sizeof(*rd_msg), &tmp.rd_msg);
++ if (!rd_msg) {
++ icmpv6_error_log(skb, state, "short redirect");
++ return -NF_ACCEPT;
++ }
++
++ if (rd_msg->icmph.icmp6_code != 0)
++ return NF_ACCEPT;
++
++ if (hl != 255 || !(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
++ icmpv6_error_log(skb, state, "invalid saddr or hoplimit for redirect");
++ return -NF_ACCEPT;
++ }
++
++ dataoff += sizeof(*rd_msg);
++
++ /* warning: rd_msg no longer usable after this call */
++ nd_opt = skb_header_pointer(skb, dataoff, sizeof(*nd_opt), &tmp.nd_opt);
++ if (!nd_opt || nd_opt->nd_opt_len == 0) {
++ icmpv6_error_log(skb, state, "redirect without options");
++ return -NF_ACCEPT;
++ }
++
++ /* We could call ndisc_parse_options(), but it would need
++ * skb_linearize() and a bit more work.
++ */
++ if (nd_opt->nd_opt_type != ND_OPT_REDIRECT_HDR)
++ return NF_ACCEPT;
++
++ memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
++ sizeof(outer_daddr.ip6));
++ dataoff += 8;
++ return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
++ IPPROTO_ICMPV6, &outer_daddr);
++}
++
+ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+@@ -158,6 +208,9 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ return NF_ACCEPT;
+ }
+
++ if (icmp6h->icmp6_type == NDISC_REDIRECT)
++ return nf_conntrack_icmpv6_redirect(tmpl, skb, dataoff, state);
++
+ /* is not error message ? */
+ if (icmp6h->icmp6_type >= 128)
+ return NF_ACCEPT;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 9e94f732e717c..b53d5eb868647 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1505,6 +1505,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+ u32 dev_idx, se_idx;
+ u8 *apdu;
+ size_t apdu_len;
++ int rc;
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_SE_INDEX] ||
+@@ -1518,25 +1519,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+ if (!dev)
+ return -ENODEV;
+
+- if (!dev->ops || !dev->ops->se_io)
+- return -ENOTSUPP;
++ if (!dev->ops || !dev->ops->se_io) {
++ rc = -EOPNOTSUPP;
++ goto put_dev;
++ }
+
+ apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
+- if (apdu_len == 0)
+- return -EINVAL;
++ if (apdu_len == 0) {
++ rc = -EINVAL;
++ goto put_dev;
++ }
+
+ apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
+- if (!apdu)
+- return -EINVAL;
++ if (!apdu) {
++ rc = -EINVAL;
++ goto put_dev;
++ }
+
+ ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
+- if (!ctx)
+- return -ENOMEM;
++ if (!ctx) {
++ rc = -ENOMEM;
++ goto put_dev;
++ }
+
+ ctx->dev_idx = dev_idx;
+ ctx->se_idx = se_idx;
+
+- return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
++ rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
++
++put_dev:
++ nfc_put_device(dev);
++ return rc;
+ }
+
+ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+@@ -1559,14 +1572,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+ subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]);
+
+ dev = nfc_get_device(dev_idx);
+- if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
++ if (!dev)
+ return -ENODEV;
+
++ if (!dev->vendor_cmds || !dev->n_vendor_cmds) {
++ err = -ENODEV;
++ goto put_dev;
++ }
++
+ if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
+ data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
+ data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
+- if (data_len == 0)
+- return -EINVAL;
++ if (data_len == 0) {
++ err = -EINVAL;
++ goto put_dev;
++ }
+ } else {
+ data = NULL;
+ data_len = 0;
+@@ -1581,10 +1601,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
+ dev->cur_cmd_info = info;
+ err = cmd->doit(dev, data, data_len);
+ dev->cur_cmd_info = NULL;
+- return err;
++ goto put_dev;
+ }
+
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++
++put_dev:
++ nfc_put_device(dev);
++ return err;
+ }
+
+ /* message building helper */
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 5dc517d64965d..a8a8396dd9838 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -910,6 +910,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ struct sw_flow_mask mask;
+ struct sk_buff *reply;
+ struct datapath *dp;
++ struct sw_flow_key *key;
+ struct sw_flow_actions *acts;
+ struct sw_flow_match match;
+ u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
+@@ -937,24 +938,26 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ /* Extract key. */
+- ovs_match_init(&match, &new_flow->key, false, &mask);
++ key = kzalloc(sizeof(*key), GFP_KERNEL);
++ if (!key) {
++ error = -ENOMEM;
++ goto err_kfree_key;
++ }
++
++ ovs_match_init(&match, key, false, &mask);
+ error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+ a[OVS_FLOW_ATTR_MASK], log);
+ if (error)
+ goto err_kfree_flow;
+
++ ovs_flow_mask_key(&new_flow->key, key, true, &mask);
++
+ /* Extract flow identifier. */
+ error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+- &new_flow->key, log);
++ key, log);
+ if (error)
+ goto err_kfree_flow;
+
+- /* unmasked key is needed to match when ufid is not used. */
+- if (ovs_identifier_is_key(&new_flow->id))
+- match.key = new_flow->id.unmasked_key;
+-
+- ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
+-
+ /* Validate actions. */
+ error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+ &new_flow->key, &acts, log);
+@@ -981,7 +984,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ if (ovs_identifier_is_ufid(&new_flow->id))
+ flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
+ if (!flow)
+- flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
++ flow = ovs_flow_tbl_lookup(&dp->table, key);
+ if (likely(!flow)) {
+ rcu_assign_pointer(new_flow->sf_acts, acts);
+
+@@ -1051,6 +1054,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+
+ if (reply)
+ ovs_notify(&dp_flow_genl_family, reply, info);
++
++ kfree(key);
+ return 0;
+
+ err_unlock_ovs:
+@@ -1060,6 +1065,8 @@ err_kfree_acts:
+ ovs_nla_free_flow_actions(acts);
+ err_kfree_flow:
+ ovs_flow_free(new_flow, false);
++err_kfree_key:
++ kfree(key);
+ error:
+ return error;
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index d76edafb4dff8..450dc03347725 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1864,12 +1864,22 @@ oom:
+
+ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
+ {
++ int depth;
++
+ if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
+ sock->type == SOCK_RAW) {
+ skb_reset_mac_header(skb);
+ skb->protocol = dev_parse_header_protocol(skb);
+ }
+
++ /* Move network header to the right position for VLAN tagged packets */
++ if (likely(skb->dev->type == ARPHRD_ETHER) &&
++ eth_type_vlan(skb->protocol) &&
++ __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
++ if (pskb_may_pull(skb, depth))
++ skb_set_network_header(skb, depth);
++ }
++
+ skb_probe_transport_header(skb);
+ }
+
+@@ -2979,6 +2989,11 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ skb->mark = sockc.mark;
+ skb->tstamp = sockc.transmit_time;
+
++ if (unlikely(extra_len == 4))
++ skb->no_fcs = 1;
++
++ packet_parse_headers(skb, sock);
++
+ if (has_vnet_hdr) {
+ err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
+ if (err)
+@@ -2987,11 +3002,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ virtio_net_hdr_set_proto(skb, &vnet_hdr);
+ }
+
+- packet_parse_headers(skb, sock);
+-
+- if (unlikely(extra_len == 4))
+- skb->no_fcs = 1;
+-
+ err = po->xmit(skb);
+ if (unlikely(err != 0)) {
+ if (err > 0)
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 6202d2e32914a..09fcc54245c75 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -93,7 +93,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+ *_hard_ack = hard_ack;
+ *_top = top;
+
+- pkt->ack.bufferSpace = htons(8);
++ pkt->ack.bufferSpace = htons(0);
+ pkt->ack.maxSkew = htons(0);
+ pkt->ack.firstPacket = htonl(hard_ack + 1);
+ pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 22f020099214d..1cb90d32d553d 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -718,7 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ if (call->tx_total_len != -1 ||
+ call->tx_pending ||
+ call->tx_top != 0)
+- goto error_put;
++ goto out_put_unlock;
+ call->tx_total_len = p.call.tx_total_len;
+ }
+ }
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index 0fccae356dc14..197915332b426 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -116,6 +116,11 @@ static int valid_label(const struct nlattr *attr,
+ {
+ const u32 *label = nla_data(attr);
+
++ if (nla_len(attr) != sizeof(*label)) {
++ NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
++ return -EINVAL;
++ }
++
+ if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
+ NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
+ return -EINVAL;
+@@ -128,7 +133,8 @@ static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
+ [TCA_MPLS_UNSPEC] = { .strict_start_type = TCA_MPLS_UNSPEC + 1 },
+ [TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
+ [TCA_MPLS_PROTO] = { .type = NLA_U16 },
+- [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
++ [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
++ valid_label),
+ [TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7),
+ [TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
+ [TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1),
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 684187a1fdb91..768cf7cf65b48 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -332,7 +332,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ struct tcindex_filter_result *r, struct nlattr **tb,
+ struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
+ {
+- struct tcindex_filter_result new_filter_result, *old_r = r;
++ struct tcindex_filter_result new_filter_result;
+ struct tcindex_data *cp = NULL, *oldp;
+ struct tcindex_filter *f = NULL; /* make gcc behave */
+ struct tcf_result cr = {};
+@@ -401,7 +401,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ err = tcindex_filter_result_init(&new_filter_result, cp, net);
+ if (err < 0)
+ goto errout_alloc;
+- if (old_r)
++ if (r)
+ cr = r->res;
+
+ err = -EBUSY;
+@@ -478,14 +478,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ tcf_bind_filter(tp, &cr, base);
+ }
+
+- if (old_r && old_r != r) {
+- err = tcindex_filter_result_init(old_r, cp, net);
+- if (err < 0) {
+- kfree(f);
+- goto errout_alloc;
+- }
+- }
+-
+ oldp = p;
+ r->res = cr;
+ tcf_exts_change(&r->exts, &e);
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c
+index dd3b8c11a2e0d..43bfb33629e90 100644
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -255,6 +255,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
+ * the value carried.
+ */
+ if (em_hdr->flags & TCF_EM_SIMPLE) {
++ if (em->ops->datalen > 0)
++ goto errout;
+ if (data_len < sizeof(u32))
+ goto errout;
+ em->data = *(u32 *) data;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 6f36df85d23d8..154d62d8ac16c 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1104,6 +1104,11 @@ skip:
+ return -ENOENT;
+ }
+
++ if (new && new->ops == &noqueue_qdisc_ops) {
++ NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
++ return -EINVAL;
++ }
++
+ err = cops->graft(parent, cl, new, &old, extack);
+ if (err)
+ return err;
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index 6385995dc7005..34dd0434d99d5 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -396,10 +396,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ result = tcf_classify(skb, fl, &res, true);
+ if (result < 0)
+ continue;
++ if (result == TC_ACT_SHOT)
++ goto done;
++
+ flow = (struct atm_flow_data *)res.class;
+ if (!flow)
+ flow = lookup_flow(sch, res.classid);
+- goto done;
++ goto drop;
+ }
+ }
+ flow = NULL;
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index e5972889cd81c..12893dac84616 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -231,6 +231,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+ result = tcf_classify(skb, fl, &res, true);
+ if (!fl || result < 0)
+ goto fallback;
++ if (result == TC_ACT_SHOT)
++ return NULL;
+
+ cl = (void *)res.class;
+ if (!cl) {
+@@ -251,8 +253,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+ case TC_ACT_TRAP:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ /* fall through */
+- case TC_ACT_SHOT:
+- return NULL;
++ fallthrough;
+ case TC_ACT_RECLASSIFY:
+ return cbq_reclassify(skb, cl);
+ }
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index b7a71578bd986..4d3cf146f50a5 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -301,7 +301,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
+ list_for_each_entry(pos, &pipe->in_downcall, list) {
+ if (!uid_eq(pos->uid, uid))
+ continue;
+- if (auth && pos->auth->service != auth->service)
++ if (pos->auth->service != auth->service)
+ continue;
+ refcount_inc(&pos->count);
+ return pos;
+@@ -683,6 +683,21 @@ out:
+ return err;
+ }
+
++static struct gss_upcall_msg *
++gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
++{
++ struct gss_upcall_msg *pos;
++ list_for_each_entry(pos, &pipe->in_downcall, list) {
++ if (!uid_eq(pos->uid, uid))
++ continue;
++ if (!rpc_msg_is_inflight(&pos->msg))
++ continue;
++ refcount_inc(&pos->count);
++ return pos;
++ }
++ return NULL;
++}
++
+ #define MSG_BUF_MAXSIZE 1024
+
+ static ssize_t
+@@ -729,7 +744,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ err = -ENOENT;
+ /* Find a matching upcall */
+ spin_lock(&pipe->lock);
+- gss_msg = __gss_find_upcall(pipe, uid, NULL);
++ gss_msg = gss_find_downcall(pipe, uid);
+ if (gss_msg == NULL) {
+ spin_unlock(&pipe->lock);
+ goto err_put_ctx;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index c0016473a255a..1a869a203d520 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1104,18 +1104,23 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
+ return res;
+
+ inlen = svc_getnl(argv);
+- if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
++ if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) {
++ kfree(in_handle->data);
+ return SVC_DENIED;
++ }
+
+ pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
+ in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+- if (!in_token->pages)
++ if (!in_token->pages) {
++ kfree(in_handle->data);
+ return SVC_DENIED;
++ }
+ in_token->page_base = 0;
+ in_token->page_len = inlen;
+ for (i = 0; i < pages; i++) {
+ in_token->pages[i] = alloc_page(GFP_KERNEL);
+ if (!in_token->pages[i]) {
++ kfree(in_handle->data);
+ gss_free_in_token_pages(in_token);
+ return SVC_DENIED;
+ }
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 1893203cc94fc..012b0504264de 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1354,7 +1354,7 @@ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
+ break;
+ default:
+ err = -EAFNOSUPPORT;
+- goto out;
++ goto out_release;
+ }
+ if (err < 0) {
+ dprintk("RPC: can't bind UDP socket (%d)\n", err);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 0f4d39fdb48f2..e13115bbe7196 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1037,6 +1037,7 @@ out4:
+ kfree(req->rl_sendbuf);
+ out3:
+ kfree(req->rl_rdmabuf);
++ rpcrdma_regbuf_free(req->rl_sendbuf);
+ out2:
+ kfree(req);
+ out1:
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 90cf7e0bbaf0f..58ee5ee707816 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -112,6 +112,15 @@ static void __net_exit tipc_exit_net(struct net *net)
+ cond_resched();
+ }
+
++static void __net_exit tipc_pernet_pre_exit(struct net *net)
++{
++ tipc_node_pre_cleanup_net(net);
++}
++
++static struct pernet_operations tipc_pernet_pre_exit_ops = {
++ .pre_exit = tipc_pernet_pre_exit,
++};
++
+ static struct pernet_operations tipc_net_ops = {
+ .init = tipc_init_net,
+ .exit = tipc_exit_net,
+@@ -150,6 +159,10 @@ static int __init tipc_init(void)
+ if (err)
+ goto out_pernet_topsrv;
+
++ err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
++ if (err)
++ goto out_register_pernet_subsys;
++
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+@@ -170,6 +183,8 @@ out_netlink_compat:
+ out_netlink:
+ tipc_bearer_cleanup();
+ out_bearer:
++ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
++out_register_pernet_subsys:
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+ out_pernet_topsrv:
+ tipc_socket_stop();
+@@ -187,6 +202,7 @@ static void __exit tipc_exit(void)
+ tipc_netlink_compat_stop();
+ tipc_netlink_stop();
+ tipc_bearer_cleanup();
++ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+ tipc_socket_stop();
+ unregister_pernet_device(&tipc_net_ops);
+diff --git a/net/tipc/core.h b/net/tipc/core.h
+index c6bda91f85810..59f97ef12e60d 100644
+--- a/net/tipc/core.h
++++ b/net/tipc/core.h
+@@ -59,6 +59,7 @@
+ #include <net/netns/generic.h>
+ #include <linux/rhashtable.h>
+ #include <net/genetlink.h>
++#include <net/netns/hash.h>
+
+ #ifdef pr_fmt
+ #undef pr_fmt
+@@ -202,6 +203,11 @@ static inline int in_range(u16 val, u16 min, u16 max)
+ return !less(val, min) && !more(val, max);
+ }
+
++static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
++{
++ return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
++}
++
+ #ifdef CONFIG_SYSCTL
+ int tipc_register_sysctl(void);
+ void tipc_unregister_sysctl(void);
+diff --git a/net/tipc/discover.c b/net/tipc/discover.c
+index 0436c8f2967d4..9c64567f8a741 100644
+--- a/net/tipc/discover.c
++++ b/net/tipc/discover.c
+@@ -94,6 +94,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
+ msg_set_dest_domain(hdr, dest_domain);
+ msg_set_bc_netid(hdr, tn->net_id);
+ b->media->addr2msg(msg_media_addr(hdr), &b->addr);
++ msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
+ msg_set_node_id(hdr, tipc_own_id(net));
+ }
+
+@@ -193,6 +194,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
+ {
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_msg *hdr = buf_msg(skb);
++ u32 pnet_hash = msg_peer_net_hash(hdr);
+ u16 caps = msg_node_capabilities(hdr);
+ bool legacy = tn->legacy_addr_format;
+ u32 sugg = msg_sugg_node_addr(hdr);
+@@ -244,7 +246,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
+ return;
+ if (!tipc_in_scope(legacy, b->domain, src))
+ return;
+- tipc_node_check_dest(net, src, peer_id, b, caps, signature,
++ tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash,
+ &maddr, &respond, &dupl_addr);
+ if (dupl_addr)
+ disc_dupl_alert(b, src, &maddr);
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index 0daa6f04ca812..6d692546acdc3 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -358,6 +358,11 @@ static inline u32 msg_connected(struct tipc_msg *m)
+ return msg_type(m) == TIPC_CONN_MSG;
+ }
+
++static inline u32 msg_direct(struct tipc_msg *m)
++{
++ return msg_type(m) == TIPC_DIRECT_MSG;
++}
++
+ static inline u32 msg_errcode(struct tipc_msg *m)
+ {
+ return msg_bits(m, 1, 25, 0xf);
+@@ -1026,6 +1031,20 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
+ return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
+ }
+
++/* Word 13
++ */
++static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
++{
++ msg_set_word(m, 13, n);
++}
++
++static inline u32 msg_peer_net_hash(struct tipc_msg *m)
++{
++ return msg_word(m, 13);
++}
++
++/* Word 14
++ */
+ static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
+ {
+ return msg_word(m, 14);
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index 661bc2551a0a2..6ac84e7c8b63a 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -146,7 +146,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
+ struct publication *publ;
+ struct sk_buff *skb = NULL;
+ struct distr_item *item = NULL;
+- u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
++ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index c8f6177dd5a2f..535a4a778640a 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -126,6 +126,8 @@ struct tipc_node {
+ struct timer_list timer;
+ struct rcu_head rcu;
+ unsigned long delete_at;
++ struct net *peer_net;
++ u32 peer_hash_mix;
+ };
+
+ /* Node FSM states and events:
+@@ -184,7 +186,7 @@ static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
+ return n->links[bearer_id].link;
+ }
+
+-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
++int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
+ {
+ struct tipc_node *n;
+ int bearer_id;
+@@ -194,6 +196,14 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+ if (unlikely(!n))
+ return mtu;
+
++ /* Allow MAX_MSG_SIZE when building connection oriented message
++ * if they are in the same core network
++ */
++ if (n->peer_net && connected) {
++ tipc_node_put(n);
++ return mtu;
++ }
++
+ bearer_id = n->active_links[sel & 1];
+ if (likely(bearer_id != INVALID_BEARER_ID))
+ mtu = n->links[bearer_id].mtu;
+@@ -360,8 +370,37 @@ static void tipc_node_write_unlock(struct tipc_node *n)
+ }
+ }
+
++static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
++{
++ int net_id = tipc_netid(n->net);
++ struct tipc_net *tn_peer;
++ struct net *tmp;
++ u32 hash_chk;
++
++ if (n->peer_net)
++ return;
++
++ for_each_net_rcu(tmp) {
++ tn_peer = tipc_net(tmp);
++ if (!tn_peer)
++ continue;
++ /* Integrity checking whether node exists in namespace or not */
++ if (tn_peer->net_id != net_id)
++ continue;
++ if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
++ continue;
++ hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
++ if (hash_mixes ^ hash_chk)
++ continue;
++ n->peer_net = tmp;
++ n->peer_hash_mix = hash_mixes;
++ break;
++ }
++}
++
+ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+- u8 *peer_id, u16 capabilities)
++ u8 *peer_id, u16 capabilities,
++ u32 signature, u32 hash_mixes)
+ {
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_node *n, *temp_node;
+@@ -372,6 +411,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ spin_lock_bh(&tn->node_list_lock);
+ n = tipc_node_find(net, addr);
+ if (n) {
++ if (n->peer_hash_mix ^ hash_mixes)
++ tipc_node_assign_peer_net(n, hash_mixes);
+ if (n->capabilities == capabilities)
+ goto exit;
+ /* Same node may come back with new capabilities */
+@@ -389,6 +430,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
++
+ goto exit;
+ }
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+@@ -399,6 +441,10 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ n->addr = addr;
+ memcpy(&n->peer_id, peer_id, 16);
+ n->net = net;
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
++ /* Assign kernel local namespace if exists */
++ tipc_node_assign_peer_net(n, hash_mixes);
+ n->capabilities = capabilities;
+ kref_init(&n->kref);
+ rwlock_init(&n->lock);
+@@ -979,7 +1025,7 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
+
+ void tipc_node_check_dest(struct net *net, u32 addr,
+ u8 *peer_id, struct tipc_bearer *b,
+- u16 capabilities, u32 signature,
++ u16 capabilities, u32 signature, u32 hash_mixes,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr)
+ {
+@@ -989,8 +1035,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ bool addr_match = false;
+ bool sign_match = false;
+ bool link_up = false;
++ bool link_is_reset = false;
+ bool accept_addr = false;
+- bool reset = true;
++ bool reset = false;
+ char *if_name;
+ unsigned long intv;
+ u16 session;
+@@ -998,7 +1045,8 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ *dupl_addr = false;
+ *respond = false;
+
+- n = tipc_node_create(net, addr, peer_id, capabilities);
++ n = tipc_node_create(net, addr, peer_id, capabilities, signature,
++ hash_mixes);
+ if (!n)
+ return;
+
+@@ -1009,14 +1057,17 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ /* Prepare to validate requesting node's signature and media address */
+ l = le->link;
+ link_up = l && tipc_link_is_up(l);
++ link_is_reset = l && tipc_link_is_reset(l);
+ addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+ sign_match = (signature == n->signature);
+
+ /* These three flags give us eight permutations: */
+
+ if (sign_match && addr_match && link_up) {
+- /* All is fine. Do nothing. */
+- reset = false;
++ /* All is fine. Ignore requests. */
++ /* Peer node is not a container/local namespace */
++ if (!n->peer_hash_mix)
++ n->peer_hash_mix = hash_mixes;
+ } else if (sign_match && addr_match && !link_up) {
+ /* Respond. The link will come up in due time */
+ *respond = true;
+@@ -1038,6 +1089,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ */
+ accept_addr = true;
+ *respond = true;
++ reset = true;
+ } else if (!sign_match && addr_match && link_up) {
+ /* Peer node rebooted. Two possibilities:
+ * - Delayed re-discovery; this link endpoint has already
+@@ -1069,6 +1121,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ n->signature = signature;
+ accept_addr = true;
+ *respond = true;
++ reset = true;
+ }
+
+ if (!accept_addr)
+@@ -1097,6 +1150,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ tipc_link_fsm_evt(l, LINK_RESET_EVT);
+ if (n->state == NODE_FAILINGOVER)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
++ link_is_reset = tipc_link_is_reset(l);
+ le->link = l;
+ n->link_cnt++;
+ tipc_node_calculate_timer(n, l);
+@@ -1109,7 +1163,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
+ memcpy(&le->maddr, maddr, sizeof(*maddr));
+ exit:
+ tipc_node_write_unlock(n);
+- if (reset && l && !tipc_link_is_reset(l))
++ if (reset && !link_is_reset)
+ tipc_node_link_down(n, b->identity, false);
+ tipc_node_put(n);
+ }
+@@ -1342,7 +1396,8 @@ static void node_lost_contact(struct tipc_node *n,
+
+ /* Notify publications from this node */
+ n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+-
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
+ /* Notify sockets connected to node */
+ list_for_each_entry_safe(conn, safe, conns, list) {
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+@@ -1424,6 +1479,57 @@ msg_full:
+ return -EMSGSIZE;
+ }
+
++static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
++{
++ struct tipc_msg *hdr = buf_msg(skb_peek(list));
++ struct sk_buff_head inputq;
++
++ switch (msg_user(hdr)) {
++ case TIPC_LOW_IMPORTANCE:
++ case TIPC_MEDIUM_IMPORTANCE:
++ case TIPC_HIGH_IMPORTANCE:
++ case TIPC_CRITICAL_IMPORTANCE:
++ if (msg_connected(hdr) || msg_named(hdr) ||
++ msg_direct(hdr)) {
++ tipc_loopback_trace(peer_net, list);
++ spin_lock_init(&list->lock);
++ tipc_sk_rcv(peer_net, list);
++ return;
++ }
++ if (msg_mcast(hdr)) {
++ tipc_loopback_trace(peer_net, list);
++ skb_queue_head_init(&inputq);
++ tipc_sk_mcast_rcv(peer_net, list, &inputq);
++ __skb_queue_purge(list);
++ skb_queue_purge(&inputq);
++ return;
++ }
++ return;
++ case MSG_FRAGMENTER:
++ if (tipc_msg_assemble(list)) {
++ tipc_loopback_trace(peer_net, list);
++ skb_queue_head_init(&inputq);
++ tipc_sk_mcast_rcv(peer_net, list, &inputq);
++ __skb_queue_purge(list);
++ skb_queue_purge(&inputq);
++ }
++ return;
++ case GROUP_PROTOCOL:
++ case CONN_MANAGER:
++ tipc_loopback_trace(peer_net, list);
++ spin_lock_init(&list->lock);
++ tipc_sk_rcv(peer_net, list);
++ return;
++ case LINK_PROTOCOL:
++ case NAME_DISTRIBUTOR:
++ case TUNNEL_PROTOCOL:
++ case BCAST_PROTOCOL:
++ return;
++ default:
++ return;
++ };
++}
++
+ /**
+ * tipc_node_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
+@@ -1439,6 +1545,8 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ struct tipc_link_entry *le = NULL;
+ struct tipc_node *n;
+ struct sk_buff_head xmitq;
++ bool node_up = false;
++ struct net *peer_net;
+ int bearer_id;
+ int rc;
+
+@@ -1455,6 +1563,22 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+ return -EHOSTUNREACH;
+ }
+
++ rcu_read_lock();
++ tipc_node_read_lock(n);
++ node_up = node_is_up(n);
++ peer_net = n->peer_net;
++ tipc_node_read_unlock(n);
++ if (node_up && peer_net && check_net(peer_net)) {
++ /* xmit inner linux container */
++ tipc_lxc_xmit(peer_net, list);
++ if (likely(skb_queue_empty(list))) {
++ rcu_read_unlock();
++ tipc_node_put(n);
++ return 0;
++ }
++ }
++ rcu_read_unlock();
++
+ tipc_node_read_lock(n);
+ bearer_id = n->active_links[selector & 1];
+ if (unlikely(bearer_id == INVALID_BEARER_ID)) {
+@@ -2591,3 +2715,33 @@ int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
+
+ return i;
+ }
++
++void tipc_node_pre_cleanup_net(struct net *exit_net)
++{
++ struct tipc_node *n;
++ struct tipc_net *tn;
++ struct net *tmp;
++
++ rcu_read_lock();
++ for_each_net_rcu(tmp) {
++ if (tmp == exit_net)
++ continue;
++ tn = tipc_net(tmp);
++ if (!tn)
++ continue;
++ spin_lock_bh(&tn->node_list_lock);
++ list_for_each_entry_rcu(n, &tn->node_list, list) {
++ if (!n->peer_net)
++ continue;
++ if (n->peer_net != exit_net)
++ continue;
++ tipc_node_write_lock(n);
++ n->peer_net = NULL;
++ n->peer_hash_mix = 0;
++ tipc_node_write_unlock_fast(n);
++ break;
++ }
++ spin_unlock_bh(&tn->node_list_lock);
++ }
++ rcu_read_unlock();
++}
+diff --git a/net/tipc/node.h b/net/tipc/node.h
+index 291d0ecd41013..30563c4f35d5c 100644
+--- a/net/tipc/node.h
++++ b/net/tipc/node.h
+@@ -75,7 +75,7 @@ u32 tipc_node_get_addr(struct tipc_node *node);
+ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
+ void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
+ struct tipc_bearer *bearer,
+- u16 capabilities, u32 signature,
++ u16 capabilities, u32 signature, u32 hash_mixes,
+ struct tipc_media_addr *maddr,
+ bool *respond, bool *dupl_addr);
+ void tipc_node_delete_links(struct net *net, int bearer_id);
+@@ -92,7 +92,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
+ void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
+ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
+ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
+-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
++int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
+ bool tipc_node_is_up(struct net *net, u32 addr);
+ u16 tipc_node_get_capabilities(struct net *net, u32 addr);
+ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
+@@ -107,4 +107,5 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
+ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
+ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
+ struct netlink_callback *cb);
++void tipc_node_pre_cleanup_net(struct net *exit_net);
+ #endif
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 58c4d61d603f6..cf4f90de566ed 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -866,7 +866,7 @@ static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
++ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+@@ -1407,7 +1407,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
+ }
+
+ __skb_queue_head_init(&pkts);
+- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
++ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+@@ -1547,7 +1547,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+ tipc_set_sk_state(sk, TIPC_ESTABLISHED);
+ tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
+- tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
++ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
+ tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ __skb_queue_purge(&sk->sk_write_queue);
+ if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
+diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
+index aaabcd84268a9..85488e19dffc9 100644
+--- a/net/vmw_vsock/vmci_transport.c
++++ b/net/vmw_vsock/vmci_transport.c
+@@ -1725,7 +1725,11 @@ static int vmci_transport_dgram_enqueue(
+ if (!dg)
+ return -ENOMEM;
+
+- memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
++ err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
++ if (err) {
++ kfree(dg);
++ return err;
++ }
+
+ dg->dst = vmci_make_handle(remote_addr->svm_cid,
+ remote_addr->svm_port);
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 4db397db2fb45..1f5ea82b58bf7 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -3970,8 +3970,10 @@ static int __init regulatory_init_db(void)
+ return -EINVAL;
+
+ err = load_builtin_regdb_keys();
+- if (err)
++ if (err) {
++ platform_device_unregister(reg_pdev);
+ return err;
++ }
+
+ /* We always try to get an update for the static regdomain */
+ err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
+diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
+index a760e130bd0d6..8ad1aa13ddd90 100644
+--- a/samples/vfio-mdev/mdpy-fb.c
++++ b/samples/vfio-mdev/mdpy-fb.c
+@@ -109,7 +109,7 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
+
+ ret = pci_request_regions(pdev, "mdpy-fb");
+ if (ret < 0)
+- return ret;
++ goto err_disable_dev;
+
+ pci_read_config_dword(pdev, MDPY_FORMAT_OFFSET, &format);
+ pci_read_config_dword(pdev, MDPY_WIDTH_OFFSET, &width);
+@@ -191,6 +191,9 @@ err_release_fb:
+ err_release_regions:
+ pci_release_regions(pdev);
+
++err_disable_dev:
++ pci_disable_device(pdev);
++
+ return ret;
+ }
+
+@@ -199,7 +202,10 @@ static void mdpy_fb_remove(struct pci_dev *pdev)
+ struct fb_info *info = pci_get_drvdata(pdev);
+
+ unregister_framebuffer(info);
++ iounmap(info->screen_base);
+ framebuffer_release(info);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
+ }
+
+ static struct pci_device_id mdpy_fb_pci_table[] = {
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 84daab8ae0621..62736465ac82a 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -869,8 +869,10 @@ static struct multi_transaction *multi_transaction_new(struct file *file,
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&t->count);
+- if (copy_from_user(t->data, buf, size))
++ if (copy_from_user(t->data, buf, size)) {
++ put_multi_transaction(t);
+ return ERR_PTR(-EFAULT);
++ }
+
+ return t;
+ }
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index e31965dc6dd1f..21e03380dd86d 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -1148,10 +1148,10 @@ static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ #endif
+
+ /*
+- * The cred blob is a pointer to, not an instance of, an aa_task_ctx.
++ * The cred blob is a pointer to, not an instance of, an aa_label.
+ */
+ struct lsm_blob_sizes apparmor_blob_sizes __lsm_ro_after_init = {
+- .lbs_cred = sizeof(struct aa_task_ctx *),
++ .lbs_cred = sizeof(struct aa_label *),
+ .lbs_file = sizeof(struct aa_file_ctx),
+ .lbs_task = sizeof(struct aa_task_ctx),
+ };
+diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
+index 06355717ee84c..e38ceba39200b 100644
+--- a/security/apparmor/policy.c
++++ b/security/apparmor/policy.c
+@@ -1123,7 +1123,7 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
+
+ if (!name) {
+ /* remove namespace - can only happen if fqname[0] == ':' */
+- mutex_lock_nested(&ns->parent->lock, ns->level);
++ mutex_lock_nested(&ns->parent->lock, ns->parent->level);
+ __aa_bump_ns_revision(ns);
+ __aa_remove_ns(ns);
+ mutex_unlock(&ns->parent->lock);
+diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
+index 8cfc9493eefc7..7e32c09249b18 100644
+--- a/security/apparmor/policy_unpack.c
++++ b/security/apparmor/policy_unpack.c
+@@ -955,7 +955,7 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
+ * if not specified use previous version
+ * Mask off everything that is not kernel abi version
+ */
+- if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
++ if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v8)) {
+ audit_iface(NULL, NULL, NULL, "unsupported interface version",
+ e, error);
+ return error;
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index 5d7bb91c64876..9bc8f4bed754f 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -79,6 +79,17 @@ free_and_exit:
+ return -ENOMEM;
+ }
+
++static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
++{
++ struct dev_exception_item *ex, *tmp;
++
++ lockdep_assert_held(&devcgroup_mutex);
++
++ list_for_each_entry_safe(ex, tmp, orig, list) {
++ list_move_tail(&ex->list, dest);
++ }
++}
++
+ /*
+ * called under devcgroup_mutex
+ */
+@@ -601,11 +612,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+ int count, rc = 0;
+ struct dev_exception_item ex;
+ struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
++ struct dev_cgroup tmp_devcgrp;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ memset(&ex, 0, sizeof(ex));
++ memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
+ b = buffer;
+
+ switch (*b) {
+@@ -617,15 +630,27 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
+
+ if (!may_allow_all(parent))
+ return -EPERM;
+- dev_exception_clean(devcgroup);
+- devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+- if (!parent)
++ if (!parent) {
++ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
++ dev_exception_clean(devcgroup);
+ break;
++ }
+
++ INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
++ rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
++ &devcgroup->exceptions);
++ if (rc)
++ return rc;
++ dev_exception_clean(devcgroup);
+ rc = dev_exceptions_copy(&devcgroup->exceptions,
+ &parent->exceptions);
+- if (rc)
++ if (rc) {
++ dev_exceptions_move(&devcgroup->exceptions,
++ &tmp_devcgrp.exceptions);
+ return rc;
++ }
++ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
++ dev_exception_clean(&tmp_devcgrp);
+ break;
+ case DEVCG_DENY:
+ if (css_has_online_children(&devcgroup->css))
+diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
+index ea1aae3d07b3c..12bae47142113 100644
+--- a/security/integrity/digsig.c
++++ b/security/integrity/digsig.c
+@@ -121,6 +121,7 @@ int __init integrity_init_keyring(const unsigned int id)
+ {
+ struct key_restriction *restriction;
+ key_perm_t perm;
++ int ret;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW
+ | KEY_USR_READ | KEY_USR_SEARCH;
+@@ -141,7 +142,10 @@ int __init integrity_init_keyring(const unsigned int id)
+ perm |= KEY_USR_WRITE;
+
+ out:
+- return __integrity_init_keyring(id, perm, restriction);
++ ret = __integrity_init_keyring(id, perm, restriction);
++ if (ret)
++ kfree(restriction);
++ return ret;
+ }
+
+ int __init integrity_add_key(const unsigned int id, const void *data,
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 5fae6cfe8d910..146154e333e6a 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -361,24 +361,24 @@ static inline void ima_free_modsig(struct modsig *modsig)
+ /* LSM based policy rules require audit */
+ #ifdef CONFIG_IMA_LSM_RULES
+
+-#define security_filter_rule_init security_audit_rule_init
+-#define security_filter_rule_free security_audit_rule_free
+-#define security_filter_rule_match security_audit_rule_match
++#define ima_filter_rule_init security_audit_rule_init
++#define ima_filter_rule_free security_audit_rule_free
++#define ima_filter_rule_match security_audit_rule_match
+
+ #else
+
+-static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
+- void **lsmrule)
++static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
++ void **lsmrule)
+ {
+ return -EINVAL;
+ }
+
+-static inline void security_filter_rule_free(void *lsmrule)
++static inline void ima_filter_rule_free(void *lsmrule)
+ {
+ }
+
+-static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
+- void *lsmrule)
++static inline int ima_filter_rule_match(u32 secid, u32 field, u32 op,
++ void *lsmrule)
+ {
+ return -EINVAL;
+ }
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index a768f37a0a4de..ce9d594ddbcd3 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -615,6 +615,7 @@ int ima_load_data(enum kernel_load_data_id id)
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
++ break;
+ default:
+ break;
+ }
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 14aef74d3588a..6df0436462ab7 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -254,7 +254,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+- security_filter_rule_free(entry->lsm[i].rule);
++ ima_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry->lsm[i].args_p);
+ }
+ kfree(entry);
+@@ -286,10 +286,9 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+ if (!nentry->lsm[i].args_p)
+ goto out_err;
+
+- security_filter_rule_init(nentry->lsm[i].type,
+- Audit_equal,
+- nentry->lsm[i].args_p,
+- &nentry->lsm[i].rule);
++ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
++ nentry->lsm[i].args_p,
++ &nentry->lsm[i].rule);
+ if (!nentry->lsm[i].rule)
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ (char *)entry->lsm[i].args_p);
+@@ -371,6 +370,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
+ enum ima_hooks func, int mask)
+ {
+ int i;
++ bool result = false;
++ struct ima_rule_entry *lsm_rule = rule;
++ bool rule_reinitialized = false;
+
+ if (func == KEXEC_CMDLINE) {
+ if ((rule->flags & IMA_FUNC) && (rule->func == func))
+@@ -414,36 +416,55 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
+ int rc = 0;
+ u32 osid;
+
+- if (!rule->lsm[i].rule) {
+- if (!rule->lsm[i].args_p)
++ if (!lsm_rule->lsm[i].rule) {
++ if (!lsm_rule->lsm[i].args_p)
+ continue;
+ else
+ return false;
+ }
++
++retry:
+ switch (i) {
+ case LSM_OBJ_USER:
+ case LSM_OBJ_ROLE:
+ case LSM_OBJ_TYPE:
+ security_inode_getsecid(inode, &osid);
+- rc = security_filter_rule_match(osid,
+- rule->lsm[i].type,
+- Audit_equal,
+- rule->lsm[i].rule);
++ rc = ima_filter_rule_match(osid, lsm_rule->lsm[i].type,
++ Audit_equal,
++ lsm_rule->lsm[i].rule);
+ break;
+ case LSM_SUBJ_USER:
+ case LSM_SUBJ_ROLE:
+ case LSM_SUBJ_TYPE:
+- rc = security_filter_rule_match(secid,
+- rule->lsm[i].type,
+- Audit_equal,
+- rule->lsm[i].rule);
++ rc = ima_filter_rule_match(secid, lsm_rule->lsm[i].type,
++ Audit_equal,
++ lsm_rule->lsm[i].rule);
++ break;
+ default:
+ break;
+ }
+- if (!rc)
+- return false;
++
++ if (rc == -ESTALE && !rule_reinitialized) {
++ lsm_rule = ima_lsm_copy_rule(rule);
++ if (lsm_rule) {
++ rule_reinitialized = true;
++ goto retry;
++ }
++ }
++ if (!rc) {
++ result = false;
++ goto out;
++ }
++ }
++ result = true;
++
++out:
++ if (rule_reinitialized) {
++ for (i = 0; i < MAX_LSM_RULES; i++)
++ ima_filter_rule_free(lsm_rule->lsm[i].rule);
++ kfree(lsm_rule);
+ }
+- return true;
++ return result;
+ }
+
+ /*
+@@ -669,6 +690,7 @@ void __init ima_init_policy(void)
+ add_rules(default_measurement_rules,
+ ARRAY_SIZE(default_measurement_rules),
+ IMA_DEFAULT_POLICY);
++ break;
+ default:
+ break;
+ }
+@@ -821,10 +843,9 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ return -ENOMEM;
+
+ entry->lsm[lsm_rule].type = audit_type;
+- result = security_filter_rule_init(entry->lsm[lsm_rule].type,
+- Audit_equal,
+- entry->lsm[lsm_rule].args_p,
+- &entry->lsm[lsm_rule].rule);
++ result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
++ entry->lsm[lsm_rule].args_p,
++ &entry->lsm[lsm_rule].rule);
+ if (!entry->lsm[lsm_rule].rule) {
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ (char *)entry->lsm[lsm_rule].args_p);
+diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
+index 2283051d063bc..7b8be19dbb890 100644
+--- a/security/integrity/ima/ima_template.c
++++ b/security/integrity/ima/ima_template.c
+@@ -222,11 +222,11 @@ int template_desc_init_fields(const char *template_fmt,
+ }
+
+ if (fields && num_fields) {
+- *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
++ *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL);
+ if (*fields == NULL)
+ return -ENOMEM;
+
+- memcpy(*fields, found_fields, i * sizeof(*fields));
++ memcpy(*fields, found_fields, i * sizeof(**fields));
+ *num_fields = i;
+ }
+
+@@ -292,8 +292,11 @@ static struct ima_template_desc *restore_template_fmt(char *template_name)
+
+ template_desc->name = "";
+ template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+- if (!template_desc->fmt)
++ if (!template_desc->fmt) {
++ kfree(template_desc);
++ template_desc = NULL;
+ goto out;
++ }
+
+ spin_lock(&template_list);
+ list_add_tail_rcu(&template_desc->list, &defined_templates);
+diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
+index 2b341aa079644..0fabdea04514f 100644
+--- a/security/integrity/platform_certs/load_uefi.c
++++ b/security/integrity/platform_certs/load_uefi.c
+@@ -34,6 +34,7 @@ static const struct dmi_system_id uefi_skip_cert[] = {
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") },
+ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") },
++ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") },
+ { }
+ };
+
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index cca3ed9b06294..ccc7f3d78f626 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -306,7 +306,9 @@ static int ctl_elem_read_user(struct snd_card *card,
+ err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
+ if (err < 0)
+ goto error;
++ down_read(&card->controls_rwsem);
+ err = snd_ctl_elem_read(card, data);
++ up_read(&card->controls_rwsem);
+ if (err < 0)
+ goto error;
+ err = copy_ctl_value_to_user(userdata, valuep, data, type, count);
+@@ -334,7 +336,9 @@ static int ctl_elem_write_user(struct snd_ctl_file *file,
+ err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
+ if (err < 0)
+ goto error;
++ down_write(&card->controls_rwsem);
+ err = snd_ctl_elem_write(card, file, data);
++ up_write(&card->controls_rwsem);
+ if (err < 0)
+ goto error;
+ err = copy_ctl_value_to_user(userdata, valuep, data, type, count);
+diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
+index 44776e1463cbd..71d0ab1c99b35 100644
+--- a/sound/drivers/mts64.c
++++ b/sound/drivers/mts64.c
+@@ -816,6 +816,9 @@ static void snd_mts64_interrupt(void *private)
+ u8 status, data;
+ struct snd_rawmidi_substream *substream;
+
++ if (!mts)
++ return;
++
+ spin_lock(&mts->lock);
+ ret = mts64_read(mts->pardev->port);
+ data = ret & 0x00ff;
+diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
+index 04f4070fbf366..17b34bb9fecdf 100644
+--- a/sound/hda/ext/hdac_ext_stream.c
++++ b/sound/hda/ext/hdac_ext_stream.c
+@@ -475,23 +475,6 @@ int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo);
+
+-
+-/**
+- * snd_hdac_ext_stop_streams - stop all stream if running
+- * @bus: HD-audio core bus
+- */
+-void snd_hdac_ext_stop_streams(struct hdac_bus *bus)
+-{
+- struct hdac_stream *stream;
+-
+- if (bus->chip_init) {
+- list_for_each_entry(stream, &bus->stream_list, list)
+- snd_hdac_stream_stop(stream);
+- snd_hdac_bus_stop_chip(bus);
+- }
+-}
+-EXPORT_SYMBOL_GPL(snd_hdac_ext_stop_streams);
+-
+ /**
+ * snd_hdac_ext_stream_drsm_enable - enable DMA resume for a stream
+ * @bus: HD-audio core bus
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index b299b8b7f871a..2beb94828729d 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -142,6 +142,33 @@ void snd_hdac_stream_stop(struct hdac_stream *azx_dev)
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_stream_stop);
+
++/**
++ * snd_hdac_stop_streams - stop all streams
++ * @bus: HD-audio core bus
++ */
++void snd_hdac_stop_streams(struct hdac_bus *bus)
++{
++ struct hdac_stream *stream;
++
++ list_for_each_entry(stream, &bus->stream_list, list)
++ snd_hdac_stream_stop(stream);
++}
++EXPORT_SYMBOL_GPL(snd_hdac_stop_streams);
++
++/**
++ * snd_hdac_stop_streams_and_chip - stop all streams and chip if running
++ * @bus: HD-audio core bus
++ */
++void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus)
++{
++
++ if (bus->chip_init) {
++ snd_hdac_stop_streams(bus);
++ snd_hdac_bus_stop_chip(bus);
++ }
++}
++EXPORT_SYMBOL_GPL(snd_hdac_stop_streams_and_chip);
++
+ /**
+ * snd_hdac_stream_reset - reset a stream
+ * @azx_dev: HD-audio core stream to reset
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
+index 9790f5108a166..5cab049413fc9 100644
+--- a/sound/pci/asihpi/hpioctl.c
++++ b/sound/pci/asihpi/hpioctl.c
+@@ -352,7 +352,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ pci_dev->device, pci_dev->subsystem_vendor,
+ pci_dev->subsystem_device, pci_dev->devfn);
+
+- if (pci_enable_device(pci_dev) < 0) {
++ if (pcim_enable_device(pci_dev) < 0) {
+ dev_err(&pci_dev->dev,
+ "pci_enable_device failed, disabling device\n");
+ return -EIO;
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 6a159c6c2f546..6dff68691dfff 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1093,10 +1093,8 @@ EXPORT_SYMBOL_GPL(azx_init_chip);
+ void azx_stop_all_streams(struct azx *chip)
+ {
+ struct hdac_bus *bus = azx_bus(chip);
+- struct hdac_stream *s;
+
+- list_for_each_entry(s, &bus->stream_list, list)
+- snd_hdac_stream_stop(s);
++ snd_hdac_stop_streams(bus);
+ }
+ EXPORT_SYMBOL_GPL(azx_stop_all_streams);
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 091a7fe854510..54c67d8b7b493 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1820,6 +1820,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ static const struct snd_pci_quirk force_connect_list[] = {
+ SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
+ SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
++ SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
++ SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
+ SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+ {}
+ };
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index fa31a71595788..ee4f70cea76ef 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9777,6 +9777,17 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ }
+ }
+
++static void alc897_fixup_lenovo_headset_mode(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
++ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
++ }
++}
++
+ static const struct coef_fw alc668_coefs[] = {
+ WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
+ WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
+@@ -9860,6 +9871,8 @@ enum {
+ ALC897_FIXUP_LENOVO_HEADSET_MIC,
+ ALC897_FIXUP_HEADSET_MIC_PIN,
+ ALC897_FIXUP_HP_HSMIC_VERB,
++ ALC897_FIXUP_LENOVO_HEADSET_MODE,
++ ALC897_FIXUP_HEADSET_MIC_PIN2,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -10286,6 +10299,19 @@ static const struct hda_fixup alc662_fixups[] = {
+ { }
+ },
+ },
++ [ALC897_FIXUP_LENOVO_HEADSET_MODE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc897_fixup_lenovo_headset_mode,
++ },
++ [ALC897_FIXUP_HEADSET_MIC_PIN2] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -10338,6 +10364,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
++ SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 4cbef9affffda..feb590a205447 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -1598,7 +1598,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ if (val > 6) {
+ dev_err(dev, "Invalid pll-in\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ pcm512x->pll_in = val;
+ }
+@@ -1607,7 +1607,7 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ if (val > 6) {
+ dev_err(dev, "Invalid pll-out\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ pcm512x->pll_out = val;
+ }
+@@ -1616,12 +1616,12 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+ dev_err(dev,
+ "Error: both pll-in and pll-out, or none\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
+ dev_err(dev, "Error: pll-in == pll-out\n");
+ ret = -EINVAL;
+- goto err_clk;
++ goto err_pm;
+ }
+ }
+ #endif
+diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
+index f8c0f977206c1..cc7eb34a641d9 100644
+--- a/sound/soc/codecs/rt298.c
++++ b/sound/soc/codecs/rt298.c
+@@ -1166,6 +1166,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Geminilake")
+ }
+ },
++ {
++ .ident = "Intel Kabylake R RVP",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
++ }
++ },
+ { }
+ };
+
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index f211817341709..fefdd8cbd8f53 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -3185,8 +3185,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
+ if (ret < 0)
+ goto err;
+
+- pm_runtime_put(&i2c->dev);
+-
+ return 0;
+ err:
+ pm_runtime_disable(&i2c->dev);
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 6dbab3fc6537e..4ae7ec8d71cdc 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3711,7 +3711,12 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
+ } else {
+ dev_dbg(component->dev, "Jack not detected\n");
+
++ /* Release wm8994->accdet_lock to avoid deadlock:
++ * cancel_delayed_work_sync() takes wm8994->mic_work internal
++ * lock and wm1811_mic_work takes wm8994->accdet_lock */
++ mutex_unlock(&wm8994->accdet_lock);
+ cancel_delayed_work_sync(&wm8994->mic_work);
++ mutex_lock(&wm8994->accdet_lock);
+
+ snd_soc_component_update_bits(component, WM8958_MICBIAS2,
+ WM8958_MICB2_DISCH, WM8958_MICB2_DISCH);
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index 96aa2c015572f..a96a7cd8af6ea 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -466,8 +466,10 @@ static int graph_for_each_link(struct asoc_simple_priv *priv,
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(cpu_ep);
+ return ret;
++ }
+
+ codec_port_old = codec_port;
+ }
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 7830d014d9247..6a8edb0a559de 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -428,6 +428,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ {
++ /* Advantech MICA-071 */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
++ },
++		/* OVCD Th = 1500uA to reliably detect head-phones vs -set */
++ .driver_data = (void *)(BYT_RT5640_IN3_MAP |
++ BYT_RT5640_JD_SRC_JD2_IN4N |
++ BYT_RT5640_OVCD_TH_1500UA |
++ BYT_RT5640_OVCD_SF_0P75 |
++ BYT_RT5640_MONO_SPEAKER |
++ BYT_RT5640_DIFF_MIC |
++ BYT_RT5640_MCLK_EN),
++ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ARCHOS"),
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index 2e5fbd2209235..80cff74c23af0 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -438,7 +438,7 @@ static int skl_free(struct hdac_bus *bus)
+
+ skl->init_done = 0; /* to be sure */
+
+- snd_hdac_ext_stop_streams(bus);
++ snd_hdac_stop_streams_and_chip(bus);
+
+ if (bus->irq >= 0)
+ free_irq(bus->irq, (void *)bus);
+@@ -1116,7 +1116,10 @@ static void skl_shutdown(struct pci_dev *pci)
+ if (!skl->init_done)
+ return;
+
+- snd_hdac_ext_stop_streams(bus);
++ snd_hdac_stop_streams(bus);
++ snd_hdac_ext_bus_link_power_down_all(bus);
++ skl_dsp_sleep(skl->dsp);
++
+ list_for_each_entry(s, &bus->stream_list, list) {
+ stream = stream_to_hdac_ext_stream(s);
+ snd_hdac_ext_stream_decouple(bus, stream, false);
+diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
+index b66f7dee1e149..f6ec6937a71bb 100644
+--- a/sound/soc/mediatek/common/mtk-btcvsd.c
++++ b/sound/soc/mediatek/common/mtk-btcvsd.c
+@@ -1054,11 +1054,9 @@ static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
+ struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- mtk_btcvsd_snd_write(bt, buf, count);
++ return mtk_btcvsd_snd_write(bt, buf, count);
+ else
+- mtk_btcvsd_snd_read(bt, buf, count);
+-
+- return 0;
++ return mtk_btcvsd_snd_read(bt, buf, count);
+ }
+
+ static struct snd_pcm_ops mtk_btcvsd_ops = {
+diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+index 0ee29255e7319..f3dbd8164b864 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
++++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+@@ -1073,16 +1073,6 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+
+ afe->dev = &pdev->dev;
+
+- irq_id = platform_get_irq(pdev, 0);
+- if (irq_id <= 0)
+- return irq_id < 0 ? irq_id : -ENXIO;
+- ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
+- 0, "Afe_ISR_Handle", (void *)afe);
+- if (ret) {
+- dev_err(afe->dev, "could not request_irq\n");
+- return ret;
+- }
+-
+ afe->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(afe->base_addr))
+ return PTR_ERR(afe->base_addr);
+@@ -1158,6 +1148,16 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_pm_disable;
+
++ irq_id = platform_get_irq(pdev, 0);
++ if (irq_id <= 0)
++ return irq_id < 0 ? irq_id : -ENXIO;
++ ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
++ 0, "Afe_ISR_Handle", (void *)afe);
++ if (ret) {
++ dev_err(afe->dev, "could not request_irq\n");
++ goto err_pm_disable;
++ }
++
+ dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
+ return 0;
+
+diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+index 6f8542329bab9..a21aefe1a4d1c 100644
+--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
++++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+@@ -200,14 +200,16 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+ if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[0].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+ mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ if (!mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+ mt8173_rt5650_rt5514_codec_conf[0].of_node =
+ mt8173_rt5650_rt5514_dais[DAI_LINK_CODEC_I2S].codecs[1].of_node;
+@@ -219,6 +221,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+
++out:
+ of_node_put(platform_node);
+ return ret;
+ }
+diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
+index 7096b5263e25e..e9f9642e988f2 100644
+--- a/sound/soc/pxa/mmp-pcm.c
++++ b/sound/soc/pxa/mmp-pcm.c
+@@ -85,7 +85,7 @@ static bool filter(struct dma_chan *chan, void *param)
+
+ devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name,
+ dma_data->ssp_id);
+- if ((strcmp(dev_name(chan->device->dev), devname) == 0) &&
++ if (devname && (strcmp(dev_name(chan->device->dev), devname) == 0) &&
+ (chan->chan_id == dma_data->dma_res->start)) {
+ found = true;
+ }
+diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
+index 1707414cfa921..9f6cbaf3c0e97 100644
+--- a/sound/soc/rockchip/rockchip_pdm.c
++++ b/sound/soc/rockchip/rockchip_pdm.c
+@@ -368,6 +368,7 @@ static int rockchip_pdm_runtime_resume(struct device *dev)
+
+ ret = clk_prepare_enable(pdm->hclk);
+ if (ret) {
++ clk_disable_unprepare(pdm->clk);
+ dev_err(pdm->dev, "hclock enable failed %d\n", ret);
+ return ret;
+ }
+diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
+index 6635145a26c4d..b2b4e5b7739aa 100644
+--- a/sound/soc/rockchip/rockchip_spdif.c
++++ b/sound/soc/rockchip/rockchip_spdif.c
+@@ -86,6 +86,7 @@ static int __maybe_unused rk_spdif_runtime_resume(struct device *dev)
+
+ ret = clk_prepare_enable(spdif->hclk);
+ if (ret) {
++ clk_disable_unprepare(spdif->mclk);
+ dev_err(spdif->dev, "hclk clock enable failed %d\n", ret);
+ return ret;
+ }
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index 8ca56ba600cf4..22104cb84b1ca 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -303,7 +303,8 @@ static void line6_data_received(struct urb *urb)
+ for (;;) {
+ done =
+ line6_midibuf_read(mb, line6->buffer_message,
+- LINE6_MIDI_MESSAGE_MAXLEN);
++ LINE6_MIDI_MESSAGE_MAXLEN,
++ LINE6_MIDIBUF_READ_RX);
+
+ if (done <= 0)
+ break;
+diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
+index ba0e2b7e8fe19..0838632c788e4 100644
+--- a/sound/usb/line6/midi.c
++++ b/sound/usb/line6/midi.c
+@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
+ int req, done;
+
+ for (;;) {
+- req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
++ req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
++ LINE6_FALLBACK_MAXPACKETSIZE);
+ done = snd_rawmidi_transmit_peek(substream, chunk, req);
+
+ if (done == 0)
+@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
+
+ for (;;) {
+ done = line6_midibuf_read(mb, chunk,
+- LINE6_FALLBACK_MAXPACKETSIZE);
++ LINE6_FALLBACK_MAXPACKETSIZE,
++ LINE6_MIDIBUF_READ_TX);
+
+ if (done == 0)
+ break;
+diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
+index 6a70463f82c4e..e7f830f7526c9 100644
+--- a/sound/usb/line6/midibuf.c
++++ b/sound/usb/line6/midibuf.c
+@@ -9,6 +9,7 @@
+
+ #include "midibuf.h"
+
++
+ static int midibuf_message_length(unsigned char code)
+ {
+ int message_length;
+@@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code)
+
+ message_length = length[(code >> 4) - 8];
+ } else {
+- /*
+- Note that according to the MIDI specification 0xf2 is
+- the "Song Position Pointer", but this is used by Line 6
+- to send sysex messages to the host.
+- */
+- static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
++ static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1,
+ 1, 1, 1, -1, 1, 1
+ };
+ message_length = length[code & 0x0f];
+@@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
+ }
+
+ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+- int length)
++ int length, int read_type)
+ {
+ int bytes_used;
+ int length1, length2;
+@@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
+
+ length1 = this->size - this->pos_read;
+
+- /* check MIDI command length */
+ command = this->buf[this->pos_read];
++ /*
++ PODxt always has status byte lower nibble set to 0010,
++	 when it means to send 0000, so we correct it here so
++ that control/program changes come on channel 1 and
++ sysex message status byte is correct
++ */
++ if (read_type == LINE6_MIDIBUF_READ_RX) {
++ if (command == 0xb2 || command == 0xc2 || command == 0xf2) {
++ unsigned char fixed = command & 0xf0;
++ this->buf[this->pos_read] = fixed;
++ command = fixed;
++ }
++ }
+
++ /* check MIDI command length */
+ if (command & 0x80) {
+ midi_length = midibuf_message_length(command);
+ this->command_prev = command;
+diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h
+index 124a8f9f7e96c..542e8d836f87d 100644
+--- a/sound/usb/line6/midibuf.h
++++ b/sound/usb/line6/midibuf.h
+@@ -8,6 +8,9 @@
+ #ifndef MIDIBUF_H
+ #define MIDIBUF_H
+
++#define LINE6_MIDIBUF_READ_TX 0
++#define LINE6_MIDIBUF_READ_RX 1
++
+ struct midi_buffer {
+ unsigned char *buf;
+ int size;
+@@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb);
+ extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
+ extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
+ extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
+- int length);
++ int length, int read_type);
+ extern void line6_midibuf_reset(struct midi_buffer *mb);
+ extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
+ int length);
+diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
+index b667bb8ad938a..e765f98bb0b1d 100644
+--- a/sound/usb/line6/pod.c
++++ b/sound/usb/line6/pod.c
+@@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = {
+ .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
+ };
+
++
+ static const char pod_version_header[] = {
+- 0xf2, 0x7e, 0x7f, 0x06, 0x02
++ 0xf0, 0x7e, 0x7f, 0x06, 0x02
+ };
+
+ static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
+diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
+index f9fd1325f5bda..7d685a15723ff 100644
+--- a/tools/arch/parisc/include/uapi/asm/mman.h
++++ b/tools/arch/parisc/include/uapi/asm/mman.h
+@@ -1,20 +1,20 @@
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+ #ifndef TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
+ #define TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
+-#define MADV_DODUMP 70
++#define MADV_DODUMP 17
+ #define MADV_DOFORK 11
+-#define MADV_DONTDUMP 69
++#define MADV_DONTDUMP 16
+ #define MADV_DONTFORK 10
+ #define MADV_DONTNEED 4
+ #define MADV_FREE 8
+-#define MADV_HUGEPAGE 67
+-#define MADV_MERGEABLE 65
+-#define MADV_NOHUGEPAGE 68
++#define MADV_HUGEPAGE 14
++#define MADV_MERGEABLE 12
++#define MADV_NOHUGEPAGE 15
+ #define MADV_NORMAL 0
+ #define MADV_RANDOM 1
+ #define MADV_REMOVE 9
+ #define MADV_SEQUENTIAL 2
+-#define MADV_UNMERGEABLE 66
++#define MADV_UNMERGEABLE 13
+ #define MADV_WILLNEED 3
+ #define MAP_ANONYMOUS 0x10
+ #define MAP_DENYWRITE 0x0800
+diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
+index 3eb8411ab60ef..e95b72ec19bc0 100644
+--- a/tools/arch/x86/include/uapi/asm/vmx.h
++++ b/tools/arch/x86/include/uapi/asm/vmx.h
+@@ -33,7 +33,7 @@
+ #define EXIT_REASON_TRIPLE_FAULT 2
+ #define EXIT_REASON_INIT_SIGNAL 3
+
+-#define EXIT_REASON_PENDING_INTERRUPT 7
++#define EXIT_REASON_INTERRUPT_WINDOW 7
+ #define EXIT_REASON_NMI_WINDOW 8
+ #define EXIT_REASON_TASK_SWITCH 9
+ #define EXIT_REASON_CPUID 10
+@@ -94,7 +94,7 @@
+ { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
+ { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
+ { EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \
+- { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
++ { EXIT_REASON_INTERRUPT_WINDOW, "INTERRUPT_WINDOW" }, \
+ { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
+ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
+ { EXIT_REASON_CPUID, "CPUID" }, \
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index bae6b261481db..ccf5580442d29 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -162,7 +162,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
+ return false;
+
+ insn = find_insn(file, func->sec, func->offset);
+- if (!insn->func)
++ if (!insn || !insn->func)
+ return false;
+
+ func_for_each_insn_all(file, func, insn) {
+diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
+index 4aa6de1aa67dc..d9329ae84e178 100644
+--- a/tools/perf/bench/bench.h
++++ b/tools/perf/bench/bench.h
+@@ -10,25 +10,13 @@ extern struct timeval bench__start, bench__end, bench__runtime;
+ * The madvise transparent hugepage constants were added in glibc
+ * 2.13. For compatibility with older versions of glibc, define these
+ * tokens if they are not already defined.
+- *
+- * PA-RISC uses different madvise values from other architectures and
+- * needs to be special-cased.
+ */
+-#ifdef __hppa__
+-# ifndef MADV_HUGEPAGE
+-# define MADV_HUGEPAGE 67
+-# endif
+-# ifndef MADV_NOHUGEPAGE
+-# define MADV_NOHUGEPAGE 68
+-# endif
+-#else
+ # ifndef MADV_HUGEPAGE
+ # define MADV_HUGEPAGE 14
+ # endif
+ # ifndef MADV_NOHUGEPAGE
+ # define MADV_NOHUGEPAGE 15
+ # endif
+-#endif
+
+ int bench_numa(int argc, const char **argv);
+ int bench_sched_messaging(int argc, const char **argv);
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index a5201de1a191d..6052eb057821d 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -86,6 +86,34 @@
+ # define F_LINUX_SPECIFIC_BASE 1024
+ #endif
+
++#define RAW_SYSCALL_ARGS_NUM 6
++
++/*
++ * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
++ */
++struct syscall_arg_fmt {
++ size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
++ bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
++ unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
++ void *parm;
++ const char *name;
++ bool show_zero;
++};
++
++struct syscall_fmt {
++ const char *name;
++ const char *alias;
++ struct {
++ const char *sys_enter,
++ *sys_exit;
++ } bpf_prog_name;
++ struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
++ u8 nr_args;
++ bool errpid;
++ bool timeout;
++ bool hexret;
++};
++
+ struct trace {
+ struct perf_tool tool;
+ struct syscalltbl *sctbl;
+@@ -694,27 +722,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
+ #include "trace/beauty/socket_type.c"
+ #include "trace/beauty/waitid_options.c"
+
+-struct syscall_arg_fmt {
+- size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
+- unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
+- void *parm;
+- const char *name;
+- bool show_zero;
+-};
+-
+-static struct syscall_fmt {
+- const char *name;
+- const char *alias;
+- struct {
+- const char *sys_enter,
+- *sys_exit;
+- } bpf_prog_name;
+- struct syscall_arg_fmt arg[6];
+- u8 nr_args;
+- bool errpid;
+- bool timeout;
+- bool hexret;
+-} syscall_fmts[] = {
++static struct syscall_fmt syscall_fmts[] = {
+ { .name = "access",
+ .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
+ { .name = "arch_prctl",
+@@ -1012,7 +1020,7 @@ struct syscall {
+ */
+ struct bpf_map_syscall_entry {
+ bool enabled;
+- u16 string_args_len[6];
++ u16 string_args_len[RAW_SYSCALL_ARGS_NUM];
+ };
+
+ /*
+@@ -1437,7 +1445,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
+ {
+ int idx;
+
+- if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
++ if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
+ nr_args = sc->fmt->nr_args;
+
+ sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
+@@ -1453,15 +1461,37 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
+ return 0;
+ }
+
+-static int syscall__set_arg_fmts(struct syscall *sc)
++static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
++};
++
++static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
++{
++ const struct syscall_arg_fmt *fmt = fmtp;
++ return strcmp(name, fmt->name);
++}
++
++static struct syscall_arg_fmt *
++__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
+ {
+- struct tep_format_field *field, *last_field = NULL;
+- int idx = 0, len;
++ return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
++}
++
++static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
++{
++ const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
++ return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
++}
+
+- for (field = sc->args; field; field = field->next, ++idx) {
++static struct tep_format_field *
++syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
++{
++ struct tep_format_field *last_field = NULL;
++ int len;
++
++ for (; field; field = field->next, ++arg) {
+ last_field = field;
+
+- if (sc->fmt && sc->fmt->arg[idx].scnprintf)
++ if (arg->scnprintf)
+ continue;
+
+ len = strlen(field->name);
+@@ -1469,13 +1499,13 @@ static int syscall__set_arg_fmts(struct syscall *sc)
+ if (strcmp(field->type, "const char *") == 0 &&
+ ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
+ strstr(field->name, "path") != NULL))
+- sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
++ arg->scnprintf = SCA_FILENAME;
+ else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
+- sc->arg_fmt[idx].scnprintf = SCA_PTR;
++ arg->scnprintf = SCA_PTR;
+ else if (strcmp(field->type, "pid_t") == 0)
+- sc->arg_fmt[idx].scnprintf = SCA_PID;
++ arg->scnprintf = SCA_PID;
+ else if (strcmp(field->type, "umode_t") == 0)
+- sc->arg_fmt[idx].scnprintf = SCA_MODE_T;
++ arg->scnprintf = SCA_MODE_T;
+ else if ((strcmp(field->type, "int") == 0 ||
+ strcmp(field->type, "unsigned int") == 0 ||
+ strcmp(field->type, "long") == 0) &&
+@@ -1487,10 +1517,24 @@ static int syscall__set_arg_fmts(struct syscall *sc)
+ * 23 unsigned int
+ * 7 unsigned long
+ */
+- sc->arg_fmt[idx].scnprintf = SCA_FD;
++ arg->scnprintf = SCA_FD;
++ } else {
++ struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
++
++ if (fmt) {
++ arg->scnprintf = fmt->scnprintf;
++ arg->strtoul = fmt->strtoul;
++ }
+ }
+ }
+
++ return last_field;
++}
++
++static int syscall__set_arg_fmts(struct syscall *sc)
++{
++ struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
++
+ if (last_field)
+ sc->args_size = last_field->offset + last_field->size;
+
+@@ -1511,11 +1555,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+
+ sc = trace->syscalls.table + id;
+ if (sc->nonexistent)
+- return 0;
++ return -EEXIST;
+
+ if (name == NULL) {
+ sc->nonexistent = true;
+- return 0;
++ return -EEXIST;
+ }
+
+ sc->name = name;
+@@ -1529,11 +1573,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
+ sc->tp_format = trace_event__tp_format("syscalls", tp_name);
+ }
+
+- if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
+- return -ENOMEM;
+-
+- if (IS_ERR(sc->tp_format))
++ /*
++ * Fails to read trace point format via sysfs node, so the trace point
++ * doesn't exist. Set the 'nonexistent' flag as true.
++ */
++ if (IS_ERR(sc->tp_format)) {
++ sc->nonexistent = true;
+ return PTR_ERR(sc->tp_format);
++ }
++
++ if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
++ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
++ return -ENOMEM;
+
+ sc->args = sc->tp_format->format.fields;
+ /*
+@@ -1736,6 +1787,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
+ if (arg.mask & bit)
+ continue;
+
++ arg.fmt = &sc->arg_fmt[arg.idx];
+ val = syscall_arg__val(&arg, arg.idx);
+ /*
+ * Some syscall args need some mask, most don't and
+@@ -1825,11 +1877,8 @@ static struct syscall *trace__syscall_info(struct trace *trace,
+ (err = trace__read_syscall_info(trace, id)) != 0)
+ goto out_cant_read;
+
+- if (trace->syscalls.table[id].name == NULL) {
+- if (trace->syscalls.table[id].nonexistent)
+- return NULL;
++ if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
+ goto out_cant_read;
+- }
+
+ return &trace->syscalls.table[id];
+
+diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
+index 7e06605f7c763..4cc4f6b3d4a13 100644
+--- a/tools/perf/trace/beauty/beauty.h
++++ b/tools/perf/trace/beauty/beauty.h
+@@ -78,6 +78,8 @@ struct augmented_arg {
+ u64 value[];
+ };
+
++struct syscall_arg_fmt;
++
+ /**
+ * @val: value of syscall argument being formatted
+ * @args: All the args, use syscall_args__val(arg, nth) to access one
+@@ -94,6 +96,7 @@ struct augmented_arg {
+ struct syscall_arg {
+ unsigned long val;
+ unsigned char *args;
++ struct syscall_arg_fmt *fmt;
+ struct {
+ struct augmented_arg *args;
+ int size;
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index c1809695f8721..df4c575a0d94e 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1995,7 +1995,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
+ *size = sym->start - *start;
+ if (idx > 0) {
+ if (*size)
+- return 1;
++ return 0;
+ } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
+ print_duplicate_syms(dso, sym_name);
+ return -EINVAL;
+diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
+index 3c874f52f1a25..4da900bdb2f18 100644
+--- a/tools/perf/util/data.c
++++ b/tools/perf/util/data.c
+@@ -120,6 +120,7 @@ int perf_data__open_dir(struct perf_data *data)
+ file->size = st.st_size;
+ }
+
++ closedir(dir);
+ if (!files)
+ return -EINVAL;
+
+@@ -128,6 +129,7 @@ int perf_data__open_dir(struct perf_data *data)
+ return 0;
+
+ out_err:
++ closedir(dir);
+ close_dir(files, nr);
+ return ret;
+ }
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index ab34ef2c661f8..f1e2f566ce6fc 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -254,26 +254,13 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
+ {
+ Dwarf_Attribute attr;
+
+- if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
++ if (dwarf_attr_integrate(tp_die, attr_name, &attr) == NULL ||
+ dwarf_formudata(&attr, result) != 0)
+ return -ENOENT;
+
+ return 0;
+ }
+
+-/* Get attribute and translate it as a sdata */
+-static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name,
+- Dwarf_Sword *result)
+-{
+- Dwarf_Attribute attr;
+-
+- if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
+- dwarf_formsdata(&attr, result) != 0)
+- return -ENOENT;
+-
+- return 0;
+-}
+-
+ /**
+ * die_is_signed_type - Check whether a type DIE is signed or not
+ * @tp_die: a DIE of a type
+@@ -397,9 +384,9 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+ /* Get the call file index number in CU DIE */
+ static int die_get_call_fileno(Dwarf_Die *in_die)
+ {
+- Dwarf_Sword idx;
++ Dwarf_Word idx;
+
+- if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0)
++ if (die_get_attr_udata(in_die, DW_AT_call_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+@@ -408,9 +395,9 @@ static int die_get_call_fileno(Dwarf_Die *in_die)
+ /* Get the declared file index number in CU DIE */
+ static int die_get_decl_fileno(Dwarf_Die *pdie)
+ {
+- Dwarf_Sword idx;
++ Dwarf_Word idx;
+
+- if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0)
++ if (die_get_attr_udata(pdie, DW_AT_decl_file, &idx) == 0)
+ return (int)idx;
+ else
+ return -ENOENT;
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index f15258fbe9dbf..4fef8d6bc2255 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1157,7 +1157,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ (!used_opd && syms_ss->adjust_symbols)) {
+ GElf_Phdr phdr;
+
+- if (elf_read_program_header(syms_ss->elf,
++ if (elf_read_program_header(runtime_ss->elf,
+ (u64)sym.st_value, &phdr)) {
+ pr_debug4("%s: failed to find program header for "
+ "symbol: %s st_value: %#" PRIx64 "\n",
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index fe587a9594634..49dd029ec80bd 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -1881,7 +1881,7 @@ sub run_scp_mod {
+
+ sub _get_grub_index {
+
+- my ($command, $target, $skip) = @_;
++ my ($command, $target, $skip, $submenu) = @_;
+
+ return if (defined($grub_number) && defined($last_grub_menu) &&
+ $last_grub_menu eq $grub_menu && defined($last_machine) &&
+@@ -1898,11 +1898,16 @@ sub _get_grub_index {
+
+ my $found = 0;
+
++ my $submenu_number = 0;
++
+ while (<IN>) {
+ if (/$target/) {
+ $grub_number++;
+ $found = 1;
+ last;
++ } elsif (defined($submenu) && /$submenu/) {
++ $submenu_number++;
++ $grub_number = -1;
+ } elsif (/$skip/) {
+ $grub_number++;
+ }
+@@ -1911,6 +1916,9 @@ sub _get_grub_index {
+
+ dodie "Could not find '$grub_menu' through $command on $machine"
+ if (!$found);
++ if ($submenu_number > 0) {
++ $grub_number = "$submenu_number>$grub_number";
++ }
+ doprint "$grub_number\n";
+ $last_grub_menu = $grub_menu;
+ $last_machine = $machine;
+@@ -1921,6 +1929,7 @@ sub get_grub_index {
+ my $command;
+ my $target;
+ my $skip;
++ my $submenu;
+ my $grub_menu_qt;
+
+ if ($reboot_type !~ /^grub/) {
+@@ -1935,8 +1944,9 @@ sub get_grub_index {
+ $skip = '^\s*title\s';
+ } elsif ($reboot_type eq "grub2") {
+ $command = "cat $grub_file";
+- $target = '^menuentry.*' . $grub_menu_qt;
+- $skip = '^menuentry\s|^submenu\s';
++ $target = '^\s*menuentry.*' . $grub_menu_qt;
++ $skip = '^\s*menuentry';
++ $submenu = '^\s*submenu\s';
+ } elsif ($reboot_type eq "grub2bls") {
+ $command = $grub_bls_get;
+ $target = '^title=.*' . $grub_menu_qt;
+@@ -1945,7 +1955,7 @@ sub get_grub_index {
+ return;
+ }
+
+- _get_grub_index($command, $target, $skip);
++ _get_grub_index($command, $target, $skip, $submenu);
+ }
+
+ sub wait_for_input
+@@ -2009,7 +2019,7 @@ sub reboot_to {
+ if ($reboot_type eq "grub") {
+ run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
+ } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) {
+- run_ssh "$grub_reboot $grub_number";
++ run_ssh "$grub_reboot \"'$grub_number'\"";
+ } elsif ($reboot_type eq "syslinux") {
+ run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path";
+ } elsif (defined $reboot_script) {
+@@ -3726,9 +3736,10 @@ sub test_this_config {
+ # .config to make sure it is missing the config that
+ # we had before
+ my %configs = %min_configs;
+- delete $configs{$config};
++ $configs{$config} = "# $config is not set";
+ make_new_config ((values %configs), (values %keep_configs));
+ make_oldconfig;
++ delete $configs{$config};
+ undef %configs;
+ assign_configs \%configs, $output_config;
+
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 612f6757015d5..40ee6f57af78a 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -78,26 +78,34 @@ override LDFLAGS =
+ override MAKEFLAGS =
+ endif
+
+-# Append kselftest to KBUILD_OUTPUT to avoid cluttering
++# Append kselftest to KBUILD_OUTPUT and O to avoid cluttering
+ # KBUILD_OUTPUT with selftest objects and headers installed
+ # by selftests Makefile or lib.mk.
+ ifdef building_out_of_srctree
+ override LDFLAGS =
+ endif
+
+-ifneq ($(O),)
+- BUILD := $(O)
++top_srcdir ?= ../../..
++
++ifeq ("$(origin O)", "command line")
++ KBUILD_OUTPUT := $(O)
++endif
++
++ifneq ($(KBUILD_OUTPUT),)
++ # Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
++ # expand a shell special character '~'. We use a somewhat tedious way here.
++ abs_objtree := $(shell cd $(top_srcdir) && mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
++ $(if $(abs_objtree),, \
++ $(error failed to create output directory "$(KBUILD_OUTPUT)"))
++ # $(realpath ...) resolves symlinks
++ abs_objtree := $(realpath $(abs_objtree))
++ BUILD := $(abs_objtree)/kselftest
+ else
+- ifneq ($(KBUILD_OUTPUT),)
+- BUILD := $(KBUILD_OUTPUT)/kselftest
+- else
+- BUILD := $(shell pwd)
+- DEFAULT_INSTALL_HDR_PATH := 1
+- endif
++ BUILD := $(CURDIR)
++ DEFAULT_INSTALL_HDR_PATH := 1
+ endif
+
+ # Prepare for headers install
+-top_srcdir ?= ../../..
+ include $(top_srcdir)/scripts/subarch.include
+ ARCH ?= $(SUBARCH)
+ export KSFT_KHDR_INSTALL_DONE := 1
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index a90f394f9aa90..d374878cc0ba9 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -87,6 +87,11 @@ test_create_read()
+ {
+ local file=$efivarfs_mount/$FUNCNAME-$test_guid
+ ./create-read $file
++ if [ $? -ne 0 ]; then
++ echo "create and read $file failed"
++ file_cleanup $file
++ exit 1
++ fi
+ file_cleanup $file
+ }
+
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+index ca2ffd7957f9e..f261eeccfaf6d 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+@@ -42,11 +42,18 @@ cnt_trace() {
+
+ test_event_enabled() {
+ val=$1
++ check_times=10 # wait for 10 * SLEEP_TIME at most
+
+- e=`cat $EVENT_ENABLE`
+- if [ "$e" != $val ]; then
+- fail "Expected $val but found $e"
+- fi
++ while [ $check_times -ne 0 ]; do
++ e=`cat $EVENT_ENABLE`
++ if [ "$e" == $val ]; then
++ return 0
++ fi
++ sleep $SLEEP_TIME
++ check_times=$((check_times - 1))
++ done
++
++ fail "Expected $val but found $e"
+ }
+
+ run_enable_disable() {
+diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
+index f52e0ba84fedb..3d27069b9ed9c 100644
+--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
++++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
+@@ -18,8 +18,8 @@
+ /*
+ * Definitions of Primary Processor-Based VM-Execution Controls.
+ */
+-#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
+-#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
++#define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
++#define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
+ #define CPU_BASED_HLT_EXITING 0x00000080
+ #define CPU_BASED_INVLPG_EXITING 0x00000200
+ #define CPU_BASED_MWAIT_EXITING 0x00000400
+@@ -30,7 +30,7 @@
+ #define CPU_BASED_CR8_LOAD_EXITING 0x00080000
+ #define CPU_BASED_CR8_STORE_EXITING 0x00100000
+ #define CPU_BASED_TPR_SHADOW 0x00200000
+-#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
++#define CPU_BASED_NMI_WINDOW_EXITING 0x00400000
+ #define CPU_BASED_MOV_DR_EXITING 0x00800000
+ #define CPU_BASED_UNCOND_IO_EXITING 0x01000000
+ #define CPU_BASED_USE_IO_BITMAPS 0x02000000
+@@ -103,7 +103,7 @@
+ #define EXIT_REASON_EXCEPTION_NMI 0
+ #define EXIT_REASON_EXTERNAL_INTERRUPT 1
+ #define EXIT_REASON_TRIPLE_FAULT 2
+-#define EXIT_REASON_PENDING_INTERRUPT 7
++#define EXIT_REASON_INTERRUPT_WINDOW 7
+ #define EXIT_REASON_NMI_WINDOW 8
+ #define EXIT_REASON_TASK_SWITCH 9
+ #define EXIT_REASON_CPUID 10
+diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+index 5590fd2bcf87d..69e482a95c47f 100644
+--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
++++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+@@ -98,7 +98,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
+- control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
++ control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
+ vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
+ vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
+
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index 8794ce382bf5a..59479442faa67 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -130,6 +130,11 @@ endef
+ clean:
+ $(CLEAN)
+
++# Enables to extend CFLAGS and LDFLAGS from command line, e.g.
++# make USERCFLAGS=-Werror USERLDFLAGS=-static
++CFLAGS += $(USERCFLAGS)
++LDFLAGS += $(USERLDFLAGS)
++
+ # When make O= with kselftest target from main level
+ # the following aren't defined.
+ #
+diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+index b48e1833bc896..76645aaf2b58f 100755
+--- a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
++++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
+@@ -35,6 +35,8 @@ cleanup() {
+ for i in 1 2;do ip netns del nsrouter$i;done
+ }
+
++trap cleanup EXIT
++
+ ipv4() {
+ echo -n 192.168.$1.2
+ }
+@@ -146,11 +148,17 @@ ip netns exec nsclient1 nft -f - <<EOF
+ table inet filter {
+ counter unknown { }
+ counter related { }
++ counter redir4 { }
++ counter redir6 { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+- meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
++ icmp type "redirect" ct state "related" counter name "redir4" accept
++ icmpv6 type "nd-redirect" ct state "related" counter name "redir6" accept
++
++ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+ meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
++
+ counter name "unknown" drop
+ }
+ }
+@@ -279,5 +287,29 @@ else
+ echo "ERROR: icmp error RELATED state test has failed"
+ fi
+
+-cleanup
++# add 'bad' route, expect icmp REDIRECT to be generated
++ip netns exec nsclient1 ip route add 192.168.1.42 via 192.168.1.1
++ip netns exec nsclient1 ip route add dead:1::42 via dead:1::1
++
++ip netns exec "nsclient1" ping -q -c 2 192.168.1.42 > /dev/null
++
++expect="packets 1 bytes 112"
++check_counter nsclient1 "redir4" "$expect"
++if [ $? -ne 0 ];then
++ ret=1
++fi
++
++ip netns exec "nsclient1" ping -c 1 dead:1::42 > /dev/null
++expect="packets 1 bytes 192"
++check_counter nsclient1 "redir6" "$expect"
++if [ $? -ne 0 ];then
++ ret=1
++fi
++
++if [ $ret -eq 0 ];then
++ echo "PASS: icmp redirects had RELATED state"
++else
++ echo "ERROR: icmp redirect RELATED state test has failed"
++fi
++
+ exit $ret
+diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+index 02f6b4efde145..e54d7a4089eac 100644
+--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
++++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+@@ -24,6 +24,7 @@ static int check_cpu_dscr_default(char *file, unsigned long val)
+ rc = read(fd, buf, sizeof(buf));
+ if (rc == -1) {
+ perror("read() failed");
++ close(fd);
+ return 1;
+ }
+ close(fd);
+@@ -65,8 +66,10 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
+ if (access(file, F_OK))
+ continue;
+
+- if (check_cpu_dscr_default(file, val))
++ if (check_cpu_dscr_default(file, val)) {
++ closedir(sysfs);
+ return 1;
++ }
+ }
+ closedir(sysfs);
+ return 0;
+diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
+index e7ceabed7f51f..7d0aa22bdc12b 100644
+--- a/tools/testing/selftests/proc/proc-uptime-002.c
++++ b/tools/testing/selftests/proc/proc-uptime-002.c
+@@ -17,6 +17,7 @@
+ // while shifting across CPUs.
+ #undef NDEBUG
+ #include <assert.h>
++#include <errno.h>
+ #include <unistd.h>
+ #include <sys/syscall.h>
+ #include <stdlib.h>
+@@ -54,7 +55,7 @@ int main(void)
+ len += sizeof(unsigned long);
+ free(m);
+ m = malloc(len);
+- } while (sys_sched_getaffinity(0, len, m) == -EINVAL);
++ } while (sys_sched_getaffinity(0, len, m) == -1 && errno == EINVAL);
+
+ fd = open("/proc/uptime", O_RDONLY);
+ assert(fd >= 0);