author     Mike Pagano <mpagano@gentoo.org>  2019-10-07 13:45:41 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2019-10-07 13:45:41 -0400
commit     0b3da0b3904e0dabb4d3ba04448ba3169b8599d8 (patch)
tree       cf2f0e6c9333c691c75f0be0bb7ea26a4adcc945
parent     Linux patch 5.2.19 (diff)
Linux patch 5.2.20
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |    4
-rw-r--r--  1019_linux-5.2.20.patch | 5241
2 files changed, 5245 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 71b680eb..428535dd 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch: 1018_linux-5.2.19.patch
From: https://www.kernel.org
Desc: Linux 5.2.19
+Patch: 1019_linux-5.2.20.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.20
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1019_linux-5.2.20.patch b/1019_linux-5.2.20.patch
new file mode 100644
index 00000000..62e23973
--- /dev/null
+++ b/1019_linux-5.2.20.patch
@@ -0,0 +1,5241 @@
+diff --git a/Makefile b/Makefile
+index 5c981a5c882f..4bad77aa54c5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 8869742a85df..6029d324911c 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -75,7 +75,7 @@ config ARM
+ select HAVE_EXIT_THREAD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
+- select HAVE_FUNCTION_TRACER if !XIP_KERNEL
++ select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
+ select HAVE_GCC_PLUGINS
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_IDE if PCI || ISA || PCMCIA
+@@ -1545,8 +1545,9 @@ config ARM_PATCH_IDIV
+ code to do integer division.
+
+ config AEABI
+- bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
+- default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
++ bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
++ !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
++ default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
+ help
+ This option allows for the kernel to be compiled using the latest
+ ARM ABI (aka EABI). This is only useful if you are using a user
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index f863c6935d0e..c0b278358301 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -112,6 +112,10 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
+ CFLAGS_ABI +=-funwind-tables
+ endif
+
++ifeq ($(CONFIG_CC_IS_CLANG),y)
++CFLAGS_ABI += -meabi gnu
++endif
++
+ # Accept old syntax despite ".syntax unified"
+ AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
+
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 0048eadd0681..e76155d5840b 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -211,7 +211,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+ {
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+- if (fsr & FSR_WRITE)
++ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+@@ -282,7 +282,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+- if (fsr & FSR_WRITE)
++ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+ flags |= FAULT_FLAG_WRITE;
+
+ /*
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index c063708fa503..9ecc2097a87a 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -6,6 +6,7 @@
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+ #define FSR_LNX_PF (1 << 31)
++#define FSR_CM (1 << 13)
+ #define FSR_WRITE (1 << 11)
+ #define FSR_FS4 (1 << 10)
+ #define FSR_FS3_0 (15)
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index f866870db749..0b94b674aa91 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -18,8 +18,9 @@
+ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+ /* gap between mmap and stack */
+-#define MIN_GAP (128*1024*1024UL)
+-#define MAX_GAP ((TASK_SIZE)/6*5)
++#define MIN_GAP (128*1024*1024UL)
++#define MAX_GAP ((STACK_TOP)/6*5)
++#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
+
+ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ {
+@@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ unsigned long gap = rlim_stack->rlim_cur;
++ unsigned long pad = stack_guard_gap;
++
++ /* Account for stack randomization if necessary */
++ if (current->flags & PF_RANDOMIZE)
++ pad += (STACK_RND_MASK << PAGE_SHIFT);
++
++ /* Values close to RLIM_INFINITY can overflow. */
++ if (gap + pad > gap)
++ gap += pad;
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - rnd);
++ return PAGE_ALIGN(STACK_TOP - gap - rnd);
+ }
+
+ /*
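
[Editor's sketch] The arch/arm/mm/mmap.c hunk above makes two related changes: the stack-randomization pad is added to the gap only when the sum cannot wrap (an unlimited stack rlimit is ~0UL, so gap + pad would overflow), and the mmap base is computed from STACK_TOP rather than TASK_SIZE. Below is a minimal user-space sketch of the same arithmetic; STACK_TOP_SKETCH and the stack_guard_gap default are illustrative stand-ins, not the kernel's values.

    #include <stdio.h>

    #define PAGE_SHIFT       12UL
    #define PAGE_SIZE        (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x)    (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define STACK_TOP_SKETCH 0xbf000000UL   /* illustrative; not the real STACK_TOP */
    #define MIN_GAP          (128 * 1024 * 1024UL)
    #define MAX_GAP          ((STACK_TOP_SKETCH) / 6 * 5)
    #define STACK_RND_MASK   (0x7ff >> (PAGE_SHIFT - 12))

    /* 256 pages, mirroring the kernel's default stack_guard_gap */
    static unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

    static unsigned long mmap_base_sketch(unsigned long rnd,
                                          unsigned long rlim_cur,
                                          int randomize)
    {
            unsigned long gap = rlim_cur;
            unsigned long pad = stack_guard_gap;

            if (randomize)
                    pad += STACK_RND_MASK << PAGE_SHIFT;

            /* An infinite rlimit is ~0UL: adding pad would wrap, so skip it. */
            if (gap + pad > gap)
                    gap += pad;

            if (gap < MIN_GAP)
                    gap = MIN_GAP;
            else if (gap > MAX_GAP)
                    gap = MAX_GAP;

            return PAGE_ALIGN(STACK_TOP_SKETCH - gap - rnd);
    }

    int main(void)
    {
            printf("base, 8MiB rlimit:     %#lx\n", mmap_base_sketch(0, 8UL << 20, 1));
            printf("base, infinite rlimit: %#lx\n", mmap_base_sketch(0, ~0UL, 1));
            return 0;
    }
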
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 1aa2586fa597..13233c7917fe 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1177,6 +1177,22 @@ void __init adjust_lowmem_bounds(void)
+ */
+ vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
++ /*
++ * The first usable region must be PMD aligned. Mark its start
++ * as MEMBLOCK_NOMAP if it isn't.
++ */
++ for_each_memblock(memory, reg) {
++ if (!memblock_is_nomap(reg)) {
++ if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
++ phys_addr_t len;
++
++ len = round_up(reg->base, PMD_SIZE) - reg->base;
++ memblock_mark_nomap(reg->base, len);
++ }
++ break;
++ }
++ }
++
+ for_each_memblock(memory, reg) {
+ phys_addr_t block_start = reg->base;
+ phys_addr_t block_end = reg->base + reg->size;
+diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
+index 7a299a20f6dc..7a8b8bc69e8d 100644
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -63,7 +63,7 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
+ #undef __XCHG_CASE
+
+ #define __XCHG_GEN(sfx) \
+-static inline unsigned long __xchg##sfx(unsigned long x, \
++static __always_inline unsigned long __xchg##sfx(unsigned long x, \
+ volatile void *ptr, \
+ int size) \
+ { \
+@@ -105,7 +105,7 @@ __XCHG_GEN(_mb)
+ #define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+
+ #define __CMPXCHG_GEN(sfx) \
+-static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
++static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
+ unsigned long old, \
+ unsigned long new, \
+ int size) \
+@@ -212,7 +212,7 @@ __CMPWAIT_CASE( , , 64);
+ #undef __CMPWAIT_CASE
+
+ #define __CMPWAIT_GEN(sfx) \
+-static inline void __cmpwait##sfx(volatile void *ptr, \
++static __always_inline void __cmpwait##sfx(volatile void *ptr, \
+ unsigned long val, \
+ int size) \
+ { \
+diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
+index b050641b5139..8dac7110f0cb 100644
+--- a/arch/arm64/mm/mmap.c
++++ b/arch/arm64/mm/mmap.c
+@@ -54,7 +54,11 @@ unsigned long arch_mmap_rnd(void)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ unsigned long gap = rlim_stack->rlim_cur;
+- unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
++ unsigned long pad = stack_guard_gap;
++
++ /* Account for stack randomization if necessary */
++ if (current->flags & PF_RANDOMIZE)
++ pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
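
[Editor's sketch] Returning to the arch/arm64/include/asm/cmpxchg.h hunk above: the helpers it touches dispatch on a size argument and depend on the compiler collapsing the switch at compile time. Plain inline is only a hint, so the compiler may emit an out-of-line copy in which size is a runtime value and the dead branches survive; __always_inline forbids that. A standalone sketch of the pattern, where __bad_xchg_size() is a hypothetical stand-in for the kernel's unresolved-symbol trap:

    /* Standalone approximation of the kernel's __always_inline attribute. */
    #define __always_inline inline __attribute__((__always_inline__))

    /* Hypothetical trap: never defined, so reaching it breaks the link. */
    extern unsigned long __bad_xchg_size(void);

    static __always_inline unsigned long
    xchg_sketch(volatile void *ptr, unsigned long x, int size)
    {
            switch (size) {
            case 4:
                    return __atomic_exchange_n((volatile unsigned int *)ptr,
                                               (unsigned int)x, __ATOMIC_SEQ_CST);
            case 8:
                    return __atomic_exchange_n((volatile unsigned long *)ptr,
                                               x, __ATOMIC_SEQ_CST);
            default:
                    /* Must be proven dead at compile time in every caller. */
                    return __bad_xchg_size();
            }
    }
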
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 94096299fc56..50cc2f0962e5 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -68,7 +68,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
+ "\t" __scbeqz " %0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -98,7 +98,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -132,7 +132,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+ " move %0, %1 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -193,6 +193,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ if (kernel_uses_llsc) {
+ int temp;
+
++ loongson_llsc_mb();
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+@@ -200,16 +201,16 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ " .set pop \n"
+ " subu %0, %1, %3 \n"
+ " move %1, %0 \n"
+- " bltz %0, 1f \n"
++ " bltz %0, 2f \n"
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+ " sc %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
+- "1: \n"
++ "2: \n"
+ " .set pop \n"
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+- : "Ir" (i));
++ : "Ir" (i) : __LLSC_CLOBBER);
+ } else {
+ unsigned long flags;
+
+@@ -269,7 +270,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+ "\t" __scbeqz " %0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -299,7 +300,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -333,7 +334,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
+index b865e317a14f..9228f7386220 100644
+--- a/arch/mips/include/asm/barrier.h
++++ b/arch/mips/include/asm/barrier.h
+@@ -211,14 +211,22 @@
+ #define __smp_wmb() barrier()
+ #endif
+
++/*
++ * When LL/SC does imply order, it must also be a compiler barrier to prevent
++ * the compiler from reordering where the CPU will not. When it does not imply
++ * order, the compiler is also free to reorder across the LL/SC loop and
++ * ordering will be done by smp_llsc_mb() and friends.
++ */
+ #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+ #define __WEAK_LLSC_MB " sync \n"
++#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
++#define __LLSC_CLOBBER
+ #else
+ #define __WEAK_LLSC_MB " \n"
++#define smp_llsc_mb() do { } while (0)
++#define __LLSC_CLOBBER "memory"
+ #endif
+
+-#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+-
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+ #define smp_mb__before_llsc() smp_wmb()
+ #define __smp_mb__before_llsc() __smp_wmb()
+@@ -238,36 +246,40 @@
+
+ /*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+- * store or pref) in between an ll & sc can cause the sc instruction to
++ * store or prefetch) in between an LL & SC can cause the SC instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+- * 1) A memory access appearing prior to the ll in program order may actually
+- * be executed after the ll - this is the reordering case.
++ * 1) A memory access appearing prior to the LL in program order may actually
++ * be executed after the LL - this is the reordering case.
+ *
+- * In order to avoid this we need to place a memory barrier (ie. a sync
+- * instruction) prior to every ll instruction, in between it & any earlier
+- * memory access instructions. Many of these cases are already covered by
+- * smp_mb__before_llsc() but for the remaining cases, typically ones in
+- * which multiple CPUs may operate on a memory location but ordering is not
+- * usually guaranteed, we use loongson_llsc_mb() below.
++ * In order to avoid this we need to place a memory barrier (ie. a SYNC
++ * instruction) prior to every LL instruction, in between it and any earlier
++ * memory access instructions.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+- * 2) If a conditional branch exists between an ll & sc with a target outside
+- * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
++ * 2) If a conditional branch exists between an LL & SC with a target outside
++ * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+- * execution of memory accesses from outside of the ll-sc loop.
++ * execution of memory accesses from outside of the LL-SC loop.
+ *
+- * In order to avoid this we need a memory barrier (ie. a sync instruction)
++ * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
+ * at each affected branch target, for which we also use loongson_llsc_mb()
+ * defined below.
+ *
+ * This case affects all current Loongson 3 CPUs.
++ *
++ * The above-described cases cause an error in the cache coherence protocol,
++ * such that the Invalidate of a competing LL-SC goes 'missing' and SC
++ * erroneously observes its core still has Exclusive state and lets the SC
++ * proceed.
++ *
++ * Therefore the error only occurs on SMP systems.
+ */
+ #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+-#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
++#define loongson_llsc_mb() __asm__ __volatile__("sync" : : :"memory")
+ #else
+ #define loongson_llsc_mb() do { } while (0)
+ #endif
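
[Editor's sketch] The barrier.h change makes the "memory" clobber on LL/SC asm conditional: where the CPU may reorder beyond the LL/SC loop, ordering comes from explicit sync barriers (smp_llsc_mb()) and the per-asm clobber is dropped; everywhere else the asm itself must stay a compiler barrier. A minimal sketch of the retry loop those clobbers attach to follows; it assumes the strongly-ordered case where __LLSC_CLOBBER is "memory", and the real code uses GCC_OFF_SMALL_ASM() constraints rather than the plain "m" shown here.

    /* "memory" unless ordering comes from explicit sync barriers instead. */
    #define __LLSC_CLOBBER "memory"

    static inline void atomic_inc_sketch(int *v)
    {
            int temp;

            __asm__ __volatile__(
            "1:     ll      %0, %1          \n"   /* load-linked             */
            "       addiu   %0, %0, 1       \n"   /* modify the loaded value */
            "       sc      %0, %1          \n"   /* store-conditional       */
            "       beqz    %0, 1b          \n"   /* retry if the SC failed  */
            "       nop                     \n"   /* branch delay slot       */
            : "=&r" (temp), "+m" (*v)
            :
            : __LLSC_CLOBBER);
    }
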
+diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
+index 9a466dde9b96..985d6a02f9ea 100644
+--- a/arch/mips/include/asm/bitops.h
++++ b/arch/mips/include/asm/bitops.h
+@@ -66,7 +66,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ " beqzl %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
++ : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
++ : __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ loongson_llsc_mb();
+@@ -76,7 +77,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __INS "%0, %3, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (bit), "r" (~0));
++ : "ir" (bit), "r" (~0)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+ } else if (kernel_uses_llsc) {
+@@ -90,7 +92,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit));
++ : "ir" (1UL << bit)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ } else
+ __mips_set_bit(nr, addr);
+@@ -122,7 +125,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ " beqzl %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (~(1UL << bit)));
++ : "ir" (~(1UL << bit))
++ : __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ loongson_llsc_mb();
+@@ -132,7 +136,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __INS "%0, $0, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (bit));
++ : "ir" (bit)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+ } else if (kernel_uses_llsc) {
+@@ -146,7 +151,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (~(1UL << bit)));
++ : "ir" (~(1UL << bit))
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ } else
+ __mips_clear_bit(nr, addr);
+@@ -192,7 +198,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+ " beqzl %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit));
++ : "ir" (1UL << bit)
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+@@ -207,7 +214,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit));
++ : "ir" (1UL << bit)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ } else
+ __mips_change_bit(nr, addr);
+@@ -244,11 +252,12 @@ static inline int test_and_set_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -259,7 +268,7 @@ static inline int test_and_set_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+@@ -300,11 +309,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -315,7 +325,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+@@ -358,12 +368,13 @@ static inline int test_and_clear_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # test_and_clear_bit \n"
+@@ -372,13 +383,14 @@ static inline int test_and_clear_bit(unsigned long nr,
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "ir" (bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ #endif
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -390,7 +402,7 @@ static inline int test_and_clear_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+@@ -433,11 +445,12 @@ static inline int test_and_change_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -448,7 +461,7 @@ static inline int test_and_change_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index f345a873742d..c8a47d18f628 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -46,6 +46,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
++ loongson_llsc_mb(); \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+@@ -60,7 +61,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ " .set pop \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
+- : "memory"); \
++ : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long __flags; \
+ \
+@@ -117,6 +118,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
++ loongson_llsc_mb(); \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+@@ -132,8 +134,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ " .set pop \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+- : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
+- : "memory"); \
++ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
++ : __LLSC_CLOBBER); \
++ loongson_llsc_mb(); \
+ } else { \
+ unsigned long __flags; \
+ \
+@@ -229,6 +232,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
+ */
+ local_irq_save(flags);
+
++ loongson_llsc_mb();
+ asm volatile(
+ " .set push \n"
+ " .set " MIPS_ISA_ARCH_LEVEL " \n"
+@@ -274,6 +278,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
+ "r" (old),
+ "r" (new)
+ : "memory");
++ loongson_llsc_mb();
+
+ local_irq_restore(flags);
+ return ret;
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 1e6966e8527e..bdbdc19a2b8f 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -689,6 +689,9 @@
+ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
+ #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+
++/* Ingenic Config7 bits */
++#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
++
+ /* Config7 Bits specific to MIPS Technologies. */
+
+ /* Performance counters implemented Per TC */
+@@ -2813,6 +2816,7 @@ __BUILD_SET_C0(status)
+ __BUILD_SET_C0(cause)
+ __BUILD_SET_C0(config)
+ __BUILD_SET_C0(config5)
++__BUILD_SET_C0(config7)
+ __BUILD_SET_C0(intcontrol)
+ __BUILD_SET_C0(intctl)
+ __BUILD_SET_C0(srsmap)
+diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
+index 180ad081afcf..c2d88c1dcc0f 100644
+--- a/arch/mips/kernel/branch.c
++++ b/arch/mips/kernel/branch.c
+@@ -58,6 +58,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+ {
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
++ int __maybe_unused bc_false = 0;
+
+ if (!cpu_has_mmips)
+ return 0;
+@@ -139,7 +140,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ case mm_bc2f_op:
+ case mm_bc1f_op: {
+- int bc_false = 0;
+ unsigned int fcr31;
+ unsigned int bit;
+
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 9635c1db3ae6..e654ffc1c8a0 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1964,6 +1964,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+ c->cputype = CPU_JZRISC;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ __cpu_name[cpu] = "Ingenic JZRISC";
++ /*
++ * The XBurst core by default attempts to avoid branch target
++ * buffer lookups by detecting & special casing loops. This
++ * feature causes BogoMIPS and lpj to be calculated incorrectly.
++ * Set cp0 config7 bit 4 to disable this feature.
++ */
++ set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
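
[Editor's sketch] This probe change depends on the set_c0_config7() accessor created by the mipsregs.h hunk earlier in the patch via __BUILD_SET_C0(config7). A simplified sketch of what that generated helper does (the real macro variants also handle execution hazards):

    /* Roughly what __BUILD_SET_C0(config7) expands to: a read-modify-write
     * of CP0 Config7 that returns the previous value. */
    static inline unsigned int set_c0_config7_sketch(unsigned int set)
    {
            unsigned int res, new;

            res = read_c0_config7();        /* mfc0: CP0 register 16, select 7 */
            new = res | set;
            write_c0_config7(new);          /* mtc0 */

            return res;
    }
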
+diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
+index b6dc78ad5d8c..b0e25e913bdb 100644
+--- a/arch/mips/kernel/syscall.c
++++ b/arch/mips/kernel/syscall.c
+@@ -132,6 +132,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
++ loongson_llsc_mb();
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index d79f2b432318..f5c778113384 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+ EXPORT_SYMBOL(shm_align_mask);
+
+ /* gap between mmap and stack */
+-#define MIN_GAP (128*1024*1024UL)
+-#define MAX_GAP ((TASK_SIZE)/6*5)
++#define MIN_GAP (128*1024*1024UL)
++#define MAX_GAP ((TASK_SIZE)/6*5)
++#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
+
+ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ {
+@@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ unsigned long gap = rlim_stack->rlim_cur;
++ unsigned long pad = stack_guard_gap;
++
++ /* Account for stack randomization if necessary */
++ if (current->flags & PF_RANDOMIZE)
++ pad += (STACK_RND_MASK << PAGE_SHIFT);
++
++ /* Values close to RLIM_INFINITY can overflow. */
++ if (gap + pad > gap)
++ gap += pad;
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 144ceb0fba88..bece1264d1c5 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -631,7 +631,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ return;
+ }
+
+- if (cpu_has_rixi && _PAGE_NO_EXEC) {
++ if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ if (fill_includes_sw_bits) {
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+ } else {
+diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
+index 3a6aa57b9d90..eea28ca679db 100644
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+
+ pagefault_enable();
+
+- if (!ret)
+- *oval = oldval;
++ *oval = oldval;
+
+ prevent_write_to_user(uaddr, sizeof(*uaddr));
+ return ret;
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 89623962c727..fe0c32fb9f96 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -744,6 +744,33 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+ */
+ #define MAX_WAIT_FOR_RECOVERY 300
+
++
++/* Walks the PE tree after processing an event to remove any stale PEs.
++ *
++ * NB: This needs to be recursive to ensure the leaf PEs get removed
++ * before their parents do. Although this is possible to do iteratively,
++ * we don't, since recursion is easier to read and we need to guarantee
++ * the leaf nodes will be handled first.
++ */
++static void eeh_pe_cleanup(struct eeh_pe *pe)
++{
++ struct eeh_pe *child_pe, *tmp;
++
++ list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
++ eeh_pe_cleanup(child_pe);
++
++ if (pe->state & EEH_PE_KEEP)
++ return;
++
++ if (!(pe->state & EEH_PE_INVALID))
++ return;
++
++ if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
++ list_del(&pe->child);
++ kfree(pe);
++ }
++}
++
+ /**
+ * eeh_handle_normal_event - Handle EEH events on a specific PE
+ * @pe: EEH PE - which should not be used after we return, as it may
+@@ -782,8 +809,6 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ return;
+ }
+
+- eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+-
+ eeh_pe_update_time_stamp(pe);
+ pe->freeze_count++;
+ if (pe->freeze_count > eeh_max_freezes) {
+@@ -793,6 +818,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
++ eeh_for_each_pe(pe, tmp_pe)
++ eeh_pe_for_each_dev(tmp_pe, edev, tmp)
++ edev->mode &= ~EEH_DEV_NO_HANDLER;
++
+ /* Walk the various device drivers attached to this slot through
+ * a reset sequence, giving each an opportunity to do what it needs
+ * to accomplish the reset. Each child gets a report of the
+@@ -969,6 +998,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ return;
+ }
+ }
++
++ /*
++ * Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
++ * we don't want to modify the PE tree structure, so we do it here.
++ */
++ eeh_pe_cleanup(pe);
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
+ }
+
+@@ -981,7 +1016,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ */
+ void eeh_handle_special_event(void)
+ {
+- struct eeh_pe *pe, *phb_pe;
++ struct eeh_pe *pe, *phb_pe, *tmp_pe;
++ struct eeh_dev *edev, *tmp_edev;
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ unsigned long flags;
+@@ -1040,6 +1076,7 @@ void eeh_handle_special_event(void)
+ */
+ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ rc == EEH_NEXT_ERR_FENCED_PHB) {
++ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ eeh_handle_normal_event(pe);
+ } else {
+ pci_lock_rescan_remove();
+@@ -1050,6 +1087,10 @@ void eeh_handle_special_event(void)
+ (phb_pe->state & EEH_PE_RECOVERING))
+ continue;
+
++ eeh_for_each_pe(pe, tmp_pe)
++ eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
++ edev->mode &= ~EEH_DEV_NO_HANDLER;
++
+ /* Notify all devices to be down */
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
+index 64cfbe41174b..e36653e5f76b 100644
+--- a/arch/powerpc/kernel/eeh_event.c
++++ b/arch/powerpc/kernel/eeh_event.c
+@@ -121,6 +121,14 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
+ }
+ event->pe = pe;
+
++ /*
++ * Mark the PE as recovering before inserting it in the queue.
++ * This prevents the PE from being free()ed by a hotplug driver
++ * while the PE is sitting in the event queue.
++ */
++ if (pe)
++ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
++
+ /* We may or may not be called in an interrupt context */
+ spin_lock_irqsave(&eeh_eventlist_lock, flags);
+ list_add(&event->list, &eeh_eventlist);
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 854cef7b18f4..f0813d50e0b1 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -491,6 +491,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
+ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ {
+ struct eeh_pe *pe, *parent, *child;
++ bool keep, recover;
+ int cnt;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+
+@@ -516,10 +517,21 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ */
+ while (1) {
+ parent = pe->parent;
++
++ /* PHB PEs should never be removed */
+ if (pe->type & EEH_PE_PHB)
+ break;
+
+- if (!(pe->state & EEH_PE_KEEP)) {
++ /*
++ * XXX: KEEP is set while resetting a PE. I don't think it's
++ * ever set without RECOVERING also being set. I could
++ * be wrong though so catch that with a WARN.
++ */
++ keep = !!(pe->state & EEH_PE_KEEP);
++ recover = !!(pe->state & EEH_PE_RECOVERING);
++ WARN_ON(keep && !recover);
++
++ if (!keep && !recover) {
+ if (list_empty(&pe->edevs) &&
+ list_empty(&pe->child_list)) {
+ list_del(&pe->child);
+@@ -528,6 +540,15 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ break;
+ }
+ } else {
++ /*
++ * Mark the PE as invalid. At the end of the recovery
++ * process any invalid PEs will be garbage collected.
++ *
++ * We need to delay the free()ing of them since we can
++ * remove edev's while traversing the PE tree which
++ * might trigger the removal of a PE and we can't
++ * deal with that (yet).
++ */
+ if (list_empty(&pe->edevs)) {
+ cnt = 0;
+ list_for_each_entry(child, &pe->child_list, child) {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 6c51aa845bce..3e564536a237 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -556,6 +556,10 @@ FTR_SECTION_ELSE
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ 9:
+ /* Deliver the machine check to host kernel in V mode. */
++BEGIN_FTR_SECTION
++ ld r10,ORIG_GPR3(r1)
++ mtspr SPRN_CFAR,r10
++END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ MACHINE_CHECK_HANDLER_WINDUP
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_0(PACA_EXMC)
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index fff2eb22427d..65cd96c3b1d6 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -871,15 +871,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ return 0;
+
+ for_each_cpu(cpu, cpus) {
++ struct device *dev = get_cpu_device(cpu);
++
+ switch (state) {
+ case DOWN:
+- cpuret = cpu_down(cpu);
++ cpuret = device_offline(dev);
+ break;
+ case UP:
+- cpuret = cpu_up(cpu);
++ cpuret = device_online(dev);
+ break;
+ }
+- if (cpuret) {
++ if (cpuret < 0) {
+ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+ __func__,
+ ((state == UP) ? "up" : "down"),
+@@ -968,6 +970,8 @@ int rtas_ibm_suspend_me(u64 handle)
+ data.token = rtas_token("ibm,suspend-me");
+ data.complete = &done;
+
++ lock_device_hotplug();
++
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+ cpuret = rtas_online_cpus_mask(offline_mask);
+@@ -1007,6 +1011,7 @@ out_hotplug_enable:
+ __func__);
+
+ out:
++ unlock_device_hotplug();
+ free_cpumask_var(offline_mask);
+ return atomic_read(&data.error);
+ }
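
[Editor's sketch] The rtas.c hunk swaps cpu_up()/cpu_down() for the device-model entry points so the sysfs online state stays coherent, and it widens the error check because device_online()/device_offline() may return a positive value for a no-op; only negative returns are failures. A hedged sketch of that calling convention:

    #include <linux/cpu.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    /* Assumes lock_device_hotplug() is held, as rtas_ibm_suspend_me() now
     * arranges. Do not treat rc > 0 (request was a no-op) as an error. */
    static int offline_one_cpu_sketch(unsigned int cpu)
    {
            struct device *dev = get_cpu_device(cpu);
            int rc;

            if (!dev)
                    return -ENODEV;

            rc = device_offline(dev);       /* replaces a bare cpu_down(cpu) */
            return rc < 0 ? rc : 0;
    }
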
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 47df30982de1..c8ea3a253b81 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -472,6 +472,7 @@ void system_reset_exception(struct pt_regs *regs)
+ if (debugger(regs))
+ goto out;
+
++ kmsg_dump(KMSG_DUMP_OOPS);
+ /*
+ * A system reset is a request to dump, so we always send
+ * it through the crashdump code (if fadump or kdump are
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 8deb432c2975..2b6cc823046a 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -901,7 +901,7 @@ int __meminit radix__create_section_mapping(unsigned long start, unsigned long e
+ return -1;
+ }
+
+- return create_physical_mapping(start, end, nid);
++ return create_physical_mapping(__pa(start), __pa(end), nid);
+ }
+
+ int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 3bdfc1e32096..2231959c5633 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -570,6 +570,7 @@ static int core_imc_mem_init(int cpu, int size)
+ {
+ int nid, rc = 0, core_id = (cpu / threads_per_core);
+ struct imc_mem_info *mem_info;
++ struct page *page;
+
+ /*
+ * alloc_pages_node() will allocate memory for core in the
+@@ -580,11 +581,12 @@ static int core_imc_mem_init(int cpu, int size)
+ mem_info->id = core_id;
+
+ /* We need only vbase for core counters */
+- mem_info->vbase = page_address(alloc_pages_node(nid,
+- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+- __GFP_NOWARN, get_order(size)));
+- if (!mem_info->vbase)
++ page = alloc_pages_node(nid,
++ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
++ __GFP_NOWARN, get_order(size));
++ if (!page)
+ return -ENOMEM;
++ mem_info->vbase = page_address(page);
+
+ /* Init the mutex */
+ core_imc_refc[core_id].id = core_id;
+@@ -839,15 +841,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
+ int nid = cpu_to_node(cpu_id);
+
+ if (!local_mem) {
++ struct page *page;
+ /*
+ * This case could happen only once at start, since we dont
+ * free the memory in cpu offline path.
+ */
+- local_mem = page_address(alloc_pages_node(nid,
++ page = alloc_pages_node(nid,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+- __GFP_NOWARN, get_order(size)));
+- if (!local_mem)
++ __GFP_NOWARN, get_order(size));
++ if (!page)
+ return -ENOMEM;
++ local_mem = page_address(page);
+
+ per_cpu(thread_imc_mem, cpu_id) = local_mem;
+ }
+@@ -1085,11 +1089,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
+ int core_id = (cpu_id / threads_per_core);
+
+ if (!local_mem) {
+- local_mem = page_address(alloc_pages_node(phys_id,
+- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+- __GFP_NOWARN, get_order(size)));
+- if (!local_mem)
++ struct page *page;
++
++ page = alloc_pages_node(phys_id,
++ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
++ __GFP_NOWARN, get_order(size));
++ if (!page)
+ return -ENOMEM;
++ local_mem = page_address(page);
+ per_cpu(trace_imc_mem, cpu_id) = local_mem;
+
+ /* Initialise the counters for trace mode */
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+index e28f03e1eb5e..c75ec37bf0cd 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+@@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
+ struct page *tce_mem = NULL;
+ __be64 *addr;
+
+- tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
++ tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
++ shift - PAGE_SHIFT);
+ if (!tce_mem) {
+ pr_err("Failed to allocate a TCE memory, level shift=%d\n",
+ shift);
+@@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+
+ if (ptce)
+ *ptce = cpu_to_be64(0);
++ else
++ /* Skip the rest of the level */
++ i |= tbl->it_level_size - 1;
+ }
+ }
+
+@@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
+ PAGE_SHIFT);
+ const unsigned long tce_table_size = 1UL << table_shift;
+- unsigned int tmplevels = levels;
+
+ if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
+ return -EINVAL;
+@@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ if (!is_power_of_2(window_size))
+ return -EINVAL;
+
+- if (alloc_userspace_copy && (window_size > (1ULL << 32)))
+- tmplevels = 1;
+-
+ /* Adjust direct table size from window_size and levels */
+ entries_shift = (entries_shift + levels - 1) / levels;
+ level_shift = entries_shift + 3;
+@@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+
+ /* Allocate TCE table */
+ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+- tmplevels, tce_table_size, &offset, &total_allocated);
++ 1, tce_table_size, &offset, &total_allocated);
+
+ /* addr==NULL means that the first level allocation failed */
+ if (!addr)
+@@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ * we did not allocate as much as we wanted,
+ * release partially allocated table.
+ */
+- if (tmplevels == levels && offset < tce_table_size)
++ if (levels == 1 && offset < tce_table_size)
+ goto free_tces_exit;
+
+ /* Allocate userspace view of the TCE table */
+ if (alloc_userspace_copy) {
+ offset = 0;
+ uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+- tmplevels, tce_table_size, &offset,
++ 1, tce_table_size, &offset,
+ &total_allocated_uas);
+ if (!uas)
+ goto free_tces_exit;
+- if (tmplevels == levels && (offset < tce_table_size ||
++ if (levels == 1 && (offset < tce_table_size ||
+ total_allocated_uas != total_allocated))
+ goto free_uas_exit;
+ }
+@@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+
+ pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
+ window_size, tce_table_size, bus_offset, tbl->it_base,
+- tbl->it_userspace, tmplevels, levels);
++ tbl->it_userspace, 1, levels);
+
+ return 0;
+
+diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
+index be26ab3d99e0..33a52114267d 100644
+--- a/arch/powerpc/platforms/powernv/pci.h
++++ b/arch/powerpc/platforms/powernv/pci.h
+@@ -225,7 +225,7 @@ extern struct iommu_table_group *pnv_npu_compound_attach(
+ struct pnv_ioda_pe *pe);
+
+ /* pci-ioda-tce.c */
+-#define POWERNV_IOMMU_DEFAULT_LEVELS 1
++#define POWERNV_IOMMU_DEFAULT_LEVELS 2
+ #define POWERNV_IOMMU_MAX_LEVELS 5
+
+ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 50e7aee3c7f3..accb732dcfac 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -9,6 +9,7 @@
+ #include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
++#include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/stat.h>
+ #include <linux/completion.h>
+@@ -206,7 +207,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
+
+ prop_data += vd;
+ }
++
++ cond_resched();
+ }
++
++ cond_resched();
+ } while (rtas_rc == 1);
+
+ of_node_put(dn);
+@@ -309,8 +314,12 @@ int pseries_devicetree_update(s32 scope)
+ add_dt_node(phandle, drc_index);
+ break;
+ }
++
++ cond_resched();
+ }
+ }
++
++ cond_resched();
+ } while (rc == 1);
+
+ kfree(rtas_buf);
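
[Editor's sketch] The mobility.c hunk simply drops cond_resched() into long, RTAS-driven loops so a large device-tree update cannot monopolize the CPU. The general shape, with do_one_update_chunk() as a hypothetical stand-in for the rtas_call() body above:

    #include <linux/sched.h>

    int do_one_update_chunk(void);  /* hypothetical stand-in for the RTAS work */

    static int walk_updates_sketch(void)
    {
            int rc;

            do {
                    rc = do_one_update_chunk();
                    cond_resched();         /* may sleep; must not hold spinlocks */
            } while (rc == 1);              /* 1 means "call again", as with ibm,update-nodes */

            return rc;
    }
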
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 8fa012a65a71..cc682759feae 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -344,6 +344,9 @@ static void pseries_lpar_idle(void)
+ * low power mode by ceding processor to hypervisor
+ */
+
++ if (!prep_irq_for_idle())
++ return;
++
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 4a721fd62406..e15ccf19c153 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -2532,13 +2532,16 @@ static void dump_pacas(void)
+ static void dump_one_xive(int cpu)
+ {
+ unsigned int hwid = get_hard_smp_processor_id(cpu);
+-
+- opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+- opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+- opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+- opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+- opal_xive_dump(XIVE_DUMP_VP, hwid);
+- opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++ bool hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ if (hv) {
++ opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
++ opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
++ opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
++ opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
++ opal_xive_dump(XIVE_DUMP_VP, hwid);
++ opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++ }
+
+ if (setjmp(bus_error_jmp) != 0) {
+ catch_memory_errors = 0;
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index ccad1398abd4..b5cfcad953c2 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
+ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ struct inode *root_inode;
+- struct dentry *root_dentry;
++ struct dentry *root_dentry, *update_file;
+ int rc = 0;
+ struct hypfs_sb_info *sbi;
+
+@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ rc = hypfs_diag_create_files(root_dentry);
+ if (rc)
+ return rc;
+- sbi->update_file = hypfs_create_update_file(root_dentry);
+- if (IS_ERR(sbi->update_file))
+- return PTR_ERR(sbi->update_file);
++ update_file = hypfs_create_update_file(root_dentry);
++ if (IS_ERR(update_file))
++ return PTR_ERR(update_file);
++ sbi->update_file = update_file;
+ hypfs_update_update(sb);
+ pr_info("Hypervisor filesystem mounted\n");
+ return 0;
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 742ecf5b6c00..72200998687c 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -645,7 +645,9 @@ static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
+ .vector = stimer->config.apic_vector
+ };
+
+- return !kvm_apic_set_irq(vcpu, &irq, NULL);
++ if (lapic_in_kernel(vcpu))
++ return !kvm_apic_set_irq(vcpu, &irq, NULL);
++ return 0;
+ }
+
+ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
+@@ -1854,7 +1856,13 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+
+ ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
+ ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+- ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
++
++ /*
++ * Direct Synthetic timers only make sense with in-kernel
++ * LAPIC
++ */
++ if (lapic_in_kernel(vcpu))
++ ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+
+ break;
+
+diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
+index 6ad5ef48b61e..8cd2ac650b50 100644
+--- a/drivers/base/regmap/Kconfig
++++ b/drivers/base/regmap/Kconfig
+@@ -44,7 +44,7 @@ config REGMAP_IRQ
+
+ config REGMAP_SOUNDWIRE
+ tristate
+- depends on SOUNDWIRE_BUS
++ depends on SOUNDWIRE
+
+ config REGMAP_SCCB
+ tristate
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 024060165afa..76457003f140 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -2594,7 +2594,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+ if (ret)
+ return ret;
+ if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+- WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ return -EINVAL;
+ }
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index f124a2d2bb9f..92a89c8290aa 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -221,6 +221,9 @@ struct smi_info {
+ */
+ bool irq_enable_broken;
+
++ /* Is the driver in maintenance mode? */
++ bool in_maintenance_mode;
++
+ /*
+ * Did we get an attention that we did not handle?
+ */
+@@ -1007,11 +1010,20 @@ static int ipmi_thread(void *data)
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
+- if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
++ if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ ; /* do nothing */
+- else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
+- schedule();
+- else if (smi_result == SI_SM_IDLE) {
++ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
++ /*
++ * In maintenance mode we run as fast as
++ * possible so that firmware updates can
++ * complete quickly; otherwise we avoid
++ * banging on the scheduler.
++ */
++ if (smi_info->in_maintenance_mode)
++ schedule();
++ else
++ usleep_range(100, 200);
++ } else if (smi_result == SI_SM_IDLE) {
+ if (atomic_read(&smi_info->need_watch)) {
+ schedule_timeout_interruptible(100);
+ } else {
+@@ -1019,8 +1031,9 @@ static int ipmi_thread(void *data)
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+- } else
++ } else {
+ schedule_timeout_interruptible(1);
++ }
+ }
+ return 0;
+ }
+@@ -1198,6 +1211,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
+
+ if (!enable)
+ atomic_set(&smi_info->req_events, 0);
++ smi_info->in_maintenance_mode = enable;
+ }
+
+ static void shutdown_smi(void *send_info);
+diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
+index 32dd29e0a37e..4de97cc7cb54 100644
+--- a/drivers/clk/actions/owl-common.c
++++ b/drivers/clk/actions/owl-common.c
+@@ -68,16 +68,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
+ struct clk_hw *hw;
+
+ for (i = 0; i < hw_clks->num; i++) {
++ const char *name;
+
+ hw = hw_clks->hws[i];
+-
+ if (IS_ERR_OR_NULL(hw))
+ continue;
+
++ name = hw->init->name;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Couldn't register clock %d - %s\n",
+- i, hw->init->name);
++ i, name);
+ return ret;
+ }
+ }
+diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
+index f607ee702c83..311cea0c3ae2 100644
+--- a/drivers/clk/at91/clk-main.c
++++ b/drivers/clk/at91/clk-main.c
+@@ -21,6 +21,10 @@
+
+ #define MOR_KEY_MASK (0xff << 16)
+
++#define clk_main_parent_select(s) (((s) & \
++ (AT91_PMC_MOSCEN | \
++ AT91_PMC_OSCBYPASS)) ? 1 : 0)
++
+ struct clk_main_osc {
+ struct clk_hw hw;
+ struct regmap *regmap;
+@@ -113,7 +117,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
+
+ regmap_read(regmap, AT91_PMC_SR, &status);
+
+- return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
++ return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
+ }
+
+ static const struct clk_ops main_osc_ops = {
+@@ -450,7 +454,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
+
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+
+- return status & AT91_PMC_MOSCEN ? 1 : 0;
++ return clk_main_parent_select(status);
+ }
+
+ static const struct clk_ops sam9x5_main_ops = {
+@@ -492,7 +496,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
+ clkmain->hw.init = &init;
+ clkmain->regmap = regmap;
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+- clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
++ clkmain->parent = clk_main_parent_select(status);
+
+ hw = &clkmain->hw;
+ ret = clk_hw_register(NULL, &clkmain->hw);
+diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
+index 06499568cf07..db5096fa9a17 100644
+--- a/drivers/clk/clk-bulk.c
++++ b/drivers/clk/clk-bulk.c
+@@ -18,10 +18,13 @@ static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
+ int ret;
+ int i;
+
+- for (i = 0; i < num_clks; i++)
++ for (i = 0; i < num_clks; i++) {
++ clks[i].id = NULL;
+ clks[i].clk = NULL;
++ }
+
+ for (i = 0; i < num_clks; i++) {
++ of_property_read_string_index(np, "clock-names", i, &clks[i].id);
+ clks[i].clk = of_clk_get(np, i);
+ if (IS_ERR(clks[i].clk)) {
+ ret = PTR_ERR(clks[i].clk);
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index dd93d3acc67d..8724ef6c469a 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -675,7 +675,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p5020_init_periph,
+ .cmux_groups = {
+- &p2041_cmux_grp1, &p2041_cmux_grp2
++ &p5020_cmux_grp1, &p5020_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 1, -1
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index daf1841b2adb..f29025c99c53 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -396,7 +396,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+
+ /* AHB */
+- clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite("ahb", imx8mq_ahb_sels, base + 0x9000);
++ /* The AHB clock is used by the AHB bus and is therefore marked as critical */
++ clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+ clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+
+ /* IPG */
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index b7213023b238..7a815ec76aa5 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -191,6 +191,10 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
++ /* Enable BYPASS */
++ tmp |= BYPASS_MASK;
++ writel(tmp, pll->base);
++
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+@@ -250,6 +254,10 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
++ /* Enable BYPASS */
++ tmp |= BYPASS_MASK;
++ writel_relaxed(tmp, pll->base);
++
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+@@ -283,16 +291,28 @@ static int clk_pll14xx_prepare(struct clk_hw *hw)
+ {
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
++ int ret;
+
+ /*
+ * RESETB = 1 from 0, PLL starts its normal
+ * operation after lock time
+ */
+ val = readl_relaxed(pll->base + GNRL_CTL);
++ if (val & RST_MASK)
++ return 0;
++ val |= BYPASS_MASK;
++ writel_relaxed(val, pll->base + GNRL_CTL);
+ val |= RST_MASK;
+ writel_relaxed(val, pll->base + GNRL_CTL);
+
+- return clk_pll14xx_wait_lock(pll);
++ ret = clk_pll14xx_wait_lock(pll);
++ if (ret)
++ return ret;
++
++ val &= ~BYPASS_MASK;
++ writel_relaxed(val, pll->base + GNRL_CTL);
++
++ return 0;
+ }
+
+ static int clk_pll14xx_is_prepared(struct clk_hw *hw)
+@@ -348,6 +368,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ struct clk_pll14xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
++ u32 val;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+@@ -379,6 +400,10 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ pll->rate_table = pll_clk->rate_table;
+ pll->rate_count = pll_clk->rate_count;
+
++ val = readl_relaxed(pll->base + GNRL_CTL);
++ val &= ~BYPASS_MASK;
++ writel_relaxed(val, pll->base + GNRL_CTL);
++
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll %s %lu\n",
+diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
+index 8028ff6f6610..db0b73d53551 100644
+--- a/drivers/clk/meson/axg-audio.c
++++ b/drivers/clk/meson/axg-audio.c
+@@ -992,15 +992,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
+
+ /* Take care to skip the registered input clocks */
+ for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
++ const char *name;
++
+ hw = data->hw_onecell_data->hws[i];
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
++ name = hw->init->name;
++
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+- dev_err(dev, "failed to register clock %s\n",
+- hw->init->name);
++ dev_err(dev, "failed to register clock %s\n", name);
+ return ret;
+ }
+ }
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index 7131dcf9b060..95be125c3bdd 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -685,7 +685,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+
+@@ -709,7 +709,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+
+diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
+index 92ece221b0d4..7f156cf1d7a6 100644
+--- a/drivers/clk/renesas/clk-mstp.c
++++ b/drivers/clk/renesas/clk-mstp.c
+@@ -338,7 +338,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
+ return;
+
+ pd->name = np->name;
+- pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
++ pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
++ GENPD_FLAG_ACTIVE_WAKEUP;
+ pd->attach_dev = cpg_mstp_attach_dev;
+ pd->detach_dev = cpg_mstp_detach_dev;
+ pm_genpd_init(pd, &pm_domain_always_on_gov, false);
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 9dfa28d6fd9f..cbe5fb468b7f 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -555,7 +555,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
+
+ genpd = &pd->genpd;
+ genpd->name = np->name;
+- genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
++ genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
++ GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
+index ad7951b6b285..dcf4e25a0216 100644
+--- a/drivers/clk/sirf/clk-common.c
++++ b/drivers/clk/sirf/clk-common.c
+@@ -297,9 +297,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
+ {
+ struct clk_dmn *clk = to_dmnclk(hw);
+ u32 cfg = clkc_readl(clk->regofs);
++ const char *name = clk_hw_get_name(hw);
+
+ /* parent of io domain can only be pll3 */
+- if (strcmp(hw->init->name, "io") == 0)
++ if (strcmp(name, "io") == 0)
+ return 4;
+
+ WARN_ON((cfg & (BIT(3) - 1)) > 4);
+@@ -311,9 +312,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
+ {
+ struct clk_dmn *clk = to_dmnclk(hw);
+ u32 cfg = clkc_readl(clk->regofs);
++ const char *name = clk_hw_get_name(hw);
+
+ /* parent of io domain can only be pll3 */
+- if (strcmp(hw->init->name, "io") == 0)
++ if (strcmp(name, "io") == 0)
+ return -EINVAL;
+
+ cfg &= ~(BIT(3) - 1);
+@@ -353,7 +355,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ unsigned long fin;
+ unsigned ratio, wait, hold;
+- unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++ const char *name = clk_hw_get_name(hw);
++ unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+
+ fin = *parent_rate;
+ ratio = fin / rate;
+@@ -375,7 +378,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_dmn *clk = to_dmnclk(hw);
+ unsigned long fin;
+ unsigned ratio, wait, hold, reg;
+- unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++ const char *name = clk_hw_get_name(hw);
++ unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+
+ fin = parent_rate;
+ ratio = fin / rate;
+diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
+index e038b0447206..8bdab1c3013b 100644
+--- a/drivers/clk/sprd/common.c
++++ b/drivers/clk/sprd/common.c
+@@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
+ struct clk_hw *hw;
+
+ for (i = 0; i < clkhw->num; i++) {
++ const char *name;
+
+ hw = clkhw->hws[i];
+-
+ if (!hw)
+ continue;
+
++ name = hw->init->name;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Couldn't register clock %d - %s\n",
+- i, hw->init->name);
++ i, name);
+ return ret;
+ }
+ }
+diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
+index 36b4402bf09e..640270f51aa5 100644
+--- a/drivers/clk/sprd/pll.c
++++ b/drivers/clk/sprd/pll.c
+@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
+ k2 + refin * nint * CLK_PLL_1M;
+ }
+
++ kfree(cfg);
+ return rate;
+ }
+
+@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
+ if (!ret)
+ udelay(pll->udelay);
+
++ kfree(cfg);
+ return ret;
+ }
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+index cbbf06d42c2c..408a6750ddda 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+@@ -493,6 +493,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
++ [CLK_MMC2] = &mmc2_clk.common.hw,
++ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
++ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
+index fd6c347bec6a..dd7045bc48c1 100644
+--- a/drivers/clk/zte/clk-zx296718.c
++++ b/drivers/clk/zte/clk-zx296718.c
+@@ -564,6 +564,7 @@ static int __init top_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -573,11 +574,10 @@ static int __init top_clocks_init(struct device_node *np)
+
+ for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
+ zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
++ name = zx296718_pll_clk[i].hw.init->name;
+ ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- zx296718_pll_clk[i].hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
+@@ -585,11 +585,10 @@ static int __init top_clocks_init(struct device_node *np)
+ top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
+ &top_ffactor_clk[i].factor.hw;
+
++ name = top_ffactor_clk[i].factor.hw.init->name;
+ ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_ffactor_clk[i].factor.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
+@@ -598,11 +597,10 @@ static int __init top_clocks_init(struct device_node *np)
+ &top_mux_clk[i].mux.hw;
+
+ top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = top_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
+@@ -611,11 +609,10 @@ static int __init top_clocks_init(struct device_node *np)
+ &top_gate_clk[i].gate.hw;
+
+ top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = top_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
+@@ -624,11 +621,10 @@ static int __init top_clocks_init(struct device_node *np)
+ &top_div_clk[i].div.hw;
+
+ top_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = top_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -754,6 +750,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -767,11 +764,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ &lsp0_mux_clk[i].mux.hw;
+
+ lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = lsp0_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("lsp0 clk %s init error!\n",
+- lsp0_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp0 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
+@@ -780,11 +776,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ &lsp0_gate_clk[i].gate.hw;
+
+ lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = lsp0_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("lsp0 clk %s init error!\n",
+- lsp0_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp0 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
+@@ -793,11 +788,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ &lsp0_div_clk[i].div.hw;
+
+ lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = lsp0_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("lsp0 clk %s init error!\n",
+- lsp0_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp0 clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -862,6 +856,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -875,11 +870,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ &lsp0_mux_clk[i].mux.hw;
+
+ lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = lsp1_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("lsp1 clk %s init error!\n",
+- lsp1_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp1 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
+@@ -888,11 +882,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ &lsp1_gate_clk[i].gate.hw;
+
+ lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = lsp1_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("lsp1 clk %s init error!\n",
+- lsp1_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp1 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
+@@ -901,11 +894,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ &lsp1_div_clk[i].div.hw;
+
+ lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = lsp1_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("lsp1 clk %s init error!\n",
+- lsp1_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp1 clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -979,6 +971,7 @@ static int __init audio_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -992,11 +985,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_mux_clk[i].mux.hw;
+
+ audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = audio_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
+@@ -1005,11 +997,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_adiv_clk[i].hw;
+
+ audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
++ name = audio_adiv_clk[i].hw.init->name;
+ ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_adiv_clk[i].hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
+@@ -1018,11 +1009,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_div_clk[i].div.hw;
+
+ audio_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = audio_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
+@@ -1031,11 +1021,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_gate_clk[i].gate.hw;
+
+ audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = audio_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
+index 02768af0dccd..8c789b8671fc 100644
+--- a/drivers/crypto/hisilicon/sec/sec_algs.c
++++ b/drivers/crypto/hisilicon/sec/sec_algs.c
+@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+ dma_addr_t psec_sgl, struct sec_dev_info *info)
+ {
+ struct sec_hw_sgl *sgl_current, *sgl_next;
++ dma_addr_t sgl_next_dma;
+
+- if (!hw_sgl)
+- return;
+ sgl_current = hw_sgl;
+- while (sgl_current->next) {
++ while (sgl_current) {
+ sgl_next = sgl_current->next;
+- dma_pool_free(info->hw_sgl_pool, sgl_current,
+- sgl_current->next_sgl);
++ sgl_next_dma = sgl_current->next_sgl;
++
++ dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
++
+ sgl_current = sgl_next;
++ psec_sgl = sgl_next_dma;
+ }
+- dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
+ }
+
+ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
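
The sec_free_hw_sgl() rewrite frees a DMA-pool chain correctly: every element must be returned together with its own DMA handle, and that handle lives in the previous element (psec_sgl for the head, next_sgl after that), so both the CPU pointer and the matching handle are saved before the node holding them is freed. Structurally this is the standard save-the-link list walk; a runnable malloc/free sketch (struct node and free_chain are illustrative names):

#include <stdlib.h>

struct node {
    struct node *next;
};

/* Save the forward link before freeing the node that stores it. */
static void free_chain(struct node *head)
{
    while (head) {
        struct node *next = head->next;

        free(head);
        head = next;
    }
}

int main(void)
{
    struct node *head = NULL;

    for (int i = 0; i < 4; i++) {
        struct node *n = malloc(sizeof(*n));
        n->next = head;
        head = n;
    }
    free_chain(head);
    return 0;
}
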
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 051f6c2873c7..6713cfb1995c 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence)
+ {
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
++ unsigned long flags;
+
++ spin_lock_irqsave(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(fence->lock, flags);
+- if (!list_empty(&pt->link)) {
+- list_del(&pt->link);
+- rb_erase(&pt->node, &parent->pt_tree);
+- }
+- spin_unlock_irqrestore(fence->lock, flags);
++ list_del(&pt->link);
++ rb_erase(&pt->node, &parent->pt_tree);
+ }
++ spin_unlock_irqrestore(fence->lock, flags);
+
+ sync_timeline_put(parent);
+ dma_fence_free(fence);
+@@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+ p = &parent->rb_left;
+ } else {
+ if (dma_fence_get_rcu(&other->base)) {
+- dma_fence_put(&pt->base);
++ sync_timeline_put(obj);
++ kfree(pt);
+ pt = other;
+ goto unlock;
+ }
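
Two related fixes here. timeline_fence_release() now takes fence->lock unconditionally around the test-and-unlink, dropping an unlocked list_empty() fast path that could race with the signalling code walking the same list. And in sync_pt_create(), once release always takes that lock, calling dma_fence_put() on the losing duplicate while obj->lock is held would re-acquire the same spinlock; since nothing else has seen the new sync_pt yet, it is dismantled by hand with kfree() plus a timeline put. A compilable sketch of the lock-unconditionally shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

struct pt {
    struct pt *prev, *next;
    bool linked;
};

/* Test and unlink under the lock; no unlocked fast path left to race. */
static void pt_unlink(struct pt *pt)
{
    pthread_mutex_lock(&tree_lock);
    if (pt->linked) {
        pt->prev->next = pt->next;
        pt->next->prev = pt->prev;
        pt->linked = false;
    }
    pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
    struct pt a = { &a, &a, true };

    pt_unlink(&a);
    return 0;
}
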
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index e47609218839..bf0c61baa05c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -137,14 +137,14 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
+ domain = amdgpu_display_supported_domains(adev);
+-
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitches[0] * height;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+ ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+- AMDGPU_GEM_CREATE_VRAM_CLEARED,
++ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
++ AMDGPU_GEM_CREATE_VRAM_CLEARED |
++ AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ ttm_bo_type_kernel, NULL, &gobj);
+ if (ret) {
+ pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
+@@ -166,7 +166,6 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ dev_err(adev->dev, "FB failed to set tiling flags\n");
+ }
+
+-
+ ret = amdgpu_bo_pin(abo, domain);
+ if (ret) {
+ amdgpu_bo_unreserve(abo);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index d4fcf5475464..6fc77ac814d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -746,7 +746,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+- u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
++ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ u32 domain;
+ int r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index 9d8df68893b9..1e34dfc14355 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1867,7 +1867,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
+ if (orig != data)
+ si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+
+- if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
++ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+ orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
+ data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ if (orig != data)
+@@ -1916,14 +1916,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
+
+ orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
++ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+
+ orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
++ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 0a7adc2925e3..191f5757ded1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2016,6 +2016,14 @@ void dc_set_power_state(
+ dc_resource_state_construct(dc, dc->current_state);
+
+ dc->hwss.init_hw(dc);
++
++#ifdef CONFIG_DRM_AMD_DC_DCN2_0
++ if (dc->hwss.init_sys_ctx != NULL &&
++ dc->vm_pa_config.valid) {
++ dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
++ }
++#endif
++
+ break;
+ default:
+ ASSERT(dc->current_state->stream_count == 0);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+index b0dea759cd86..8aecf044e2ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+@@ -154,6 +154,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
+ break;
+ udelay(25); //MAx T7 is 50ms
+ } while (++tries < 300);
++
++ if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
++ udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
++
+ return result;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index b459ce056b60..c404b5e930f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -261,12 +261,10 @@ bool resource_construct(
+ DC_ERR("DC: failed to create audio!\n");
+ return false;
+ }
+-
+ if (!aud->funcs->endpoint_valid(aud)) {
+ aud->funcs->destroy(&aud);
+ break;
+ }
+-
+ pool->audios[i] = aud;
+ pool->audio_count++;
+ }
+@@ -1692,24 +1690,25 @@ static struct audio *find_first_free_audio(
+ const struct resource_pool *pool,
+ enum engine_id id)
+ {
+- int i;
+- for (i = 0; i < pool->audio_count; i++) {
++ int i, available_audio_count;
++
++ available_audio_count = pool->audio_count;
++
++ for (i = 0; i < available_audio_count; i++) {
+ if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+ /*we have enough audio endpoint, find the matching inst*/
+ if (id != i)
+ continue;
+-
+ return pool->audios[i];
+ }
+ }
+
+- /* use engine id to find free audio */
+- if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
++ /* use engine id to find free audio */
++ if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+ return pool->audios[id];
+ }
+-
+ /*not found the matching one, first come first serve*/
+- for (i = 0; i < pool->audio_count; i++) {
++ for (i = 0; i < available_audio_count; i++) {
+ if (res_ctx->is_audio_acquired[i] == false) {
+ return pool->audios[i];
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 6c2a3d9a4c2e..283082666be5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -202,6 +202,7 @@ struct dc_panel_patch {
+ unsigned int dppowerup_delay;
+ unsigned int extra_t12_ms;
+ unsigned int extra_delay_backlight_off;
++ unsigned int extra_t7_ms;
+ };
+
+ struct dc_edid_caps {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+index 7f6d724686f1..abb559ce6408 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+@@ -611,6 +611,8 @@ void dce_aud_az_configure(
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ value);
++ DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
++ audio->inst, value, audio_info->display_name);
+
+ /*
+ *write the port ID:
+@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
+ .az_configure = dce_aud_az_configure,
+ .destroy = dce_aud_destroy,
+ };
+-
+ void dce_aud_destroy(struct audio **audio)
+ {
+ struct dce_audio *aud = DCE_AUD(*audio);
+@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
+ audio->regs = reg;
+ audio->shifts = shifts;
+ audio->masks = masks;
+-
+ return &audio->base;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 7469333a2c8a..8166fdbacd73 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -357,9 +357,10 @@ bool cm_helper_translate_curve_to_hw_format(
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
++ seg_distr[10] = 1;
+
+ region_start = -10;
+- region_end = 0;
++ region_end = 1;
+ }
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 19b1eaebe484..000a9db9dad8 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -433,6 +433,12 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ /* Either we've calculated the number of frames to insert,
+ * or we need to insert min duration frames
+ */
++ if (last_render_time_in_us / frames_to_insert <
++ in_out_vrr->min_duration_in_us){
++ frames_to_insert -= (frames_to_insert > 1) ?
++ 1 : 0;
++ }
++
+ if (frames_to_insert > 0)
+ inserted_frame_duration_in_us = last_render_time_in_us /
+ frames_to_insert;
+@@ -885,8 +891,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ struct core_freesync *core_freesync = NULL;
+ unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int refresh_range = 0;
+- unsigned int min_refresh_in_uhz = 0;
+- unsigned int max_refresh_in_uhz = 0;
++ unsigned long long min_refresh_in_uhz = 0;
++ unsigned long long max_refresh_in_uhz = 0;
+
+ if (mod_freesync == NULL)
+ return;
+@@ -913,7 +919,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ min_refresh_in_uhz = nominal_field_rate_in_uhz;
+
+ if (!vrr_settings_require_update(core_freesync,
+- in_config, min_refresh_in_uhz, max_refresh_in_uhz,
++ in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
+ in_out_vrr))
+ return;
+
+@@ -929,15 +935,15 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ return;
+
+ } else {
+- in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz;
++ in_out_vrr->min_refresh_in_uhz = (unsigned int)min_refresh_in_uhz;
+ in_out_vrr->max_duration_in_us =
+ calc_duration_in_us_from_refresh_in_uhz(
+- min_refresh_in_uhz);
++ (unsigned int)min_refresh_in_uhz);
+
+- in_out_vrr->max_refresh_in_uhz = max_refresh_in_uhz;
++ in_out_vrr->max_refresh_in_uhz = (unsigned int)max_refresh_in_uhz;
+ in_out_vrr->min_duration_in_us =
+ calc_duration_in_us_from_refresh_in_uhz(
+- max_refresh_in_uhz);
++ (unsigned int)max_refresh_in_uhz);
+
+ refresh_range = in_out_vrr->max_refresh_in_uhz -
+ in_out_vrr->min_refresh_in_uhz;
+@@ -948,17 +954,18 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ in_out_vrr->fixed.ramping_active = in_config->ramping;
+
+ in_out_vrr->btr.btr_enabled = in_config->btr;
++
+ if (in_out_vrr->max_refresh_in_uhz <
+ 2 * in_out_vrr->min_refresh_in_uhz)
+ in_out_vrr->btr.btr_enabled = false;
++
+ in_out_vrr->btr.btr_active = false;
+ in_out_vrr->btr.inserted_duration_in_us = 0;
+ in_out_vrr->btr.frames_to_insert = 0;
+ in_out_vrr->btr.frame_counter = 0;
+ in_out_vrr->btr.mid_point_in_us =
+- in_out_vrr->min_duration_in_us +
+- (in_out_vrr->max_duration_in_us -
+- in_out_vrr->min_duration_in_us) / 2;
++ (in_out_vrr->min_duration_in_us +
++ in_out_vrr->max_duration_in_us) / 2;
+
+ if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) {
+ in_out_vrr->adjust.v_total_min = stream->timing.v_total;
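
The freesync hunks do two things: frames_to_insert is backed off by one when the resulting inserted-frame duration would undershoot min_duration_in_us, and the intermediate refresh-rate variables are widened to unsigned long long. Rates are carried in micro-hertz, so a 32-bit intermediate product wraps easily even though the final u32 fields are fine; computing wide and casting at the assignment avoids that. A tiny demonstration of the kind of wrap this guards against (the values are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t rate_uhz = 120000000u;          /* 120 Hz in micro-hertz */
    uint32_t scaled32 = rate_uhz * 100u;     /* 12e9 wraps in 32 bits */
    uint64_t scaled64 = (uint64_t)rate_uhz * 100u;

    printf("32-bit: %u, 64-bit: %llu\n", scaled32,
           (unsigned long long)scaled64);
    return 0;
}
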
+diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+index 3666c308c34a..53676b5fec68 100644
+--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+@@ -1036,16 +1036,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
+ if (ret)
+ return ret;
+
++ /* Check whether panel supports fast training */
++ ret = analogix_dp_fast_link_train_detection(dp);
++ if (ret)
++ dp->psr_enable = false;
++
+ if (dp->psr_enable) {
+ ret = analogix_dp_enable_sink_psr(dp);
+ if (ret)
+ return ret;
+ }
+
+- /* Check whether panel supports fast training */
+- ret = analogix_dp_fast_link_train_detection(dp);
+- if (ret)
+- dp->psr_enable = false;
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index f59a51e19dab..d515c7cebb9c 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -293,7 +293,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+ {
+ struct tc_data *tc = aux_to_tc(aux);
+- size_t size = min_t(size_t, 8, msg->size);
++ size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ u8 *buf = msg->buffer;
+ u32 tmp = 0;
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index 283ff690350e..50303ec194bb 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -320,7 +320,9 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
+ asyh->wndw.olut &= ~BIT(wndw->id);
+ }
+
+- if (!ilut && wndw->func->ilut_identity) {
++ if (!ilut && wndw->func->ilut_identity &&
++ asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
++ asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
+ static struct drm_property_blob dummy = {};
+ ilut = &dummy;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+index 7143ea4611aa..33a9fb5ac558 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ info->min = min(info->base,
+ info->base + info->step * info->vidmask);
+ info->max = nvbios_rd32(bios, volt + 0x0e);
++ if (!info->max)
++ info->max = max(info->base, info->base + info->step * info->vidmask);
+ break;
+ case 0x50:
+ info->min = nvbios_rd32(bios, volt + 0x0a);
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index 2c9c9722734f..9a2cb8aeab3a 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+
+ /* Look up the DSI host. It needs to probe before we do. */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
++ if (!endpoint)
++ return -ENODEV;
++
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
++ if (!dsi_host_node)
++ goto error;
++
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host) {
+@@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ }
+
+ info.node = of_graph_get_remote_port(endpoint);
++ if (!info.node)
++ goto error;
++
+ of_node_put(endpoint);
+
+ ts->dsi = mipi_dsi_device_register_full(host, &info);
+@@ -429,6 +438,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ return ret;
+
+ return 0;
++
++error:
++ of_node_put(endpoint);
++ return -ENODEV;
+ }
+
+ static int rpi_touchscreen_remove(struct i2c_client *i2c)
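
The touchscreen probe fix adds the missing NULL checks on the of_graph lookups and releases the endpoint reference on every exit path through a single error label, since of_graph_get_next_endpoint() hands back a node with its refcount raised. A hedged userspace sketch of that acquire-then-funnel-errors shape; lookup_endpoint/lookup_remote are stand-ins, with malloc/free playing the get/put roles:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins: lookups that can fail, and free() as the matching release. */
static char *lookup_endpoint(int ok) { return ok ? malloc(16) : NULL; }
static char *lookup_remote(int ok)   { return ok ? malloc(16) : NULL; }

static int probe(int have_ep, int have_remote)
{
    char *ep = lookup_endpoint(have_ep);
    char *remote;

    if (!ep)
        return -ENODEV;              /* nothing acquired yet */

    remote = lookup_remote(have_remote);
    if (!remote)
        goto error;                  /* ep is held, release it below */

    free(remote);
    free(ep);
    return 0;

error:
    free(ep);
    return -ENODEV;
}

int main(void)
{
    printf("%d %d %d\n", probe(1, 1), probe(0, 1), probe(1, 0));
    return 0;
}
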
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 397a3086eac8..95e430f9fea4 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -723,9 +723,9 @@ static const struct panel_desc auo_g133han01 = {
+ static const struct display_timing auo_g185han01_timings = {
+ .pixelclock = { 120000000, 144000000, 175000000 },
+ .hactive = { 1920, 1920, 1920 },
+- .hfront_porch = { 18, 60, 74 },
+- .hback_porch = { 12, 44, 54 },
+- .hsync_len = { 10, 24, 32 },
++ .hfront_porch = { 36, 120, 148 },
++ .hback_porch = { 24, 88, 108 },
++ .hsync_len = { 20, 48, 64 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 6, 10, 40 },
+ .vback_porch = { 2, 5, 20 },
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index de1745adcccc..c7f2e073a82f 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -752,7 +752,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
+
+ radeon_encoder->output_csc = val;
+
+- if (connector->encoder->crtc) {
++ if (connector->encoder && connector->encoder->crtc) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index 2e96c886392b..60ee51edd782 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -344,11 +344,19 @@ radeon_pci_remove(struct pci_dev *pdev)
+ static void
+ radeon_pci_shutdown(struct pci_dev *pdev)
+ {
++ struct drm_device *ddev = pci_get_drvdata(pdev);
++
+ /* if we are running in a VM, make sure the device
+ * torn down properly on reboot/shutdown
+ */
+ if (radeon_device_is_virtual())
+ radeon_pci_remove(pdev);
++
++ /* Some adapters need to be suspended before a
++ * shutdown occurs in order to prevent an error
++ * during kexec.
++ */
++ radeon_suspend_kms(ddev, true, true, false);
+ }
+
+ static int radeon_pmops_suspend(struct device *dev)
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 32fd6a3b37fb..6f1fef76671c 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -25,6 +25,7 @@
+ #include <drm/drm_fb_cma_helper.h>
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_gem_cma_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_of.h>
+ #include <drm/drm_plane_helper.h>
+ #include <drm/drm_probe_helper.h>
+@@ -875,6 +876,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
+ };
+
+ static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
++ .prepare_fb = drm_gem_fb_prepare_fb,
+ .atomic_check = ltdc_plane_atomic_check,
+ .atomic_update = ltdc_plane_atomic_update,
+ .atomic_disable = ltdc_plane_atomic_disable,
+diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
+index 87819c82bcce..f2f0739d1035 100644
+--- a/drivers/gpu/drm/tinydrm/Kconfig
++++ b/drivers/gpu/drm/tinydrm/Kconfig
+@@ -14,8 +14,8 @@ config TINYDRM_MIPI_DBI
+ config TINYDRM_HX8357D
+ tristate "DRM support for HX8357D display panels"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following HX8357D panels:
+ * YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5")
+@@ -35,8 +35,8 @@ config TINYDRM_ILI9225
+ config TINYDRM_ILI9341
+ tristate "DRM support for ILI9341 display panels"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following Ilitek ILI9341 panels:
+ * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+@@ -46,8 +46,8 @@ config TINYDRM_ILI9341
+ config TINYDRM_MI0283QT
+ tristate "DRM support for MI0283QT"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the Multi-Inno MI0283QT display panel
+ If M is selected the module will be called mi0283qt.
+@@ -78,8 +78,8 @@ config TINYDRM_ST7586
+ config TINYDRM_ST7735R
+ tristate "DRM support for Sitronix ST7735R display panels"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver Sitronix ST7735R with one of the following LCDs:
+ * JD-T18003-T01 1.8" 128x160 TFT
+diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
+index d7b409a3c0f8..66603da634fe 100644
+--- a/drivers/gpu/drm/vkms/vkms_crc.c
++++ b/drivers/gpu/drm/vkms/vkms_crc.c
+@@ -166,16 +166,24 @@ void vkms_crc_work_handle(struct work_struct *work)
+ struct drm_plane *plane;
+ u32 crc32 = 0;
+ u64 frame_start, frame_end;
++ bool crc_pending;
+ unsigned long flags;
+
+ spin_lock_irqsave(&out->state_lock, flags);
+ frame_start = crtc_state->frame_start;
+ frame_end = crtc_state->frame_end;
++ crc_pending = crtc_state->crc_pending;
++ crtc_state->frame_start = 0;
++ crtc_state->frame_end = 0;
++ crtc_state->crc_pending = false;
+ spin_unlock_irqrestore(&out->state_lock, flags);
+
+- /* _vblank_handle() hasn't updated frame_start yet */
+- if (!frame_start || frame_start == frame_end)
+- goto out;
++ /*
++ * We raced with the vblank hrtimer and previous work already computed
++ * the crc, nothing to do.
++ */
++ if (!crc_pending)
++ return;
+
+ drm_for_each_plane(plane, &vdev->drm) {
+ struct vkms_plane_state *vplane_state;
+@@ -196,20 +204,11 @@ void vkms_crc_work_handle(struct work_struct *work)
+ if (primary_crc)
+ crc32 = _vkms_get_crc(primary_crc, cursor_crc);
+
+- frame_end = drm_crtc_accurate_vblank_count(crtc);
+-
+- /* queue_work can fail to schedule crc_work; add crc for
+- * missing frames
++ /*
++ * The worker can fall behind the vblank hrtimer, make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+-
+-out:
+- /* to avoid using the same value for frame number again */
+- spin_lock_irqsave(&out->state_lock, flags);
+- crtc_state->frame_end = frame_end;
+- crtc_state->frame_start = 0;
+- spin_unlock_irqrestore(&out->state_lock, flags);
+ }
+
+ static int vkms_crc_parse_source(const char *src_name, bool *enabled)
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index e447b7588d06..77a1f5fa5d5c 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -30,13 +30,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+ * has read the data
+ */
+ spin_lock(&output->state_lock);
+- if (!state->frame_start)
++ if (!state->crc_pending)
+ state->frame_start = frame;
++ else
++ DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
++ state->frame_start, frame);
++ state->frame_end = frame;
++ state->crc_pending = true;
+ spin_unlock(&output->state_lock);
+
+ ret = queue_work(output->crc_workq, &state->crc_work);
+ if (!ret)
+- DRM_WARN("failed to queue vkms_crc_work_handle");
++ DRM_DEBUG_DRIVER("vkms_crc_work_handle already queued\n");
+ }
+
+ spin_unlock(&output->lock);
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 738dd6206d85..92296bd8f623 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -92,7 +92,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
+ dev->mode_config.max_height = YRES_MAX;
+ dev->mode_config.preferred_depth = 24;
+
+- return vkms_output_init(vkmsdev);
++ return vkms_output_init(vkmsdev, 0);
+ }
+
+ static int __init vkms_init(void)
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
+index 81f1cfbeb936..a0adcc86079f 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -56,6 +56,8 @@ struct vkms_plane_state {
+ struct vkms_crtc_state {
+ struct drm_crtc_state base;
+ struct work_struct crc_work;
++
++ bool crc_pending;
+ u64 frame_start;
+ u64 frame_end;
+ };
+@@ -113,10 +115,10 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+-int vkms_output_init(struct vkms_device *vkmsdev);
++int vkms_output_init(struct vkms_device *vkmsdev, int index);
+
+ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+- enum drm_plane_type type);
++ enum drm_plane_type type, int index);
+
+ /* Gem stuff */
+ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
+index 3b162b25312e..1442b447c707 100644
+--- a/drivers/gpu/drm/vkms/vkms_output.c
++++ b/drivers/gpu/drm/vkms/vkms_output.c
+@@ -36,7 +36,7 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ };
+
+-int vkms_output_init(struct vkms_device *vkmsdev)
++int vkms_output_init(struct vkms_device *vkmsdev, int index)
+ {
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_device *dev = &vkmsdev->drm;
+@@ -46,12 +46,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
+ struct drm_plane *primary, *cursor = NULL;
+ int ret;
+
+- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
++ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ if (enable_cursor) {
+- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
++ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto err_cursor;
+diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
+index 0e67d2d42f0c..20ffc52f9194 100644
+--- a/drivers/gpu/drm/vkms/vkms_plane.c
++++ b/drivers/gpu/drm/vkms/vkms_plane.c
+@@ -168,7 +168,7 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+ };
+
+ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+- enum drm_plane_type type)
++ enum drm_plane_type type, int index)
+ {
+ struct drm_device *dev = &vkmsdev->drm;
+ const struct drm_plane_helper_funcs *funcs;
+@@ -190,7 +190,7 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ funcs = &vkms_primary_helper_funcs;
+ }
+
+- ret = drm_universal_plane_init(dev, plane, 0,
++ ret = drm_universal_plane_init(dev, plane, 1 << index,
+ &vkms_plane_funcs,
+ formats, nformats,
+ NULL, type, NULL);
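
The vkms series replaces the old convention of treating a non-zero frame_start as "work pending" with an explicit crc_pending flag: the vblank hrtimer records frame_start/frame_end and sets the flag under the state spinlock, and the worker snapshots all three fields and clears them under the same lock before computing anything. Consuming the producer state as one atomic unit is what removes the missed-frame and reused-frame-number workarounds deleted above. A compact pthread sketch of that snapshot-and-clear idiom; the field names follow the driver, everything else is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool crc_pending;
static uint64_t frame_start, frame_end;

static void producer(uint64_t frame)        /* vblank side */
{
    pthread_mutex_lock(&state_lock);
    if (!crc_pending)
        frame_start = frame;                /* first unconsumed frame */
    frame_end = frame;
    crc_pending = true;
    pthread_mutex_unlock(&state_lock);
}

static void consumer(void)                  /* worker side */
{
    uint64_t start, end;
    bool pending;

    pthread_mutex_lock(&state_lock);
    start = frame_start;
    end = frame_end;
    pending = crc_pending;
    frame_start = frame_end = 0;
    crc_pending = false;
    pthread_mutex_unlock(&state_lock);

    if (!pending)
        return;                             /* raced with a previous run */
    for (uint64_t f = start; f <= end; f++)
        printf("crc entry for frame %llu\n", (unsigned long long)f);
}

int main(void)
{
    producer(10);
    producer(11);
    consumer();
    return 0;
}
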
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 81df62f48c4c..6ac8becc2372 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -54,7 +54,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
+ struct apple_sc {
+ unsigned long quirks;
+ unsigned int fn_on;
+- DECLARE_BITMAP(pressed_fn, KEY_CNT);
+ DECLARE_BITMAP(pressed_numlock, KEY_CNT);
+ };
+
+@@ -181,6 +180,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ {
+ struct apple_sc *asc = hid_get_drvdata(hid);
+ const struct apple_key_translation *trans, *table;
++ bool do_translate;
++ u16 code = 0;
+
+ if (usage->code == KEY_FN) {
+ asc->fn_on = !!value;
+@@ -189,8 +190,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ }
+
+ if (fnmode) {
+- int do_translate;
+-
+ if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+ hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
+ table = macbookair_fn_keys;
+@@ -202,25 +201,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ trans = apple_find_translation (table, usage->code);
+
+ if (trans) {
+- if (test_bit(usage->code, asc->pressed_fn))
+- do_translate = 1;
+- else if (trans->flags & APPLE_FLAG_FKEY)
+- do_translate = (fnmode == 2 && asc->fn_on) ||
+- (fnmode == 1 && !asc->fn_on);
+- else
+- do_translate = asc->fn_on;
+-
+- if (do_translate) {
+- if (value)
+- set_bit(usage->code, asc->pressed_fn);
+- else
+- clear_bit(usage->code, asc->pressed_fn);
+-
+- input_event(input, usage->type, trans->to,
+- value);
+-
+- return 1;
++ if (test_bit(trans->from, input->key))
++ code = trans->from;
++ else if (test_bit(trans->to, input->key))
++ code = trans->to;
++
++ if (!code) {
++ if (trans->flags & APPLE_FLAG_FKEY) {
++ switch (fnmode) {
++ case 1:
++ do_translate = !asc->fn_on;
++ break;
++ case 2:
++ do_translate = asc->fn_on;
++ break;
++ default:
++ /* should never happen */
++ do_translate = false;
++ }
++ } else {
++ do_translate = asc->fn_on;
++ }
++
++ code = do_translate ? trans->to : trans->from;
+ }
++
++ input_event(input, usage->type, code, value);
++ return 1;
+ }
+
+ if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
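
The hid-apple rework drops the private pressed_fn bitmap and consults the input core's key-state bitmap instead: if either side of a translation pair is already down, its release is reported as that same code, so a key cannot get stuck when Fn changes state between press and release; only when neither is down does fnmode pick the direction. A small sketch of that decision table; apple_fn_code is an invented helper and the keycodes in main are merely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pick the code to report for a translated key pair (from, to). */
static uint16_t apple_fn_code(bool from_down, bool to_down,
                              bool fn_on, int fnmode,
                              bool is_fkey, uint16_t from, uint16_t to)
{
    if (from_down)
        return from;              /* finish whatever press we started */
    if (to_down)
        return to;

    if (is_fkey) {
        if (fnmode == 1)          /* media keys by default */
            return fn_on ? from : to;
        if (fnmode == 2)          /* function keys by default */
            return fn_on ? to : from;
        return from;              /* should never happen */
    }
    return fn_on ? to : from;
}

int main(void)
{
    /* F1 pair, fnmode=1, Fn not held: expect the media code ("to"). */
    printf("%u\n", apple_fn_code(false, false, false, 1, true, 59, 224));
    return 0;
}
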
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 53bddb50aeba..602219a8710d 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -88,7 +88,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
+ }
+
+ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+- struct hid_report *report, u8 *raw_data, int size)
++ struct hid_report *report, u8 *raw_data, int report_size)
+ {
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+@@ -149,7 +149,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ if (flush)
+ wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ else if (insert)
+- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
++ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
++ raw_data, report_size);
+
+ return insert && !flush;
+ }
+@@ -2176,7 +2177,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
+ {
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+- char name[WACOM_NAME_MAX];
++ char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
+
+ /* Generic devices name unspecified */
+ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 58719461850d..6be98851edca 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -251,7 +251,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
+
+ static int wacom_dtus_irq(struct wacom_wac *wacom)
+ {
+- char *data = wacom->data;
++ unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->pen_input;
+ unsigned short prox, pressure = 0;
+
+@@ -572,7 +572,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
+ strip2 = ((data[3] & 0x1f) << 8) | data[4];
+ }
+
+- prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
++ prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
+ (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
+
+ wacom_report_numbered_buttons(input, nbuttons, buttons);
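
Swapping ~0 for ~0U in the wacom mask is a correctness fix, not style: ~0 is a signed int holding -1, and left-shifting a negative signed value is undefined behaviour in C. With ~0U the shift is a well-defined unsigned operation and the expression builds a mask of the nbuttons low bits. A runnable illustration:

#include <stdio.h>

/* Mask of the n lowest bits, built with well-defined unsigned shifts.
 * Note: n must be < 32; shifting by the full width is still undefined. */
static unsigned int low_bits(unsigned int n)
{
    return ~(~0U << n);
}

int main(void)
{
    printf("low_bits(9) = 0x%03x\n", low_bits(9));   /* 0x1ff */
    /* (~0 << 9) would left-shift the signed value -1: undefined. */
    return 0;
}
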
+diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
+index 66af44bfa67d..f6546de66fbc 100644
+--- a/drivers/i2c/busses/i2c-cht-wc.c
++++ b/drivers/i2c/busses/i2c-cht-wc.c
+@@ -178,6 +178,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
+ .smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
+ };
+
++/*
++ * We are an i2c-adapter which itself is part of an i2c-client. This means that
++ * transfers done through us take adapter->bus_lock twice, once for our parent
++ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
++ * nested locking, to make lockdep happy in the case of busses with muxes, the
++ * i2c-core's i2c_adapter_lock_bus function calls:
++ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
++ *
++ * But i2c_adapter_depth only works when the direct parent of the adapter is
++ * another adapter, as it is only meant for muxes. In our case there is an
++ * i2c-client and MFD instantiated platform_device in the parent->child chain
++ * between the 2 devices.
++ *
++ * So we override the default i2c_lock_operations and pass a hardcoded
++ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
++ *
++ * Note that if there were to be a mux attached to our adapter, this would
++ * break things again since the i2c-mux code expects the root-adapter to have
++ * a locking depth of 0. But we always have only 1 client directly attached
++ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
++ */
++static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
++ unsigned int flags)
++{
++ rt_mutex_lock_nested(&adapter->bus_lock, 1);
++}
++
++static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
++ unsigned int flags)
++{
++ return rt_mutex_trylock(&adapter->bus_lock);
++}
++
++static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
++ unsigned int flags)
++{
++ rt_mutex_unlock(&adapter->bus_lock);
++}
++
++static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
++ .lock_bus = cht_wc_i2c_adap_lock_bus,
++ .trylock_bus = cht_wc_i2c_adap_trylock_bus,
++ .unlock_bus = cht_wc_i2c_adap_unlock_bus,
++};
++
+ /**** irqchip for the client connected to the extchgr i2c adapter ****/
+ static void cht_wc_i2c_irq_lock(struct irq_data *data)
+ {
+@@ -286,6 +331,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
+ adap->adapter.owner = THIS_MODULE;
+ adap->adapter.class = I2C_CLASS_HWMON;
+ adap->adapter.algo = &cht_wc_i2c_adap_algo;
++ adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
+ strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+ sizeof(adap->adapter.name));
+ adap->adapter.dev.parent = &pdev->dev;
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index 00d5219094e5..48bba4913952 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -22,6 +22,7 @@
+ #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+
+ #define CMDQ_CURR_IRQ_STATUS 0x10
++#define CMDQ_SYNC_TOKEN_UPDATE 0x68
+ #define CMDQ_THR_SLOT_CYCLES 0x30
+ #define CMDQ_THR_BASE 0x100
+ #define CMDQ_THR_SIZE 0x80
+@@ -104,8 +105,12 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
+
+ static void cmdq_init(struct cmdq *cmdq)
+ {
++ int i;
++
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
++ for (i = 0; i <= CMDQ_MAX_EVENT; i++)
++ writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
+ clk_disable(cmdq->clock);
+ }
+
+diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+index 705e17a5479c..d3676fd3cf94 100644
+--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+@@ -47,7 +47,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+
+ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ {
+- struct device_node *np = pdev->dev.of_node;
+ struct qcom_apcs_ipc *apcs;
+ struct regmap *regmap;
+ struct resource *res;
+@@ -55,6 +54,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ void __iomem *base;
+ unsigned long i;
+ int ret;
++ const struct of_device_id apcs_clk_match_table[] = {
++ { .compatible = "qcom,msm8916-apcs-kpss-global", },
++ { .compatible = "qcom,qcs404-apcs-apps-global", },
++ {}
++ };
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+@@ -89,7 +93,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
++ if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ "qcom-apcs-msm8916-clk",
+ -1, NULL, 0);
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 01aaac2c15be..8efd15e40a28 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3738,18 +3738,18 @@ static int raid_iterate_devices(struct dm_target *ti,
+ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ {
+ struct raid_set *rs = ti->private;
+- unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
++ unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
+
+- blk_limits_io_min(limits, chunk_size);
+- blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
++ blk_limits_io_min(limits, chunk_size_bytes);
++ blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
+
+ /*
+ * RAID1 and RAID10 personalities require bio splitting,
+ * RAID0/4/5/6 don't and process large discard bios properly.
+ */
+ if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+- limits->discard_granularity = chunk_size;
+- limits->max_discard_sectors = chunk_size;
++ limits->discard_granularity = chunk_size_bytes;
++ limits->max_discard_sectors = rs->md.chunk_sectors;
+ }
+ }
+
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index aed2c0447966..3c271b14e7c6 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -35,6 +35,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
+ info->mem = &pdev->resource[0];
+ info->irq = pdev->irq;
+
++ pdev->d3cold_delay = 0;
++
+ /* Probably it is enough to set this for iDMA capable devices only */
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index ca3d17e43ed8..ac88caca5ad4 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+ {
+ struct realtek_smi *smi = ds->priv;
++ u16 vid;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
+- return -EINVAL;
++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return -EINVAL;
+
+ dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
+ vlan->vid_begin, vlan->vid_end);
+@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ u16 vid;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
+- return;
++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return;
+
+ dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ port,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+index 6c685b920713..bf17cf3ef613 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ static int alloc_uld_rxqs(struct adapter *adap,
+ struct sge_uld_rxq_info *rxq_info, bool lro)
+ {
+- struct sge *s = &adap->sge;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
++ int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
+- unsigned int bmap_idx = 0;
++ struct sge *s = &adap->sge;
+ unsigned int per_chan;
+- int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
+
+@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
+
+ if (msi_idx >= 0) {
+ bmap_idx = get_msix_idx_from_bmap(adap);
++ if (bmap_idx < 0) {
++ err = -ENOSPC;
++ goto freeout;
++ }
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+ }
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index 457444894d80..b4b8ba00ee01 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
++ dev_kfree_skb_irq(skb);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index ce78714f536f..a505b2ab88b8 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2620,14 +2620,18 @@ static struct hso_device *hso_create_bulk_serial_device(
+ */
+ if (serial->tiocmget) {
+ tiocmget = serial->tiocmget;
++ tiocmget->endp = hso_get_ep(interface,
++ USB_ENDPOINT_XFER_INT,
++ USB_DIR_IN);
++ if (!tiocmget->endp) {
++ dev_err(&interface->dev, "Failed to find INT IN ep\n");
++ goto exit;
++ }
++
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (tiocmget->urb) {
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+- tiocmget->endp = hso_get_ep(
+- interface,
+- USB_ENDPOINT_XFER_INT,
+- USB_DIR_IN);
+ } else
+ hso_free_tiomget(serial);
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 26c5207466af..54390b77ae21 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1349,6 +1349,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 5f5722bf6762..7370e06a0e4b 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
+ return 0;
+ }
+
+-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+- struct sk_buff *skb,
+- struct sk_buff_head *list)
++static int xennet_fill_frags(struct netfront_queue *queue,
++ struct sk_buff *skb,
++ struct sk_buff_head *list)
+ {
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *nskb;
+@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ kfree_skb(nskb);
+- return ~0U;
++ return -ENOENT;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ kfree_skb(nskb);
+ }
+
+- return cons;
++ queue->rx.rsp_cons = cons;
++
++ return 0;
+ }
+
+ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
+@@ -1045,8 +1047,7 @@ err:
+ skb->data_len = rx->status;
+ skb->len += rx->status;
+
+- i = xennet_fill_frags(queue, skb, &tmpq);
+- if (unlikely(i == ~0U))
++ if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
+ goto err;
+
+ if (rx->flags & XEN_NETRXF_csum_blank)
+@@ -1056,7 +1057,7 @@ err:
+
+ __skb_queue_tail(&rxq, skb);
+
+- queue->rx.rsp_cons = ++i;
++ i = ++queue->rx.rsp_cons;
+ work_done++;
+ }
+
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 2ab92409210a..297bf928d652 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -181,7 +181,7 @@ config PCI_LABEL
+
+ config PCI_HYPERV
+ tristate "Hyper-V PCI Frontend"
+- depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
++ depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index cee5f2f590e2..14a6ba4067fb 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
+
+ ep->phy = devm_of_phy_get(dev, np, NULL);
+ if (IS_ERR(ep->phy)) {
+- if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
++ if (PTR_ERR(ep->phy) != -ENODEV)
+ return PTR_ERR(ep->phy);
+
+ ep->phy = NULL;
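
This exynos hunk, together with the imx6, histb and rockchip ones below, inverts the error handling for optional resources: the *_get_optional() helpers return -ENODEV when the resource simply is not described, and that is the only error safe to ignore; everything else, -EPROBE_DEFER included, must be propagated with its own value. The old code special-cased only -EPROBE_DEFER and so swallowed real failures. A hedged sketch of the resulting shape; get_optional_thing() is a stand-in for the optional getter:

#include <errno.h>
#include <stdio.h>

/* Stand-in for an optional getter: negative errno, or 1 for "found". */
static long get_optional_thing(long result) { return result; }

static int probe(long lookup_result)
{
    long thing = get_optional_thing(lookup_result);

    if (thing < 0) {
        if (thing != -ENODEV)
            return (int)thing;   /* real error, incl. -EPROBE_DEFER */
        thing = 0;               /* resource absent: carry on without it */
    }
    printf("probe ok, resource %s\n", thing ? "present" : "absent");
    return 0;
}

int main(void)
{
    probe(1);
    probe(-ENODEV);
    printf("probe(-EAGAIN) = %d\n", probe(-EAGAIN));
    return 0;
}
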
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 9b5cb5b70389..aabf22eaa6b9 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -1173,8 +1173,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
+
+ imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx6_pcie->vpcie)) {
+- if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
++ return PTR_ERR(imx6_pcie->vpcie);
+ imx6_pcie->vpcie = NULL;
+ }
+
+diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+index be61d96cc95e..ca9aa4501e7e 100644
+--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
++++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+@@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
++ .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+ };
+
+ static const struct pci_epc_features*
+diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
+index 954bc2b74bbc..811b5c6d62ea 100644
+--- a/drivers/pci/controller/dwc/pcie-histb.c
++++ b/drivers/pci/controller/dwc/pcie-histb.c
+@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
+
+ hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+ if (IS_ERR(hipcie->vpcie)) {
+- if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(hipcie->vpcie) != -ENODEV)
++ return PTR_ERR(hipcie->vpcie);
+ hipcie->vpcie = NULL;
+ }
+
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 464ba2538d52..03c42e8684f6 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -1994,14 +1994,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ err = of_pci_get_devfn(port);
+ if (err < 0) {
+ dev_err(dev, "failed to parse address: %d\n", err);
+- return err;
++ goto err_node_put;
+ }
+
+ index = PCI_SLOT(err);
+
+ if (index < 1 || index > soc->num_ports) {
+ dev_err(dev, "invalid port number: %d\n", index);
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_node_put;
+ }
+
+ index--;
+@@ -2010,12 +2011,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ if (err < 0) {
+ dev_err(dev, "failed to parse # of lanes: %d\n",
+ err);
+- return err;
++ goto err_node_put;
+ }
+
+ if (value > 16) {
+ dev_err(dev, "invalid # of lanes: %u\n", value);
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_node_put;
+ }
+
+ lanes |= value << (index << 3);
+@@ -2029,13 +2031,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ lane += value;
+
+ rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
+- if (!rp)
+- return -ENOMEM;
++ if (!rp) {
++ err = -ENOMEM;
++ goto err_node_put;
++ }
+
+ err = of_address_to_resource(port, 0, &rp->regs);
+ if (err < 0) {
+ dev_err(dev, "failed to parse address: %d\n", err);
+- return err;
++ goto err_node_put;
+ }
+
+ INIT_LIST_HEAD(&rp->list);
+@@ -2062,6 +2066,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ return err;
+
+ return 0;
++
++err_node_put:
++ of_node_put(port);
++ return err;
+ }
+
+ /*
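for_each_child_of_node() holds a reference on the child node it yields on each iteration, so every early return out of the loop body leaked that reference; the hunk above funnels all failure paths through one exit that drops it. The general shape of the fix (parse_one_port() is a hypothetical stand-in for the per-child work):

#include <linux/of.h>

int parse_one_port(struct device_node *port);   /* hypothetical */

static int parse_ports(struct device_node *parent)
{
        struct device_node *port;
        int err = 0;

        for_each_child_of_node(parent, port) {
                err = parse_one_port(port);
                if (err < 0)
                        goto err_node_put;      /* must not return directly */
        }
        return 0;

err_node_put:
        of_node_put(port);      /* balance the reference the iterator took */
        return err;
}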
+diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
+index 8d20f1793a61..ef8e677ce9d1 100644
+--- a/drivers/pci/controller/pcie-rockchip-host.c
++++ b/drivers/pci/controller/pcie-rockchip-host.c
+@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+- if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie12v);
+ dev_info(dev, "no vpcie12v regulator found\n");
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+- if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie3v3);
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+- if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie1v8);
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+- if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie0v9);
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
+index bcd5d357ca23..c3899ee1db99 100644
+--- a/drivers/pci/hotplug/rpaphp_core.c
++++ b/drivers/pci/hotplug/rpaphp_core.c
+@@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
+ struct of_drc_info drc;
+ const __be32 *value;
+ char cell_drc_name[MAX_DRC_NAME_LEN];
+- int j, fndit;
++ int j;
+
+ info = of_find_property(dn->parent, "ibm,drc-info", NULL);
+ if (info == NULL)
+@@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
+
+ /* Should now know end of current entry */
+
+- if (my_index > drc.last_drc_index)
+- continue;
+-
+- fndit = 1;
+- break;
++ /* Found it */
++ if (my_index <= drc.last_drc_index) {
++ sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
++ my_index);
++ break;
++ }
+ }
+- /* Found it */
+-
+- if (fndit)
+- sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
+- my_index);
+
+ if (((drc_name == NULL) ||
+ (drc_name && !strcmp(drc_name, cell_drc_name))) &&
+diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
+index 83fb077d0b41..702966e4fcaa 100644
+--- a/drivers/pci/pci-bridge-emul.c
++++ b/drivers/pci/pci-bridge-emul.c
+@@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior {
+ u32 rsvd;
+ };
+
+-const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
++static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ [PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ [PCI_COMMAND / 4] = {
+ .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+@@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ },
+ };
+
+-const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
++static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ [PCI_CAP_LIST_ID / 4] = {
+ /*
+ * Capability ID, Next Capability Pointer and
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 088fcdc8d2b4..f2ab112c0a71 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -884,8 +884,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+- if (dev->current_state != state && printk_ratelimit())
+- pci_info(dev, "Refused to change power state, currently in D%d\n",
++ if (dev->current_state != state)
++ pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
+ dev->current_state);
+
+ /*
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+index 6c640837073e..5bfa56f3847e 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+@@ -192,8 +192,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+ static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
+ static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
+-static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
+-static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
++static const unsigned int uart_cts_c_pins[] = { GPIOY_11 };
++static const unsigned int uart_rts_c_pins[] = { GPIOY_12 };
+
+ static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+ static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+@@ -439,10 +439,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
+ GROUP(pwm_f_x, 3, 18),
+
+ /* Bank Y */
+- GROUP(uart_cts_c, 1, 19),
+- GROUP(uart_rts_c, 1, 18),
+- GROUP(uart_tx_c, 1, 17),
+- GROUP(uart_rx_c, 1, 16),
++ GROUP(uart_cts_c, 1, 17),
++ GROUP(uart_rts_c, 1, 16),
++ GROUP(uart_tx_c, 1, 19),
++ GROUP(uart_rx_c, 1, 18),
+ GROUP(pwm_a_y, 1, 21),
+ GROUP(pwm_f_y, 1, 20),
+ GROUP(i2s_out_ch23_y, 1, 5),
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 9b9c61e3f065..977792654e01 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -565,15 +565,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+ !(regval & BIT(INTERRUPT_MASK_OFF)))
+ continue;
+ irq = irq_find_mapping(gc->irq.domain, irqnr + i);
+- generic_handle_irq(irq);
++ if (irq != 0)
++ generic_handle_irq(irq);
+
+ /* Clear interrupt.
+ * We must read the pin register again, in case the
+ * value was changed while executing
+ * generic_handle_irq() above.
++ * If we didn't find a mapping for the interrupt,
++ * disable it in order to avoid a system hang caused
++ * by an interrupt storm.
+ */
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ regval = readl(regs + i);
++ if (irq == 0) {
++ regval &= ~BIT(INTERRUPT_ENABLE_OFF);
++ dev_dbg(&gpio_dev->pdev->dev,
++ "Disabling spurious GPIO IRQ %d\n",
++ irqnr + i);
++ }
+ writel(regval, regs + i);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ ret = IRQ_HANDLED;
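irq_find_mapping() returns 0 when the hardware line has no Linux interrupt mapped; before this fix the handler would neither dispatch nor mask such a line, so it could re-fire indefinitely. The fixed flow, condensed from the hunk above into an annotated fragment (regval, regs, and the bit names come from the driver context shown above):

        unsigned int irq = irq_find_mapping(gc->irq.domain, irqnr + i);

        if (irq != 0)
                generic_handle_irq(irq);        /* mapped: dispatch normally */

        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        regval = readl(regs + i);       /* re-read: the handler may have changed it */
        if (irq == 0)                   /* unmapped: mask it to stop the storm */
                regval &= ~BIT(INTERRUPT_ENABLE_OFF);
        writel(regval, regs + i);       /* ack (and possibly disable) in one write */
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);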
+diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
+index eba872ce4a7c..c82ad4b629e3 100644
+--- a/drivers/pinctrl/pinctrl-stmfx.c
++++ b/drivers/pinctrl/pinctrl-stmfx.c
+@@ -296,29 +296,29 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_DISABLE:
++ case PIN_CONFIG_DRIVE_PUSH_PULL:
++ ret = stmfx_pinconf_set_type(pctl, pin, 0);
++ if (ret)
++ return ret;
++ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
++ ret = stmfx_pinconf_set_type(pctl, pin, 1);
++ if (ret)
++ return ret;
+ ret = stmfx_pinconf_set_pupd(pctl, pin, 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+- ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
++ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ if (ret)
+ return ret;
+- break;
+- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+- if (!dir)
+- ret = stmfx_pinconf_set_type(pctl, pin, 1);
+- else
+- ret = stmfx_pinconf_set_type(pctl, pin, 0);
++ ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+- case PIN_CONFIG_DRIVE_PUSH_PULL:
+- if (!dir)
+- ret = stmfx_pinconf_set_type(pctl, pin, 0);
+- else
+- ret = stmfx_pinconf_set_type(pctl, pin, 1);
++ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
++ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index abcfbad94f00..849c3b34e887 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -32,7 +32,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
+
+ static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
+ {
+- writel(val, pmx->regs[bank] + reg);
++ writel_relaxed(val, pmx->regs[bank] + reg);
++ /* make sure pinmux register write completed */
++ pmx_readl(pmx, bank, reg);
+ }
+
+ static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
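writel_relaxed() gives no ordering against other accesses and no guarantee the store has reached the device; reading the same register back forces the posted write to complete before pmx_writel() returns, which later pinmux programming relies on. The flush idiom in isolation (a minimal sketch):

#include <linux/io.h>
#include <linux/types.h>

static inline void write_flushed(void __iomem *reg, u32 val)
{
        writel_relaxed(val, reg);       /* posted: may still be in flight */
        (void)readl(reg);               /* read-back drains the posted write */
}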
+diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
+index c61f00b72e15..a577218d1ab7 100644
+--- a/drivers/ptp/ptp_qoriq.c
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
++ spin_lock_init(&ptp_qoriq->lock);
++
+ ktime_get_real_ts64(&now);
+ ptp_qoriq_settime(&ptp_qoriq->caps, &now);
+
+@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+- spin_lock_init(&ptp_qoriq->lock);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ regs = &ptp_qoriq->regs;
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index a075e77617dc..3450d615974d 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -166,7 +166,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+ ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
+- tmp, sizeof(tmp));
++ tmp, 2);
++ if (ret)
++ return ret;
++
++ ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
++ buf, sizeof(tmp) - 2);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 7ee673a25fd0..4f9a107a0427 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -279,6 +279,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
++ data->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(data->rtc))
++ return PTR_ERR(data->rtc);
++
+ data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
+
+ if (IS_ERR(data->regmap)) {
+@@ -343,10 +347,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+ goto error_rtc_device_register;
+ }
+
+- data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+- &snvs_rtc_ops, THIS_MODULE);
+- if (IS_ERR(data->rtc)) {
+- ret = PTR_ERR(data->rtc);
++ data->rtc->ops = &snvs_rtc_ops;
++ ret = rtc_register_device(data->rtc);
++ if (ret) {
+ dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
+ goto error_rtc_device_register;
+ }
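The snvs fix moves from devm_rtc_device_register(), which exposes the RTC to userspace before probe has finished, to the two-step API: devm_rtc_allocate_device() early, rtc_register_device() as the very last step once ops and hardware state are ready. A probe skeleton using that ordering (foo_* names hypothetical, the RTC calls real):

#include <linux/rtc.h>
#include <linux/platform_device.h>

struct foo_rtc { struct rtc_device *rtc; };

static const struct rtc_class_ops foo_rtc_ops;  /* hypothetical, empty sketch */

static int foo_rtc_probe(struct platform_device *pdev)
{
        struct foo_rtc *data;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->rtc = devm_rtc_allocate_device(&pdev->dev);       /* allocate early */
        if (IS_ERR(data->rtc))
                return PTR_ERR(data->rtc);

        /* ... regmap lookup, clocks, IRQ wiring ... */

        data->rtc->ops = &foo_rtc_ops;
        return rtc_register_device(data->rtc);  /* publish last */
}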
+diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
+index 39b8cc4574b4..c6ed0b12e807 100644
+--- a/drivers/scsi/scsi_logging.c
++++ b/drivers/scsi/scsi_logging.c
+@@ -15,57 +15,15 @@
+ #include <scsi/scsi_eh.h>
+ #include <scsi/scsi_dbg.h>
+
+-#define SCSI_LOG_SPOOLSIZE 4096
+-
+-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
+-#warning SCSI logging bitmask too large
+-#endif
+-
+-struct scsi_log_buf {
+- char buffer[SCSI_LOG_SPOOLSIZE];
+- unsigned long map;
+-};
+-
+-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
+-
+ static char *scsi_log_reserve_buffer(size_t *len)
+ {
+- struct scsi_log_buf *buf;
+- unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
+- unsigned long idx = 0;
+-
+- preempt_disable();
+- buf = this_cpu_ptr(&scsi_format_log);
+- idx = find_first_zero_bit(&buf->map, map_bits);
+- if (likely(idx < map_bits)) {
+- while (test_and_set_bit(idx, &buf->map)) {
+- idx = find_next_zero_bit(&buf->map, map_bits, idx);
+- if (idx >= map_bits)
+- break;
+- }
+- }
+- if (WARN_ON(idx >= map_bits)) {
+- preempt_enable();
+- return NULL;
+- }
+- *len = SCSI_LOG_BUFSIZE;
+- return buf->buffer + idx * SCSI_LOG_BUFSIZE;
++ *len = 128;
++ return kmalloc(*len, GFP_ATOMIC);
+ }
+
+ static void scsi_log_release_buffer(char *bufptr)
+ {
+- struct scsi_log_buf *buf;
+- unsigned long idx;
+- int ret;
+-
+- buf = this_cpu_ptr(&scsi_format_log);
+- if (bufptr >= buf->buffer &&
+- bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
+- idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
+- ret = test_and_clear_bit(idx, &buf->map);
+- WARN_ON(!ret);
+- }
+- preempt_enable();
++ kfree(bufptr);
+ }
+
+ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
+diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
+index 3a01cfd70fdc..f518273cfbe3 100644
+--- a/drivers/soundwire/Kconfig
++++ b/drivers/soundwire/Kconfig
+@@ -4,7 +4,7 @@
+ #
+
+ menuconfig SOUNDWIRE
+- bool "SoundWire support"
++ tristate "SoundWire support"
+ help
+ SoundWire is a 2-Pin interface with data and clock line ratified
+ by the MIPI Alliance. SoundWire is used for transporting data
+@@ -17,17 +17,12 @@ if SOUNDWIRE
+
+ comment "SoundWire Devices"
+
+-config SOUNDWIRE_BUS
+- tristate
+- select REGMAP_SOUNDWIRE
+-
+ config SOUNDWIRE_CADENCE
+ tristate
+
+ config SOUNDWIRE_INTEL
+ tristate "Intel SoundWire Master driver"
+ select SOUNDWIRE_CADENCE
+- select SOUNDWIRE_BUS
+ depends on X86 && ACPI && SND_SOC
+ help
+ SoundWire Intel Master driver.
+diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
+index fd99a831b92a..45b7e5001653 100644
+--- a/drivers/soundwire/Makefile
++++ b/drivers/soundwire/Makefile
+@@ -5,7 +5,7 @@
+
+ #Bus Objs
+ soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
+-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
++obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
+
+ #Cadence Objs
+ soundwire-cadence-objs := cadence_master.o
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
+index 60293a00a14e..8a670bc86c0c 100644
+--- a/drivers/soundwire/intel.c
++++ b/drivers/soundwire/intel.c
+@@ -283,6 +283,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
+
+ if (pcm) {
+ count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
++
++ /*
++ * WORKAROUND: on all existing Intel controllers, pdi
++ * number 2 reports a channel count of 1 even though it
++ * supports 8 channels, so hardcode the count for pdi
++ * number 2.
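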
++ */
++ if (pdi_num == 2)
++ count = 7;
++
+ } else {
+ count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
+ count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 703948c9fbe1..02206162eaa9 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -438,11 +438,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ /*
+- * Try to reset the device. The success of this is dependent on
+- * being able to lock the device, which is not always possible.
++ * Try to get the locks ourselves to prevent a deadlock. The
++ * success of this is dependent on being able to lock the device,
++ * which is not always possible.
++ * We cannot use the "try" reset interface here, because it would
++ * overwrite the previously restored configuration information.
+ */
+- if (vdev->reset_works && !pci_try_reset_function(pdev))
+- vdev->needs_reset = false;
++ if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
++ if (device_trylock(&pdev->dev)) {
++ if (!__pci_reset_function_locked(pdev))
++ vdev->needs_reset = false;
++ device_unlock(&pdev->dev);
++ }
++ pci_cfg_access_unlock(pdev);
++ }
+
+ pci_restore_state(pdev);
+ out:
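pci_try_reset_function() saves and restores config space internally, which would clobber the state vfio restored just above; the fix therefore takes the config-access lock and the device lock by hand and invokes the bare __pci_reset_function_locked(). The locking ladder from the hunk, annotated:

        /* All three steps are best-effort; any failure just skips the reset. */
        if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
                if (device_trylock(&pdev->dev)) {
                        if (!__pci_reset_function_locked(pdev))
                                vdev->needs_reset = false;
                        device_unlock(&pdev->dev);
                }
                pci_cfg_access_unlock(pdev);    /* release in reverse order */
        }
        pci_restore_state(pdev);        /* our restore survives, not the core's */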
+diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
+index 021b727e8b5c..6afd0d3ae569 100644
+--- a/drivers/video/fbdev/ssd1307fb.c
++++ b/drivers/video/fbdev/ssd1307fb.c
+@@ -432,7 +432,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
+ if (ret < 0)
+ return ret;
+
+- ret = ssd1307fb_write_cmd(par->client, 0x0);
++ ret = ssd1307fb_write_cmd(par->client, par->page_offset);
+ if (ret < 0)
+ return ret;
+
+diff --git a/fs/9p/cache.c b/fs/9p/cache.c
+index 995e332eee5c..eb2151fb6049 100644
+--- a/fs/9p/cache.c
++++ b/fs/9p/cache.c
+@@ -51,6 +51,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
+ if (!v9ses->cachetag) {
+ if (v9fs_random_cachetag(v9ses) < 0) {
+ v9ses->fscache = NULL;
++ kfree(v9ses->cachetag);
++ v9ses->cachetag = NULL;
+ return;
+ }
+ }
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index 8e83741b02e0..d4d4fdfac1a6 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
+
+ void ext4_exit_system_zone(void)
+ {
++ rcu_barrier();
+ kmem_cache_destroy(ext4_system_zone_cachep);
+ }
+
+@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
+ return 0;
+ }
+
++static void release_system_zone(struct ext4_system_blocks *system_blks)
++{
++ struct ext4_system_zone *entry, *n;
++
++ rbtree_postorder_for_each_entry_safe(entry, n,
++ &system_blks->root, node)
++ kmem_cache_free(ext4_system_zone_cachep, entry);
++}
++
+ /*
+ * Mark a range of blocks as belonging to the "system zone" --- that
+ * is, filesystem metadata blocks which should never be used by
+ * inodes.
+ */
+-static int add_system_zone(struct ext4_sb_info *sbi,
++static int add_system_zone(struct ext4_system_blocks *system_blks,
+ ext4_fsblk_t start_blk,
+ unsigned int count)
+ {
+ struct ext4_system_zone *new_entry = NULL, *entry;
+- struct rb_node **n = &sbi->system_blks.rb_node, *node;
++ struct rb_node **n = &system_blks->root.rb_node, *node;
+ struct rb_node *parent = NULL, *new_node = NULL;
+
+ while (*n) {
+@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
+ new_node = &new_entry->node;
+
+ rb_link_node(new_node, parent, n);
+- rb_insert_color(new_node, &sbi->system_blks);
++ rb_insert_color(new_node, &system_blks->root);
+ }
+
+ /* Can we merge to the left? */
+@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
+ if (can_merge(entry, new_entry)) {
+ new_entry->start_blk = entry->start_blk;
+ new_entry->count += entry->count;
+- rb_erase(node, &sbi->system_blks);
++ rb_erase(node, &system_blks->root);
+ kmem_cache_free(ext4_system_zone_cachep, entry);
+ }
+ }
+@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
+ entry = rb_entry(node, struct ext4_system_zone, node);
+ if (can_merge(new_entry, entry)) {
+ new_entry->count += entry->count;
+- rb_erase(node, &sbi->system_blks);
++ rb_erase(node, &system_blks->root);
+ kmem_cache_free(ext4_system_zone_cachep, entry);
+ }
+ }
+@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
+ int first = 1;
+
+ printk(KERN_INFO "System zones: ");
+- node = rb_first(&sbi->system_blks);
++ node = rb_first(&sbi->system_blks->root);
+ while (node) {
+ entry = rb_entry(node, struct ext4_system_zone, node);
+ printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
+@@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
+ printk(KERN_CONT "\n");
+ }
+
+-static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
++/*
++ * Returns 1 if the passed-in block region (start_blk,
++ * start_blk+count) is valid; 0 if some part of the block region
++ * overlaps with filesystem metadata blocks.
++ */
++static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
++ struct ext4_system_blocks *system_blks,
++ ext4_fsblk_t start_blk,
++ unsigned int count)
++{
++ struct ext4_system_zone *entry;
++ struct rb_node *n;
++
++ if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
++ (start_blk + count < start_blk) ||
++ (start_blk + count > ext4_blocks_count(sbi->s_es))) {
++ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
++ return 0;
++ }
++
++ if (system_blks == NULL)
++ return 1;
++
++ n = system_blks->root.rb_node;
++ while (n) {
++ entry = rb_entry(n, struct ext4_system_zone, node);
++ if (start_blk + count - 1 < entry->start_blk)
++ n = n->rb_left;
++ else if (start_blk >= (entry->start_blk + entry->count))
++ n = n->rb_right;
++ else {
++ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
++ return 0;
++ }
++ }
++ return 1;
++}
++
++static int ext4_protect_reserved_inode(struct super_block *sb,
++ struct ext4_system_blocks *system_blks,
++ u32 ino)
+ {
+ struct inode *inode;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
+ if (n == 0) {
+ i++;
+ } else {
+- if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
++ if (!ext4_data_block_valid_rcu(sbi, system_blks,
++ map.m_pblk, n)) {
+ ext4_error(sb, "blocks %llu-%llu from inode %u "
+ "overlap system zone", map.m_pblk,
+ map.m_pblk + map.m_len - 1, ino);
+ err = -EFSCORRUPTED;
+ break;
+ }
+- err = add_system_zone(sbi, map.m_pblk, n);
++ err = add_system_zone(system_blks, map.m_pblk, n);
+ if (err < 0)
+ break;
+ i += n;
+@@ -180,94 +231,130 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
+ return err;
+ }
+
++static void ext4_destroy_system_zone(struct rcu_head *rcu)
++{
++ struct ext4_system_blocks *system_blks;
++
++ system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
++ release_system_zone(system_blks);
++ kfree(system_blks);
++}
++
++/*
++ * Build system zone rbtree which is used for block validity checking.
++ *
++ * The update of system_blks pointer in this function is protected by
++ * sb->s_umount semaphore. However we have to be careful as we can be
++ * racing with ext4_data_block_valid() calls reading system_blks rbtree
++ * protected only by RCU. That's why we first build the rbtree and then
++ * swap it in place.
++ */
+ int ext4_setup_system_zone(struct super_block *sb)
+ {
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_system_blocks *system_blks;
+ struct ext4_group_desc *gdp;
+ ext4_group_t i;
+ int flex_size = ext4_flex_bg_size(sbi);
+ int ret;
+
+ if (!test_opt(sb, BLOCK_VALIDITY)) {
+- if (sbi->system_blks.rb_node)
++ if (sbi->system_blks)
+ ext4_release_system_zone(sb);
+ return 0;
+ }
+- if (sbi->system_blks.rb_node)
++ if (sbi->system_blks)
+ return 0;
+
++ system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
++ if (!system_blks)
++ return -ENOMEM;
++
+ for (i=0; i < ngroups; i++) {
+ cond_resched();
+ if (ext4_bg_has_super(sb, i) &&
+ ((i < 5) || ((i % flex_size) == 0)))
+- add_system_zone(sbi, ext4_group_first_block_no(sb, i),
++ add_system_zone(system_blks,
++ ext4_group_first_block_no(sb, i),
+ ext4_bg_num_gdb(sb, i) + 1);
+ gdp = ext4_get_group_desc(sb, i, NULL);
+- ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
++ ret = add_system_zone(system_blks,
++ ext4_block_bitmap(sb, gdp), 1);
+ if (ret)
+- return ret;
+- ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
++ goto err;
++ ret = add_system_zone(system_blks,
++ ext4_inode_bitmap(sb, gdp), 1);
+ if (ret)
+- return ret;
+- ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
++ goto err;
++ ret = add_system_zone(system_blks,
++ ext4_inode_table(sb, gdp),
+ sbi->s_itb_per_group);
+ if (ret)
+- return ret;
++ goto err;
+ }
+ if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
+- ret = ext4_protect_reserved_inode(sb,
++ ret = ext4_protect_reserved_inode(sb, system_blks,
+ le32_to_cpu(sbi->s_es->s_journal_inum));
+ if (ret)
+- return ret;
++ goto err;
+ }
+
++ /*
++ * System blks rbtree complete, announce it once to prevent racing
++ * with ext4_data_block_valid() accessing the rbtree at the same
++ * time.
++ */
++ rcu_assign_pointer(sbi->system_blks, system_blks);
++
+ if (test_opt(sb, DEBUG))
+ debug_print_tree(sbi);
+ return 0;
++err:
++ release_system_zone(system_blks);
++ kfree(system_blks);
++ return ret;
+ }
+
+-/* Called when the filesystem is unmounted */
++/*
++ * Called when the filesystem is unmounted or when remounting it with
++ * noblock_validity specified.
++ *
++ * The update of system_blks pointer in this function is protected by
++ * sb->s_umount semaphore. However we have to be careful as we can be
++ * racing with ext4_data_block_valid() calls reading system_blks rbtree
++ * protected only by RCU. So we first clear the system_blks pointer and
++ * then free the rbtree only after RCU grace period expires.
++ */
+ void ext4_release_system_zone(struct super_block *sb)
+ {
+- struct ext4_system_zone *entry, *n;
++ struct ext4_system_blocks *system_blks;
+
+- rbtree_postorder_for_each_entry_safe(entry, n,
+- &EXT4_SB(sb)->system_blks, node)
+- kmem_cache_free(ext4_system_zone_cachep, entry);
++ system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
++ lockdep_is_held(&sb->s_umount));
++ rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
+
+- EXT4_SB(sb)->system_blks = RB_ROOT;
++ if (system_blks)
++ call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
+ }
+
+-/*
+- * Returns 1 if the passed-in block region (start_blk,
+- * start_blk+count) is valid; 0 if some part of the block region
+- * overlaps with filesystem metadata blocks.
+- */
+ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
+ unsigned int count)
+ {
+- struct ext4_system_zone *entry;
+- struct rb_node *n = sbi->system_blks.rb_node;
++ struct ext4_system_blocks *system_blks;
++ int ret;
+
+- if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+- (start_blk + count < start_blk) ||
+- (start_blk + count > ext4_blocks_count(sbi->s_es))) {
+- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+- return 0;
+- }
+- while (n) {
+- entry = rb_entry(n, struct ext4_system_zone, node);
+- if (start_blk + count - 1 < entry->start_blk)
+- n = n->rb_left;
+- else if (start_blk >= (entry->start_blk + entry->count))
+- n = n->rb_right;
+- else {
+- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+- return 0;
+- }
+- }
+- return 1;
++ /*
++ * Lock the system zone to prevent it from being released concurrently
++ * by a remount that inverts the current "[no]block_validity"
++ * mount option.
++ */
++ rcu_read_lock();
++ system_blks = rcu_dereference(sbi->system_blks);
++ ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
++ count);
++ rcu_read_unlock();
++ return ret;
+ }
+
+ int ext4_check_blockref(const char *function, unsigned int line,
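The ext4 rework is a standard RCU publish/read/reclaim cycle: build the replacement rbtree privately, publish it atomically with rcu_assign_pointer(), read it only under rcu_read_lock()/rcu_dereference(), and free the old tree via call_rcu() so no reader can still hold it when it is reclaimed. The three roles in miniature (generic names, not ext4's):

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct zone_set {
        struct rb_root root;
        struct rcu_head rcu;
};

static struct zone_set __rcu *active;

static void zone_set_free(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct zone_set, rcu));
}

/* Writer: caller serializes updates (ext4 uses sb->s_umount). */
static void publish(struct zone_set *fresh)
{
        struct zone_set *old = rcu_dereference_protected(active, 1);

        rcu_assign_pointer(active, fresh);      /* readers see old or new, never a mix */
        if (old)
                call_rcu(&old->rcu, zone_set_free);     /* free after grace period */
}

/* Reader: lockless, pointer valid for the duration of the critical section. */
static bool have_zones(void)
{
        struct zone_set *z;
        bool ret;

        rcu_read_lock();
        z = rcu_dereference(active);
        ret = z && !RB_EMPTY_ROOT(&z->root);
        rcu_read_unlock();
        return ret;
}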
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 1cb67859e051..0014b1c5e6be 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -184,6 +184,14 @@ struct ext4_map_blocks {
+ unsigned int m_flags;
+ };
+
++/*
++ * Block validity checking, system zone rbtree.
++ */
++struct ext4_system_blocks {
++ struct rb_root root;
++ struct rcu_head rcu;
++};
++
+ /*
+ * Flags for ext4_io_end->flags
+ */
+@@ -1420,7 +1428,7 @@ struct ext4_sb_info {
+ int s_jquota_fmt; /* Format of quota to use */
+ #endif
+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
+- struct rb_root system_blks;
++ struct ext4_system_blocks __rcu *system_blks;
+
+ #ifdef EXTENTS_STATS
+ /* ext4 extents stats */
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 973f1e818770..01038aff5d8e 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -894,7 +894,21 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
+
+ static int f2fs_drop_inode(struct inode *inode)
+ {
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret;
++
++ /*
++ * during filesystem shutdown, if checkpoint is disabled,
++ * drop useless meta/node dirty pages.
++ */
++ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
++ inode->i_ino == F2FS_META_INO(sbi)) {
++ trace_f2fs_drop_inode(inode, 1);
++ return 1;
++ }
++ }
++
+ /*
+ * This is to avoid a deadlock condition like below.
+ * writeback_single_inode(inode)
+diff --git a/fs/fat/dir.c b/fs/fat/dir.c
+index 1bda2ab6745b..814ad2c2ba80 100644
+--- a/fs/fat/dir.c
++++ b/fs/fat/dir.c
+@@ -1100,8 +1100,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
+ err = -ENOMEM;
+ goto error;
+ }
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(bhs[n]);
+ memset(bhs[n]->b_data, 0, sb->s_blocksize);
+ set_buffer_uptodate(bhs[n]);
++ unlock_buffer(bhs[n]);
+ mark_buffer_dirty_inode(bhs[n], dir);
+
+ n++;
+@@ -1158,6 +1161,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
+ fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
+
+ de = (struct msdos_dir_entry *)bhs[0]->b_data;
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(bhs[0]);
+ /* filling the new directory slots ("." and ".." entries) */
+ memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
+ memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
+@@ -1180,6 +1185,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
+ de[0].size = de[1].size = 0;
+ memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
+ set_buffer_uptodate(bhs[0]);
++ unlock_buffer(bhs[0]);
+ mark_buffer_dirty_inode(bhs[0], dir);
+
+ err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
+@@ -1237,11 +1243,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
+
+ /* fill the directory entry */
+ copy = min(size, sb->s_blocksize);
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(bhs[n]);
+ memcpy(bhs[n]->b_data, slots, copy);
+- slots += copy;
+- size -= copy;
+ set_buffer_uptodate(bhs[n]);
++ unlock_buffer(bhs[n]);
+ mark_buffer_dirty_inode(bhs[n], dir);
++ slots += copy;
++ size -= copy;
+ if (!size)
+ break;
+ n++;
+diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
+index 265983635f2b..3647c65a0f48 100644
+--- a/fs/fat/fatent.c
++++ b/fs/fat/fatent.c
+@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
+ err = -ENOMEM;
+ goto error;
+ }
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(c_bh);
+ memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
+ set_buffer_uptodate(c_bh);
++ unlock_buffer(c_bh);
+ mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
+ if (sb->s_flags & SB_SYNCHRONOUS)
+ err = sync_dirty_buffer(c_bh);
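Userspace can read the raw block device while FAT is populating a buffer, so marking it uptodate before the contents are complete exposes a half-written block; lock_buffer()/unlock_buffer() brackets the fill in all three hunks. The pattern on its own (a fragment assuming the bh/sb/dir variables from the hunks above):

        /* Publish buffer contents atomically w.r.t. concurrent bdev readers. */
        lock_buffer(bh);
        memset(bh->b_data, 0, sb->s_blocksize); /* or memcpy() of new data */
        set_buffer_uptodate(bh);        /* only once the contents are final */
        unlock_buffer(bh);
        mark_buffer_dirty_inode(bh, dir);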
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 103643c68e3f..87c2c9687d90 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -279,10 +279,8 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
+ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
+ break;
+ case FS_CONTEXT_FOR_RECONFIGURE:
+- /* We don't pin any namespaces as the superblock's
+- * subscriptions cannot be changed at this point.
+- */
+ atomic_inc(&reference->d_sb->s_active);
++ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
+ fc->root = dget(reference);
+ break;
+ }
+diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
+index e78657742bd8..3883633e82eb 100644
+--- a/fs/ocfs2/dlm/dlmunlock.c
++++ b/fs/ocfs2/dlm/dlmunlock.c
+@@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+ enum dlm_status status;
+ int actions = 0;
+ int in_use;
+- u8 owner;
++ u8 owner;
++ int recovery_wait = 0;
+
+ mlog(0, "master_node = %d, valblk = %d\n", master_node,
+ flags & LKM_VALBLK);
+@@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+ }
+ if (flags & LKM_CANCEL)
+ lock->cancel_pending = 0;
+- else
+- lock->unlock_pending = 0;
+-
++ else {
++ if (!lock->unlock_pending)
++ recovery_wait = 1;
++ else
++ lock->unlock_pending = 0;
++ }
+ }
+
+ /* get an extra ref on lock. if we are just switching
+@@ -229,6 +233,17 @@ leave:
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
++ if (recovery_wait) {
++ spin_lock(&res->spinlock);
++		/* An unlock request succeeds directly once the owner dies,
++		 * and the lock has already been removed from the grant list.
++		 * We have to wait for RECOVERING to finish or we miss the
++		 * chance to purge it, since removal is much faster than the
++		 * RECOVERING process.
++ */
++ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
++ spin_unlock(&res->spinlock);
++ }
++
+ /* let the caller's final dlm_lock_put handle the actual kfree */
+ if (actions & DLM_UNLOCK_FREE_LOCK) {
+ /* this should always be coupled with list removal */
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 5b7709894415..db9f67d34af3 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -144,6 +144,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
+ if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
+ (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
+ &header_length) == 3) {
++ time->tv_nsec *= 1000;
+ if (data_type == 'C')
+ *compressed = true;
+ else
+@@ -151,6 +152,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
+ } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
+ (time64_t *)&time->tv_sec, &time->tv_nsec,
+ &header_length) == 2) {
++ time->tv_nsec *= 1000;
+ *compressed = false;
+ } else {
+ time->tv_sec = 0;
+diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
+index ccb73422c2fa..e6f54ef6698b 100644
+--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
+@@ -20,6 +20,9 @@
+ #define CMDQ_WFE_WAIT BIT(15)
+ #define CMDQ_WFE_WAIT_VALUE 0x1
+
++/** cmdq event maximum */
++#define CMDQ_MAX_EVENT 0x3ff
++
+ /*
+ * CMDQ_CODE_MASK:
+ * set write mask
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index dd436da7eccc..9feb59ac8550 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2375,4 +2375,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
+ #define pci_notice_ratelimited(pdev, fmt, arg...) \
+ dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
+
++#define pci_info_ratelimited(pdev, fmt, arg...) \
++ dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
++
+ #endif /* LINUX_PCI_H */
+diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
+index 54ade13a9b15..4e8899972db4 100644
+--- a/include/linux/soc/mediatek/mtk-cmdq.h
++++ b/include/linux/soc/mediatek/mtk-cmdq.h
+@@ -13,9 +13,6 @@
+
+ #define CMDQ_NO_TIMEOUT 0xffffffffu
+
+-/** cmdq event maximum */
+-#define CMDQ_MAX_EVENT 0x3ff
+-
+ struct cmdq_pkt;
+
+ struct cmdq_client {
+diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
+index e03bd9d41fa8..7b196d234626 100644
+--- a/include/scsi/scsi_dbg.h
++++ b/include/scsi/scsi_dbg.h
+@@ -6,8 +6,6 @@ struct scsi_cmnd;
+ struct scsi_device;
+ struct scsi_sense_hdr;
+
+-#define SCSI_LOG_BUFSIZE 128
+-
+ extern void scsi_print_command(struct scsi_cmnd *);
+ extern size_t __scsi_format_command(char *, size_t,
+ const unsigned char *, size_t);
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index fa06b528c73c..0972c48d81d7 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -1071,7 +1071,7 @@ TRACE_EVENT(rxrpc_recvmsg,
+ ),
+
+ TP_fast_assign(
+- __entry->call = call->debug_id;
++ __entry->call = call ? call->debug_id : 0;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index d5870723b8ad..15d70a90b50d 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
+ {
+ struct page *pages;
+
++ if (fatal_signal_pending(current))
++ return NULL;
+ pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
+ if (pages) {
+ unsigned int count, i;
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index c4ce08f43bd6..ab4a4606d19b 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -1175,6 +1175,7 @@ err:
+ pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
+ patch->mod->name, obj->mod->name, obj->mod->name);
+ mod->klp_alive = false;
++ obj->mod = NULL;
+ klp_cleanup_module_patches_limited(mod, patch);
+ mutex_unlock(&klp_mutex);
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index cbdfae379896..120ec6f64bbc 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -599,7 +599,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+ int "Maximum kmemleak early log entries"
+ depends on DEBUG_KMEMLEAK
+ range 200 40000
+- default 400
++ default 16000
+ help
+ Kmemleak must track all the memory allocations to avoid
+ reporting false positives. Since memory may be allocated or
+diff --git a/net/core/sock.c b/net/core/sock.c
+index df7b38b60164..3d54153b8325 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1696,8 +1696,6 @@ static void __sk_destruct(struct rcu_head *head)
+ sk_filter_uncharge(sk, filter);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
+ }
+- if (rcu_access_pointer(sk->sk_reuseport_cb))
+- reuseport_detach_sock(sk);
+
+ sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
+
+@@ -1724,7 +1722,14 @@ static void __sk_destruct(struct rcu_head *head)
+
+ void sk_destruct(struct sock *sk)
+ {
+- if (sock_flag(sk, SOCK_RCU_FREE))
++ bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
++
++ if (rcu_access_pointer(sk->sk_reuseport_cb)) {
++ reuseport_detach_sock(sk);
++ use_call_rcu = true;
++ }
++
++ if (use_call_rcu)
+ call_rcu(&sk->sk_rcu, __sk_destruct);
+ else
+ __sk_destruct(&sk->sk_rcu);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index a53a543fe055..52690bb3e40f 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev)
+ struct ip_tunnel *t = netdev_priv(dev);
+
+ ether_setup(dev);
++ dev->max_mtu = 0;
+ dev->netdev_ops = &erspan_netdev_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index f6b7b11835ee..148dfcb5cbd9 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -915,16 +915,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ if (peer->rate_tokens == 0 ||
+ time_after(jiffies,
+ (peer->rate_last +
+- (ip_rt_redirect_load << peer->rate_tokens)))) {
++ (ip_rt_redirect_load << peer->n_redirects)))) {
+ __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
+
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+- ++peer->rate_tokens;
+ ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+ if (log_martians &&
+- peer->rate_tokens == ip_rt_redirect_number)
++ peer->n_redirects == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &gw);
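The throttle here is exponential backoff: a redirect may be sent only once jiffies passes rate_last + (ip_rt_redirect_load << n). Keying n on rate_tokens was fragile because that counter is shared with other rate-limiting logic and can be reset independently; n_redirects counts exactly the redirects sent to this peer, so the delay now doubles per ignored redirect. The gate in isolation (send_redirect() is a hypothetical stand-in for the icmp_send() call):

        if (peer->rate_tokens == 0 ||
            time_after(jiffies,
                       peer->rate_last +
                       (ip_rt_redirect_load << peer->n_redirects))) {
                send_redirect(skb);     /* hypothetical */
                peer->rate_last = jiffies;
                ++peer->n_redirects;    /* next window is twice as long */
        }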
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 3e8b38c73d8c..483323332d74 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk,
+ return false;
+
+ start_ts = tcp_sk(sk)->retrans_stamp;
+- if (likely(timeout == 0))
+- timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
++ if (likely(timeout == 0)) {
++ unsigned int rto_base = TCP_RTO_MIN;
++
++ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
++ rto_base = tcp_timeout_init(sk);
++ timeout = tcp_model_timeout(sk, boundary, rto_base);
++ }
+
+ return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
+ }
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index acab7738f733..665f26e32d77 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -833,6 +833,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ int is_udplite = IS_UDPLITE(sk);
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
++ int datalen = len - sizeof(*uh);
+ __wsum csum = 0;
+
+ /*
+@@ -866,10 +867,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ return -EIO;
+ }
+
+- skb_shinfo(skb)->gso_size = cork->gso_size;
+- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
+- cork->gso_size);
++ if (datalen > cork->gso_size) {
++ skb_shinfo(skb)->gso_size = cork->gso_size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
++ cork->gso_size);
++ }
+ goto csum_partial;
+ }
+
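Marking a segment as GSO when its payload already fits within one gso_size chunk produces a single-segment "GSO" skb, which the stack and some drivers do not expect; both the IPv4 hunk here and the IPv6 hunk below gate the GSO fields on datalen. The guard with the arithmetic spelled out (a fragment assuming the variables from the hunk above):

        int datalen = len - sizeof(*uh);        /* UDP payload, header excluded */

        if (datalen > cork->gso_size) {
                skb_shinfo(skb)->gso_size = cork->gso_size;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
                /* e.g. datalen = 3000, gso_size = 1400 -> gso_segs = 3 */
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
                                                         cork->gso_size);
        }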
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 2454fce6fbfa..c94bc461e268 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5962,13 +5962,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ switch (event) {
+ case RTM_NEWADDR:
+ /*
+- * If the address was optimistic
+- * we inserted the route at the start of
+- * our DAD process, so we don't need
+- * to do it again
++ * If the address was optimistic we inserted the route at the
++ * start of our DAD process, so we don't need to do it again.
++ * If the device was taken down in the middle of the DAD
++ * cycle there is a race where we could get here without a
++ * host route, so nothing to insert. That will be fixed when
++ * the device is brought up.
+ */
+- if (!rcu_access_pointer(ifp->rt->fib6_node))
++ if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
+ ip6_ins_rt(net, ifp->rt);
++ } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
++ pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
++ &ifp->addr, ifp->idev->dev->name);
++ }
++
+ if (ifp->idev->cnf.forwarding)
+ addrconf_join_anycast(ifp);
+ if (!ipv6_addr_any(&ifp->peer_addr))
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index fa014d5f1732..a593aaf25748 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -221,6 +221,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+ if (ipv6_addr_is_multicast(&hdr->saddr))
+ goto err;
+
++ /* While RFC4291 is not explicit about v4mapped addresses
++	 * in IPv6 headers, it seems clear the Linux dual-stack
++	 * model cannot deal properly with these.
++ * Security models could be fooled by ::ffff:127.0.0.1 for example.
++ *
++ * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
++ */
++ if (ipv6_addr_v4mapped(&hdr->saddr))
++ goto err;
++
+ skb->transport_header = skb->network_header + sizeof(*hdr);
+ IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 1258be19e186..f0b5edd861d0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1122,6 +1122,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ __wsum csum = 0;
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
++ int datalen = len - sizeof(*uh);
+
+ /*
+ * Create a UDP header
+@@ -1154,8 +1155,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ return -EIO;
+ }
+
+- skb_shinfo(skb)->gso_size = cork->gso_size;
+- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ if (datalen > cork->gso_size) {
++ skb_shinfo(skb)->gso_size = cork->gso_size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
++ cork->gso_size);
++ }
+ goto csum_partial;
+ }
+
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 8dfea26536c9..ccdd790e163a 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+ llcp_sock->service_name_len,
+ GFP_KERNEL);
+-
++ if (!llcp_sock->service_name) {
++ ret = -ENOMEM;
++ goto put_dev;
++ }
+ llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
++ kfree(llcp_sock->service_name);
++ llcp_sock->service_name = NULL;
+ ret = -EADDRINUSE;
+ goto put_dev;
+ }
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index ea64c90b14e8..17e6ca62f1be 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -970,7 +970,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
+ int rc;
+ u32 idx;
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++ !info->attrs[NFC_ATTR_TARGET_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+@@ -1018,7 +1019,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
+ struct sk_buff *msg = NULL;
+ u32 idx;
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++ !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/rds/ib.c b/net/rds/ib.c
+index b8d581b779b2..992e03ceee9f 100644
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
+ refcount_set(&rds_ibdev->refcount, 1);
+ INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
+
++ INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
++ INIT_LIST_HEAD(&rds_ibdev->conn_list);
++
+ rds_ibdev->max_wrs = device->attrs.max_qp_wr;
+ rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
+
+@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
+ device->name,
+ rds_ibdev->use_fastreg ? "FRMR" : "FMR");
+
+- INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+- INIT_LIST_HEAD(&rds_ibdev->conn_list);
+-
+ down_write(&rds_ib_devices_lock);
+ list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
+ up_write(&rds_ib_devices_lock);
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 06c7a2da21bc..39b427dc7512 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
+ [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
+ };
+
++static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
++ struct nlattr *opt,
++ struct netlink_ext_ack *extack)
++{
++ int err;
++
++ if (!opt) {
++ NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
++ cbq_policy, extack);
++ if (err < 0)
++ return err;
++
++ if (tb[TCA_CBQ_WRROPT]) {
++ const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
++
++ if (wrr->priority > TC_CBQ_MAXPRIO) {
++ NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
++ err = -EINVAL;
++ }
++ }
++ return err;
++}
++
+ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+ hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ q->delay_timer.function = cbq_undelay;
+
+- if (!opt) {
+- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+- return -EINVAL;
+- }
+-
+- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
+- extack);
++ err = cbq_opt_parse(tb, opt, extack);
+ if (err < 0)
+ return err;
+
+@@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
+ struct cbq_class *parent;
+ struct qdisc_rate_table *rtab = NULL;
+
+- if (!opt) {
+- NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
+- return -EINVAL;
+- }
+-
+- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
+- extack);
++ err = cbq_opt_parse(tb, opt, extack);
+ if (err < 0)
+ return err;
+
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index 4a403d35438f..284ab2dcf47f 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
+ if (err < 0)
+ goto skip;
+
+- if (ecmd.base.speed != SPEED_UNKNOWN)
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+ skip:
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index bad1cbe59a56..05605b30bef3 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+ goto errout;
+
+ err = -EINVAL;
++ if (!tb[TCA_DSMARK_INDICES])
++ goto errout;
+ indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
+
+ if (hweight32(indices) != 1)
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 11c2873ec68b..2f2967dcf15a 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -668,12 +668,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
+ if (err < 0)
+ goto skip;
+
+- if (ecmd.base.speed != SPEED_UNKNOWN)
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+ skip:
+- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+- speed * 1000 * 1000);
++ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ atomic64_set(&q->picos_per_byte, picos_per_byte);
+ netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 2050fd386642..fd9af899637d 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -163,6 +163,7 @@ struct tipc_link {
+ struct {
+ u16 len;
+ u16 limit;
++ struct sk_buff *target_bskb;
+ } backlog[5];
+ u16 snd_nxt;
+ u16 prev_from;
+@@ -872,6 +873,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
+ void tipc_link_reset(struct tipc_link *l)
+ {
+ struct sk_buff_head list;
++ u32 imp;
+
+ __skb_queue_head_init(&list);
+
+@@ -893,11 +895,10 @@ void tipc_link_reset(struct tipc_link *l)
+ __skb_queue_purge(&l->deferdq);
+ __skb_queue_purge(&l->backlogq);
+ __skb_queue_purge(&l->failover_deferdq);
+- l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+- l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+- l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+- l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+- l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
++ for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
++ l->backlog[imp].len = 0;
++ l->backlog[imp].target_bskb = NULL;
++ }
+ kfree_skb(l->reasm_buf);
+ kfree_skb(l->failover_reasm_skb);
+ l->reasm_buf = NULL;
+@@ -938,7 +939,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff_head *backlogq = &l->backlogq;
+- struct sk_buff *skb, *_skb, *bskb;
++ struct sk_buff *skb, *_skb, **tskb;
+ int pkt_cnt = skb_queue_len(list);
+ int rc = 0;
+
+@@ -988,19 +989,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ seqno++;
+ continue;
+ }
+- if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
++ tskb = &l->backlog[imp].target_bskb;
++ if (tipc_msg_bundle(*tskb, hdr, mtu)) {
+ kfree_skb(__skb_dequeue(list));
+ l->stats.sent_bundled++;
+ continue;
+ }
+- if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
++ if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
+ kfree_skb(__skb_dequeue(list));
+- __skb_queue_tail(backlogq, bskb);
+- l->backlog[msg_importance(buf_msg(bskb))].len++;
++ __skb_queue_tail(backlogq, *tskb);
++ l->backlog[imp].len++;
+ l->stats.sent_bundled++;
+ l->stats.sent_bundles++;
+ continue;
+ }
++ l->backlog[imp].target_bskb = NULL;
+ l->backlog[imp].len += skb_queue_len(list);
+ skb_queue_splice_tail_init(list, backlogq);
+ }
+@@ -1016,6 +1019,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
+ u16 seqno = l->snd_nxt;
+ u16 ack = l->rcv_nxt - 1;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
++ u32 imp;
+
+ while (skb_queue_len(&l->transmq) < l->window) {
+ skb = skb_peek(&l->backlogq);
+@@ -1026,7 +1030,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
+ break;
+ __skb_dequeue(&l->backlogq);
+ hdr = buf_msg(skb);
+- l->backlog[msg_importance(hdr)].len--;
++ imp = msg_importance(hdr);
++ l->backlog[imp].len--;
++ if (unlikely(skb == l->backlog[imp].target_bskb))
++ l->backlog[imp].target_bskb = NULL;
+ __skb_queue_tail(&l->transmq, skb);
+ /* next retransmit attempt */
+ if (link_is_bc_sndlink(l))
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index f48e5857210f..b956ce4a40ef 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+ bmsg = buf_msg(_skb);
+ tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+ INT_H_SIZE, dnode);
+- if (msg_isdata(msg))
+- msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+- else
+- msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
++ msg_set_importance(bmsg, msg_importance(msg));
+ msg_set_seqno(bmsg, msg_seqno(msg));
+ msg_set_ack(bmsg, msg_ack(msg));
+ msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index ab47bf3ab66e..2ab43b2bba31 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net,
+ }
+ EXPORT_SYMBOL_GPL(__vsock_create);
+
+-static void __vsock_release(struct sock *sk)
++static void __vsock_release(struct sock *sk, int level)
+ {
+ if (sk) {
+ struct sk_buff *skb;
+@@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk)
+ vsk = vsock_sk(sk);
+ pending = NULL; /* Compiler warning. */
+
++ /* The release call is supposed to use lock_sock_nested()
++ * rather than lock_sock(), if a sock lock should be acquired.
++ */
+ transport->release(vsk);
+
+- lock_sock(sk);
++ /* When "level" is SINGLE_DEPTH_NESTING, use the nested
++ * version to avoid the warning "possible recursive locking
++ * detected". When "level" is 0, lock_sock_nested(sk, level)
++ * is the same as lock_sock(sk).
++ */
++ lock_sock_nested(sk, level);
+ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+@@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk)
+
+ /* Clean up any sockets that never were accepted. */
+ while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+- __vsock_release(pending);
++ __vsock_release(pending, SINGLE_DEPTH_NESTING);
+ sock_put(pending);
+ }
+
+@@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
+
+ static int vsock_release(struct socket *sock)
+ {
+- __vsock_release(sock->sk);
++ __vsock_release(sock->sk, 0);
+ sock->sk = NULL;
+ sock->state = SS_FREE;
+
+diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
+index 6c81a911fc02..920f14705346 100644
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -528,7 +528,7 @@ static void hvs_release(struct vsock_sock *vsk)
+ struct sock *sk = sk_vsock(vsk);
+ bool remove_sock;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ remove_sock = hvs_close_lock_held(vsk);
+ release_sock(sk);
+ if (remove_sock)
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 6f1a8aff65c5..a7adffd062c7 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -790,7 +790,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
+ struct sock *sk = &vsk->sk;
+ bool remove_sock = true;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ if (sk->sk_type == SOCK_STREAM)
+ remove_sock = virtio_transport_close(vsk);
+
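The three vsock hunks above share one theme: a child socket can be released while its parent listener is already locked, so the child's lock must be taken with an explicit lockdep subclass. The annotation changes only lockdep's bookkeeping, not the locking behavior. Roughly, the call pattern the patch establishes (an illustrative fragment, not a quote from the kernel source):

    /* Parent socket being released: ordinary lock, subclass 0. */
    lock_sock(sk);                 /* same as lock_sock_nested(sk, 0) */

    /* Child socket still on the parent's accept queue, released while
     * the parent is held: use a distinct subclass so lockdep does not
     * report "possible recursive locking detected".
     */
    lock_sock_nested(pending, SINGLE_DEPTH_NESTING);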
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 3ec7ac70c313..c106167423a1 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3403,7 +3403,7 @@ static int selinux_inode_copy_up_xattr(const char *name)
+ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+ {
+- const struct task_security_struct *tsec = current_security();
++ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ u32 parent_sid, newsid, clen;
+ int rc;
+ char *context;
+diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
+index 91c5395dd20c..586b7abd0aa7 100644
+--- a/security/selinux/include/objsec.h
++++ b/security/selinux/include/objsec.h
+@@ -37,16 +37,6 @@ struct task_security_struct {
+	u32 sockcreate_sid;	/* sockcreate SID */
+ };
+
+-/*
+- * get the subjective security ID of the current task
+- */
+-static inline u32 current_sid(void)
+-{
+- const struct task_security_struct *tsec = current_security();
+-
+- return tsec->sid;
+-}
+-
+ enum label_initialized {
+ LABEL_INVALID, /* invalid or not initialized */
+ LABEL_INITIALIZED, /* initialized */
+@@ -185,4 +175,14 @@ static inline struct ipc_security_struct *selinux_ipc(
+ return ipc->security + selinux_blob_sizes.lbs_ipc;
+ }
+
++/*
++ * get the subjective security ID of the current task
++ */
++static inline u32 current_sid(void)
++{
++ const struct task_security_struct *tsec = selinux_cred(current_cred());
++
++ return tsec->sid;
++}
++
+ #endif /* _SELINUX_OBJSEC_H_ */
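Both SELinux hunks replace the retired current_security() helper with the LSM-blob accessor, and current_sid() moves below the accessor definitions so it can use them. In this kernel the accessor is plain pointer arithmetic into the cred's shared security blob; a sketch of its shape (the real definition appears earlier in objsec.h, alongside the selinux_ipc() accessor shown above):

    static inline struct task_security_struct *selinux_cred(
                                                const struct cred *cred)
    {
            /* each LSM owns a fixed offset inside cred->security */
            return cred->security + selinux_blob_sizes.lbs_cred;
    }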
+diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
+index f1c93a7be9ec..38ac3da4e791 100644
+--- a/security/smack/smack_access.c
++++ b/security/smack/smack_access.c
+@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len)
+ if (i == 0 || i >= SMK_LONGLABEL)
+ return ERR_PTR(-EINVAL);
+
+- smack = kzalloc(i + 1, GFP_KERNEL);
++ smack = kzalloc(i + 1, GFP_NOFS);
+ if (smack == NULL)
+ return ERR_PTR(-ENOMEM);
+
+@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
+- cat, GFP_KERNEL);
++ cat, GFP_NOFS);
+ if (rc < 0) {
+ netlbl_catmap_free(sap->attr.mls.cat);
+ return rc;
+@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
+ if (skp != NULL)
+ goto freeout;
+
+- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
++ skp = kzalloc(sizeof(*skp), GFP_NOFS);
+ if (skp == NULL) {
+ skp = ERR_PTR(-ENOMEM);
+ goto freeout;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 4c5e5a438f8b..36b6b9d4cbaf 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+ if (!(ip->i_opflags & IOP_XATTR))
+ return ERR_PTR(-EOPNOTSUPP);
+
+- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
++ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+
+@@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
+
+ if (rc != 0)
+ return rc;
+- } else if (bprm->unsafe)
++ }
++ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
+ return -EPERM;
+
+ bsp->smk_task = isp->smk_task;
+@@ -3925,6 +3926,8 @@ access_check:
+ skp = smack_ipv6host_label(&sadd);
+ if (skp == NULL)
+ skp = smack_net_ambient;
++ if (skb == NULL)
++ break;
+ #ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
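Several Smack hunks above switch label allocations from GFP_KERNEL to GFP_NOFS. These allocations can run on filesystem paths (for instance, smk_fetch() is called while instantiating an inode's security label), and GFP_NOFS is GFP_KERNEL minus __GFP_FS: reclaim may still happen, but it will not re-enter filesystem writeback or shrinkers and deadlock on locks the caller already holds. A hedged sketch of the distinction, with a hypothetical helper name:

    #include <linux/gfp.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Illustrative only: choose allocation flags by calling context. */
    static void *smk_alloc_label(size_t len, bool in_fs_path)
    {
            /* GFP_NOFS forbids the allocator from recursing into
             * filesystem reclaim while fs locks may be held.
             */
            return kzalloc(len, in_fs_path ? GFP_NOFS : GFP_KERNEL);
    }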
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index b8265ee9923f..614b31aad168 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single MSS: will fail with GSO, because the segment
+- * logic in udp4_ufo_fragment demands a gso skb to be > MTU
+- */
++ /* send a single MSS: will fall back to no GSO */
+ .tlen = CONST_MSS_V4,
+ .gso_len = CONST_MSS_V4,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single 1B MSS: will fail, see single MSS above */
++ /* send a single 1B MSS: will fall back to no GSO */
+ .tlen = 1,
+ .gso_len = 1,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single MSS: will fail with GSO, because the segment
+- * logic in udp4_ufo_fragment demands a gso skb to be > MTU
+- */
++ /* send a single MSS: will fall back to no GSO */
+ .tlen = CONST_MSS_V6,
+ .gso_len = CONST_MSS_V6,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single 1B MSS: will fail, see single MSS above */
++ /* send a single 1B MSS: will fall back to no GSO */
+ .tlen = 1,
+ .gso_len = 1,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
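These testcase updates track a kernel-side change in this release: a UDP_SEGMENT send that yields exactly one segment is no longer rejected but transmitted as an ordinary datagram, so the single-MSS and 1-byte-MSS cases now expect success (r_num_mss = 1) rather than .tfail. For context, the selftest enables segmentation with the UDP_SEGMENT socket option; a sketch in the selftest's style (not a verbatim excerpt):

    #include <netinet/udp.h>
    #include <sys/socket.h>

    #ifndef UDP_SEGMENT
    #define UDP_SEGMENT 103
    #endif

    /* Ask the kernel to split a large write into gso_len-byte datagrams.
     * When the payload fits in a single segment, newer kernels fall back
     * to a plain non-GSO send instead of returning an error.
     */
    static int set_udp_gso(int fd, int gso_len)
    {
            return setsockopt(fd, SOL_UDP, UDP_SEGMENT,
                              &gso_len, sizeof(gso_len));
    }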
+diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
+index 97f9f491c541..c402464b038f 100644
+--- a/tools/testing/selftests/powerpc/tm/tm.h
++++ b/tools/testing/selftests/powerpc/tm/tm.h
+@@ -55,7 +55,8 @@ static inline bool failure_is_unavailable(void)
+ static inline bool failure_is_reschedule(void)
+ {
+ if ((failure_code() & TM_CAUSE_RESCHED) == TM_CAUSE_RESCHED ||
+- (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED)
++ (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED ||
++ (failure_code() & TM_CAUSE_KVM_FAC_UNAV) == TM_CAUSE_KVM_FAC_UNAV)
+ return true;
+
+ return false;
+diff --git a/usr/Makefile b/usr/Makefile
+index 4a70ae43c9cb..bdb3f52fadc4 100644
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
+ datafile_d_y = .$(datafile_y).d
+ AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+
++# clean rules do not have CONFIG_INITRAMFS_COMPRESSION. So clean up after all
++# possible compression formats.
++clean-files += initramfs_data.cpio*
+
+ # Generate builtin.o based on initramfs_data.o
+ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
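The glob is needed because suffix_y, and therefore datafile_y, depends on CONFIG_INITRAMFS_COMPRESSION, which the clean targets do not read: a tree built with, say, gzip compression leaves usr/initramfs_data.cpio.gz behind, and a later make clean run without that configuration would otherwise not know to remove it.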