author    Mike Pagano <mpagano@gentoo.org>  2020-02-28 11:41:26 -0500
committer Mike Pagano <mpagano@gentoo.org>  2020-02-28 11:41:26 -0500
commit    b1bd048a3be78094255666cd125bae1fc1a0f8a1 (patch)
tree      69f2de22b32ee9a97d17ea28a171ede5b38c7f88 /1022_linux-5.4.23.patch
parent    Linux patch 5.4.22 (diff)
download  linux-patches-b1bd048a3be78094255666cd125bae1fc1a0f8a1.tar.gz
linux-patches-b1bd048a3be78094255666cd125bae1fc1a0f8a1.tar.bz2
linux-patches-b1bd048a3be78094255666cd125bae1fc1a0f8a1.zip
Linux patch 5.4.23
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1022_linux-5.4.23.patch')
-rw-r--r--  1022_linux-5.4.23.patch  5941
1 file changed, 5941 insertions, 0 deletions
diff --git a/1022_linux-5.4.23.patch b/1022_linux-5.4.23.patch
new file mode 100644
index 00000000..b1928ccc
--- /dev/null
+++ b/1022_linux-5.4.23.patch
@@ -0,0 +1,5941 @@
+diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
+index d4a85d535bf9..4a9d9c794ee5 100644
+--- a/Documentation/arm64/tagged-address-abi.rst
++++ b/Documentation/arm64/tagged-address-abi.rst
+@@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
+ how the user addresses are used by the kernel:
+
+ 1. User addresses not accessed by the kernel but used for address space
+- management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
+- of valid tagged pointers in this context is always allowed.
++ management (e.g. ``mprotect()``, ``madvise()``). The use of valid
++ tagged pointers in this context is allowed with the exception of
++ ``brk()``, ``mmap()`` and the ``new_address`` argument to
++ ``mremap()`` as these have the potential to alias with existing
++ user addresses.
++
++ NOTE: This behaviour changed in v5.6 and so some earlier kernels may
++ incorrectly accept valid tagged pointers for the ``brk()``,
++ ``mmap()`` and ``mremap()`` system calls.
+
+ 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
+ relaxation is disabled by default and the application thread needs to
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d1aeebb59e6a..fe6fa5d3a63e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -8201,7 +8201,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+ M: Rodrigo Vivi <rodrigo.vivi@intel.com>
+ L: intel-gfx@lists.freedesktop.org
+ W: https://01.org/linuxgraphics/
+-B: https://01.org/linuxgraphics/documentation/how-report-bugs
++B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ C: irc://chat.freenode.net/intel-gfx
+ Q: http://patchwork.freedesktop.org/project/intel-gfx/
+ T: git git://anongit.freedesktop.org/drm-intel
+diff --git a/Makefile b/Makefile
+index 9428ec3b611a..af5e90075514 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
+index 73834996c4b6..5de132100b6d 100644
+--- a/arch/arm64/include/asm/lse.h
++++ b/arch/arm64/include/asm/lse.h
+@@ -6,7 +6,7 @@
+
+ #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+
+-#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
++#define __LSE_PREAMBLE ".arch_extension lse\n"
+
+ #include <linux/compiler_types.h>
+ #include <linux/export.h>
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index c23c47360664..08df42e4db96 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -219,7 +219,7 @@ static inline unsigned long kaslr_offset(void)
+ ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
+
+ #define untagged_addr(addr) ({ \
+- u64 __addr = (__force u64)addr; \
++ u64 __addr = (__force u64)(addr); \
+ __addr &= __untagged_addr(__addr); \
+ (__force __typeof__(addr))__addr; \
+ })
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index c8bb14ff4713..6ba5adb96a3b 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
+ /*
+ * Some number of bits at the level of the page table that points to
+ * a hugepte are used to encode the size. This masks those bits.
++ * On 8xx, HW assistance requires 4k alignment for the hugepte.
+ */
++#ifdef CONFIG_PPC_8xx
++#define HUGEPD_SHIFT_MASK 0xfff
++#else
+ #define HUGEPD_SHIFT_MASK 0x3f
++#endif
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 2fb166928e91..4fd7efdf2a53 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -1200,6 +1200,17 @@ void eeh_handle_special_event(void)
+ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ eeh_handle_normal_event(pe);
+ } else {
++ eeh_for_each_pe(pe, tmp_pe)
++ eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
++ edev->mode &= ~EEH_DEV_NO_HANDLER;
++
++ /* Notify all devices to be down */
++ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
++ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
++ eeh_pe_report(
++ "error_detected(permanent failure)", pe,
++ eeh_report_failure, NULL);
++
+ pci_lock_rescan_remove();
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb_pe = eeh_phb_pe_get(hose);
+@@ -1208,16 +1219,6 @@ void eeh_handle_special_event(void)
+ (phb_pe->state & EEH_PE_RECOVERING))
+ continue;
+
+- eeh_for_each_pe(pe, tmp_pe)
+- eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+- edev->mode &= ~EEH_DEV_NO_HANDLER;
+-
+- /* Notify all devices to be down */
+- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+- eeh_pe_report(
+- "error_detected(permanent failure)", pe,
+- eeh_report_failure, NULL);
+ bus = eeh_pe_bus_get(phb_pe);
+ if (!bus) {
+ pr_err("%s: Cannot find PCI bus for "
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 59bb4f4ae316..13f699256258 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -778,7 +778,7 @@ fast_exception_return:
+ 1: lis r3,exc_exit_restart_end@ha
+ addi r3,r3,exc_exit_restart_end@l
+ cmplw r12,r3
+-#if CONFIG_PPC_BOOK3S_601
++#ifdef CONFIG_PPC_BOOK3S_601
+ bge 2b
+ #else
+ bge 3f
+@@ -786,7 +786,7 @@ fast_exception_return:
+ lis r4,exc_exit_restart@ha
+ addi r4,r4,exc_exit_restart@l
+ cmplw r12,r4
+-#if CONFIG_PPC_BOOK3S_601
++#ifdef CONFIG_PPC_BOOK3S_601
+ blt 2b
+ #else
+ blt 3f
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 19f583e18402..98d8b6832fcb 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -289,7 +289,7 @@ InstructionTLBMiss:
+ * set. All other Linux PTE bits control the behavior
+ * of the MMU.
+ */
+- rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
++ rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
+ rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
+ ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
+ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index e6c30cee6abf..d215f9554553 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
+ * normal/non-checkpointed stack pointer.
+ */
+
++ unsigned long ret = tsk->thread.regs->gpr[1];
++
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BUG_ON(tsk != current);
+
+ if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
++ preempt_disable();
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
+ if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+- return tsk->thread.ckpt_regs.gpr[1];
++ ret = tsk->thread.ckpt_regs.gpr[1];
++
++ /*
++ * If we treclaim, we must clear the current thread's TM bits
++ * before re-enabling preemption. Otherwise we might be
++ * preempted and have the live MSR[TS] changed behind our back
++ * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
++ * enter the signal handler in non-transactional state.
++ */
++ tsk->thread.regs->msr &= ~MSR_TS_MASK;
++ preempt_enable();
+ }
+ #endif
+- return tsk->thread.regs->gpr[1];
++ return ret;
+ }
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 98600b276f76..1b090a76b444 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ */
+ static int save_tm_user_regs(struct pt_regs *regs,
+ struct mcontext __user *frame,
+- struct mcontext __user *tm_frame, int sigret)
++ struct mcontext __user *tm_frame, int sigret,
++ unsigned long msr)
+ {
+- unsigned long msr = regs->msr;
+-
+ WARN_ON(tm_suspend_disabled);
+
+- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+- * just indicates to userland that we were doing a transaction, but we
+- * don't want to return in transactional state. This also ensures
+- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+- */
+- regs->msr &= ~MSR_TS_MASK;
+-
+ /* Save both sets of general registers */
+ if (save_general_regs(&current->thread.ckpt_regs, frame)
+ || save_general_regs(regs, tm_frame))
+@@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ int sigret;
+ unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /* Save the thread's msr before get_tm_stackpointer() changes it */
++ unsigned long msr = regs->msr;
++#endif
+
+ BUG_ON(tsk != current);
+
+@@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_frame = &rt_sf->uc_transact.uc_mcontext;
+- if (MSR_TM_ACTIVE(regs->msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ if (__put_user((unsigned long)&rt_sf->uc_transact,
+ &rt_sf->uc.uc_link) ||
+ __put_user((unsigned long)tm_frame,
+ &rt_sf->uc_transact.uc_regs))
+ goto badframe;
+- if (save_tm_user_regs(regs, frame, tm_frame, sigret))
++ if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
+ goto badframe;
+ }
+ else
+@@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ int sigret;
+ unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /* Save the thread's msr before get_tm_stackpointer() changes it */
++ unsigned long msr = regs->msr;
++#endif
+
+ BUG_ON(tsk != current);
+
+@@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_mctx = &frame->mctx_transact;
+- if (MSR_TM_ACTIVE(regs->msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+- sigret))
++ sigret, msr))
+ goto badframe;
+ }
+ else
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 117515564ec7..84ed2e77ef9c 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
+ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ struct sigcontext __user *tm_sc,
+ struct task_struct *tsk,
+- int signr, sigset_t *set, unsigned long handler)
++ int signr, sigset_t *set, unsigned long handler,
++ unsigned long msr)
+ {
+ /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+ * process never used altivec yet (MSR_VEC is zero in pt_regs of
+@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
+ #endif
+ struct pt_regs *regs = tsk->thread.regs;
+- unsigned long msr = tsk->thread.regs->msr;
+ long err = 0;
+
+ BUG_ON(tsk != current);
+
+- BUG_ON(!MSR_TM_ACTIVE(regs->msr));
++ BUG_ON(!MSR_TM_ACTIVE(msr));
+
+ WARN_ON(tm_suspend_disabled);
+
+@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ */
+ msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
+
+- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
+- * just indicates to userland that we were doing a transaction, but we
+- * don't want to return in transactional state. This also ensures
+- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+- */
+- regs->msr &= ~MSR_TS_MASK;
+-
+ #ifdef CONFIG_ALTIVEC
+ err |= __put_user(v_regs, &sc->v_regs);
+ err |= __put_user(tm_v_regs, &tm_sc->v_regs);
+@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ unsigned long newsp = 0;
+ long err = 0;
+ struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++ /* Save the thread's msr before get_tm_stackpointer() changes it */
++ unsigned long msr = regs->msr;
++#endif
+
+ BUG_ON(tsk != current);
+
+@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+- if (MSR_TM_ACTIVE(regs->msr)) {
++ if (MSR_TM_ACTIVE(msr)) {
+ /* The ucontext_t passed to userland points to the second
+ * ucontext_t (for transactional state) with its uc_link ptr.
+ */
+@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
+ &frame->uc_transact.uc_mcontext,
+ tsk, ksig->sig, NULL,
+- (unsigned long)ksig->ka.sa.sa_handler);
++ (unsigned long)ksig->ka.sa.sa_handler,
++ msr);
+ } else
+ #endif
+ {
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 73d4873fc7f8..33b3461d91e8 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
+ if (pshift >= pdshift) {
+ cachep = PGT_CACHE(PTE_T_ORDER);
+ num_hugepd = 1 << (pshift - pdshift);
++ new = NULL;
+ } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+- cachep = PGT_CACHE(PTE_INDEX_SIZE);
++ cachep = NULL;
+ num_hugepd = 1;
++ new = pte_alloc_one(mm);
+ } else {
+ cachep = PGT_CACHE(pdshift - pshift);
+ num_hugepd = 1;
++ new = NULL;
+ }
+
+- if (!cachep) {
++ if (!cachep && !new) {
+ WARN_ONCE(1, "No page table cache created for hugetlb tables");
+ return -ENOMEM;
+ }
+
+- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
++ if (cachep)
++ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+
+ BUG_ON(pshift > HUGEPD_SHIFT_MASK);
+ BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
+@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
+ if (i < num_hugepd) {
+ for (i = i - 1 ; i >= 0; i--, hpdp--)
+ *hpdp = __hugepd(0);
+- kmem_cache_free(cachep, new);
++ if (cachep)
++ kmem_cache_free(cachep, new);
++ else
++ pte_free(mm, new);
+ } else {
+ kmemleak_ignore(new);
+ }
+@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
+ if (shift >= pdshift)
+ hugepd_free(tlb, hugepte);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+- pgtable_free_tlb(tlb, hugepte,
+- get_hugepd_cache_index(PTE_INDEX_SIZE));
++ pgtable_free_tlb(tlb, hugepte, 0);
+ else
+ pgtable_free_tlb(tlb, hugepte,
+ get_hugepd_cache_index(pdshift - shift));
+@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
+ * if we have pdshift and shift value same, we don't
+ * use pgt cache for hugepd.
+ */
+- if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
+- pgtable_cache_add(PTE_INDEX_SIZE);
+- else if (pdshift > shift)
+- pgtable_cache_add(pdshift - shift);
+- else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
++ if (pdshift > shift) {
++ if (!IS_ENABLED(CONFIG_PPC_8xx))
++ pgtable_cache_add(pdshift - shift);
++ } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
++ IS_ENABLED(CONFIG_PPC_8xx)) {
+ pgtable_cache_add(PTE_T_ORDER);
++ }
+
+ configured = true;
+ }
+diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
+index 5d12352545c5..5591243d673e 100644
+--- a/arch/s390/boot/kaslr.c
++++ b/arch/s390/boot/kaslr.c
+@@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
+ *(unsigned long *) prng.parm_block ^= seed;
+ for (i = 0; i < 16; i++) {
+ cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
+- (char *) entropy, (char *) entropy,
++ (u8 *) entropy, (u8 *) entropy,
+ sizeof(entropy));
+ memcpy(prng.parm_block, entropy, sizeof(entropy));
+ }
+diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
+index 3f5cb55cde35..e399102367af 100644
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
+
+ static inline void storage_key_init_range(unsigned long start, unsigned long end)
+ {
+- if (PAGE_DEFAULT_KEY)
++ if (PAGE_DEFAULT_KEY != 0)
+ __storage_key_init_range(start, end);
+ }
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c1ed054c103c..734a3334e0f0 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1098,7 +1098,7 @@ struct kvm_x86_ops {
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+- void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
++ int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+ int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 6a3124664289..1682e4b5ce75 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -510,6 +510,8 @@
+ #define MSR_K7_HWCR 0xc0010015
+ #define MSR_K7_HWCR_SMMLOCK_BIT 0
+ #define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
++#define MSR_K7_HWCR_IRPERF_EN_BIT 30
++#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+ #define MSR_K7_FID_VID_CTL 0xc0010041
+ #define MSR_K7_FID_VID_STATUS 0xc0010042
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 62c30279be77..c3f4dd4ae155 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -28,6 +28,7 @@
+
+ static const int amd_erratum_383[];
+ static const int amd_erratum_400[];
++static const int amd_erratum_1054[];
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+
+ /*
+@@ -978,6 +979,15 @@ static void init_amd(struct cpuinfo_x86 *c)
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++ /*
++ * Turn on the Instructions Retired free counter on machines not
++ * susceptible to erratum #1054 "Instructions Retired Performance
++ * Counter May Be Inaccurate".
++ */
++ if (cpu_has(c, X86_FEATURE_IRPERF) &&
++ !cpu_has_amd_erratum(c, amd_erratum_1054))
++ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -1105,6 +1115,10 @@ static const int amd_erratum_400[] =
+ static const int amd_erratum_383[] =
+ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++ AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
+
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 259f3f4e2e5f..1cf34fcc3a8e 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -1161,9 +1161,12 @@ static const struct sysfs_ops threshold_ops = {
+ .store = store,
+ };
+
++static void threshold_block_release(struct kobject *kobj);
++
+ static struct kobj_type threshold_ktype = {
+ .sysfs_ops = &threshold_ops,
+ .default_attrs = default_attrs,
++ .release = threshold_block_release,
+ };
+
+ static const char *get_name(unsigned int bank, struct threshold_block *b)
+@@ -1196,8 +1199,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
+ return buf_mcatype;
+ }
+
+-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+- unsigned int block, u32 address)
++static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
++ unsigned int bank, unsigned int block,
++ u32 address)
+ {
+ struct threshold_block *b = NULL;
+ u32 low, high;
+@@ -1241,16 +1245,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+
+ INIT_LIST_HEAD(&b->miscj);
+
+- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
+- list_add(&b->miscj,
+- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+- } else {
+- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+- }
++ if (tb->blocks)
++ list_add(&b->miscj, &tb->blocks->miscj);
++ else
++ tb->blocks = b;
+
+- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
+- per_cpu(threshold_banks, cpu)[bank]->kobj,
+- get_name(bank, b));
++ err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
+ if (err)
+ goto out_free;
+ recurse:
+@@ -1258,7 +1258,7 @@ recurse:
+ if (!address)
+ return 0;
+
+- err = allocate_threshold_blocks(cpu, bank, block, address);
++ err = allocate_threshold_blocks(cpu, tb, bank, block, address);
+ if (err)
+ goto out_free;
+
+@@ -1343,8 +1343,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ goto out_free;
+ }
+
+- per_cpu(threshold_banks, cpu)[bank] = b;
+-
+ if (is_shared_bank(bank)) {
+ refcount_set(&b->cpus, 1);
+
+@@ -1355,9 +1353,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ }
+ }
+
+- err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
+- if (!err)
+- goto out;
++ err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
++ if (err)
++ goto out_free;
++
++ per_cpu(threshold_banks, cpu)[bank] = b;
++
++ return 0;
+
+ out_free:
+ kfree(b);
+@@ -1366,8 +1368,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ return err;
+ }
+
+-static void deallocate_threshold_block(unsigned int cpu,
+- unsigned int bank)
++static void threshold_block_release(struct kobject *kobj)
++{
++ kfree(to_block(kobj));
++}
++
++static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+ {
+ struct threshold_block *pos = NULL;
+ struct threshold_block *tmp = NULL;
+@@ -1377,13 +1383,11 @@ static void deallocate_threshold_block(unsigned int cpu,
+ return;
+
+ list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+- kobject_put(&pos->kobj);
+ list_del(&pos->miscj);
+- kfree(pos);
++ kobject_put(&pos->kobj);
+ }
+
+- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
++ kobject_put(&head->blocks->kobj);
+ }
+
+ static void __threshold_remove_blocks(struct threshold_bank *b)
+diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
+index 4d4f5d9faac3..23054909c8dd 100644
+--- a/arch/x86/kernel/ima_arch.c
++++ b/arch/x86/kernel/ima_arch.c
+@@ -10,8 +10,6 @@ extern struct boot_params boot_params;
+
+ static enum efi_secureboot_mode get_sb_mode(void)
+ {
+- efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
+- efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
+ efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+ efi_status_t status;
+ unsigned long size;
+@@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
+ }
+
+ /* Get variable contents into buffer */
+- status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
++ status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
+ NULL, &size, &secboot);
+ if (status == EFI_NOT_FOUND) {
+ pr_info("ima: secureboot mode disabled\n");
+@@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
+ }
+
+ size = sizeof(setupmode);
+- status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
++ status = efi.get_variable(L"SetupMode", &efi_variable_guid,
+ NULL, &size, &setupmode);
+
+ if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
+diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
+index 8ecd48d31800..5ddcaacef291 100644
+--- a/arch/x86/kvm/irq_comm.c
++++ b/arch/x86/kvm/irq_comm.c
+@@ -416,7 +416,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
+
+ kvm_set_msi_irq(vcpu->kvm, entry, &irq);
+
+- if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
++ if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
+ irq.dest_id, irq.dest_mode))
+ __set_bit(irq.vector, ioapic_handled_vectors);
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 15728971a430..5d2587005d0e 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -637,9 +637,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+ {
+ u8 val;
+- if (pv_eoi_get_user(vcpu, &val) < 0)
++ if (pv_eoi_get_user(vcpu, &val) < 0) {
+ printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
+ (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++ return false;
++ }
+ return val & 0x1;
+ }
+
+@@ -1056,11 +1058,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+ apic->regs + APIC_TMR);
+ }
+
+- if (vcpu->arch.apicv_active)
+- kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
+- else {
++ if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ kvm_lapic_set_irr(vector, apic);
+-
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 8d1be7c61f10..207030db3481 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5141,8 +5141,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ return;
+ }
+
+-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ {
++ if (!vcpu->arch.apicv_active)
++ return -1;
++
+ kvm_lapic_set_irr(vec, vcpu->arch.apic);
+ smp_mb__after_atomic();
+
+@@ -5154,6 +5157,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ put_cpu();
+ } else
+ kvm_vcpu_wake_up(vcpu);
++
++ return 0;
+ }
+
+ static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
+index 283bdb7071af..f486e2606247 100644
+--- a/arch/x86/kvm/vmx/capabilities.h
++++ b/arch/x86/kvm/vmx/capabilities.h
+@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
+ extern bool __read_mostly enable_unrestricted_guest;
+ extern bool __read_mostly enable_ept_ad_bits;
+ extern bool __read_mostly enable_pml;
++extern bool __read_mostly enable_apicv;
+ extern int __read_mostly pt_mode;
+
+ #define PT_MODE_SYSTEM 0
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 931d3b5f3acd..802ef7177d53 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5132,24 +5132,17 @@ fail:
+ return 1;
+ }
+
+-
+-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+- struct vmcs12 *vmcs12)
++/*
++ * Return true if an IO instruction with the specified port and size should cause
++ * a VM-exit into L1.
++ */
++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
++ int size)
+ {
+- unsigned long exit_qualification;
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gpa_t bitmap, last_bitmap;
+- unsigned int port;
+- int size;
+ u8 b;
+
+- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+-
+- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-
+- port = exit_qualification >> 16;
+- size = (exit_qualification & 7) + 1;
+-
+ last_bitmap = (gpa_t)-1;
+ b = -1;
+
+@@ -5176,6 +5169,24 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+ return false;
+ }
+
++static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
++ struct vmcs12 *vmcs12)
++{
++ unsigned long exit_qualification;
++ unsigned short port;
++ int size;
++
++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
++
++ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++
++ port = exit_qualification >> 16;
++ size = (exit_qualification & 7) + 1;
++
++ return nested_vmx_check_io_bitmaps(vcpu, port, size);
++}
++
+ /*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
+ * rather than handle it ourselves in L0. I.e., check whether L1 expressed
+@@ -5796,8 +5807,7 @@ void nested_vmx_vcpu_setup(void)
+ * bit in the high half is on if the corresponding bit in the control field
+ * may be on. See also vmx_control_verify().
+ */
+-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+- bool apicv)
++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
+ {
+ /*
+ * Note that as a general rule, the high half of the MSRs (bits in
+@@ -5824,7 +5834,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+ PIN_BASED_EXT_INTR_MASK |
+ PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS |
+- (apicv ? PIN_BASED_POSTED_INTR : 0);
++ (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
+ msrs->pinbased_ctls_high |=
+ PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
+index 6280f33e5fa6..b8521c451bb0 100644
+--- a/arch/x86/kvm/vmx/nested.h
++++ b/arch/x86/kvm/vmx/nested.h
+@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
+ };
+
+ void vmx_leave_nested(struct kvm_vcpu *vcpu);
+-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+- bool apicv);
++void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
+ void nested_vmx_hardware_unsetup(void);
+ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+ void nested_vmx_vcpu_setup(void);
+@@ -33,6 +32,8 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
+ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
++ int size);
+
+ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 84b57b461ad6..8ebcd9de87a2 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -95,7 +95,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+ static bool __read_mostly fasteoi = 1;
+ module_param(fasteoi, bool, S_IRUGO);
+
+-static bool __read_mostly enable_apicv = 1;
++bool __read_mostly enable_apicv = 1;
+ module_param(enable_apicv, bool, S_IRUGO);
+
+ /*
+@@ -3853,24 +3853,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+ * 2. If target vcpu isn't running(root mode), kick it to pick up the
+ * interrupt from PIR in next vmentry.
+ */
+-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ if (!r)
+- return;
++ return 0;
++
++ if (!vcpu->arch.apicv_active)
++ return -1;
+
+ if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+- return;
++ return 0;
+
+ /* If a previous notification has sent the IPI, nothing to do. */
+ if (pi_test_and_set_on(&vmx->pi_desc))
+- return;
++ return 0;
+
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ kvm_vcpu_kick(vcpu);
++
++ return 0;
+ }
+
+ /*
+@@ -6802,8 +6807,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+
+ if (nested)
+ nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
+- vmx_capability.ept,
+- kvm_vcpu_apicv_active(&vmx->vcpu));
++ vmx_capability.ept);
+ else
+ memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
+
+@@ -6885,8 +6889,7 @@ static int __init vmx_check_processor_compat(void)
+ if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
+ return -EIO;
+ if (nested)
+- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
+- enable_apicv);
++ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
+ if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+ printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+ smp_processor_id());
+@@ -7132,6 +7135,39 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+ to_vmx(vcpu)->req_immediate_exit = true;
+ }
+
++static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
++ struct x86_instruction_info *info)
++{
++ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++ unsigned short port;
++ bool intercept;
++ int size;
++
++ if (info->intercept == x86_intercept_in ||
++ info->intercept == x86_intercept_ins) {
++ port = info->src_val;
++ size = info->dst_bytes;
++ } else {
++ port = info->dst_val;
++ size = info->src_bytes;
++ }
++
++ /*
++ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
++ * VM-exits depend on the 'unconditional IO exiting' VM-execution
++ * control.
++ *
++ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
++ */
++ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++ intercept = nested_cpu_has(vmcs12,
++ CPU_BASED_UNCOND_IO_EXITING);
++ else
++ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
++
++ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
++}
++
+ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage)
+@@ -7139,19 +7175,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+
++ switch (info->intercept) {
+ /*
+ * RDPID causes #UD if disabled through secondary execution controls.
+ * Because it is marked as EmulateOnUD, we need to intercept it here.
+ */
+- if (info->intercept == x86_intercept_rdtscp &&
+- !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+- ctxt->exception.vector = UD_VECTOR;
+- ctxt->exception.error_code_valid = false;
+- return X86EMUL_PROPAGATE_FAULT;
+- }
++ case x86_intercept_rdtscp:
++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
++ ctxt->exception.vector = UD_VECTOR;
++ ctxt->exception.error_code_valid = false;
++ return X86EMUL_PROPAGATE_FAULT;
++ }
++ break;
++
++ case x86_intercept_in:
++ case x86_intercept_ins:
++ case x86_intercept_out:
++ case x86_intercept_outs:
++ return vmx_check_intercept_io(vcpu, info);
+
+ /* TODO: check more intercepts... */
+- return X86EMUL_CONTINUE;
++ default:
++ break;
++ }
++
++ return X86EMUL_UNHANDLEABLE;
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -7736,7 +7784,7 @@ static __init int hardware_setup(void)
+
+ if (nested) {
+ nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
+- vmx_capability.ept, enable_apicv);
++ vmx_capability.ept);
+
+ r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+ if (r)
+diff --git a/crypto/hash_info.c b/crypto/hash_info.c
+index c754cb75dd1a..a49ff96bde77 100644
+--- a/crypto/hash_info.c
++++ b/crypto/hash_info.c
+@@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
+ [HASH_ALGO_TGR_128] = "tgr128",
+ [HASH_ALGO_TGR_160] = "tgr160",
+ [HASH_ALGO_TGR_192] = "tgr192",
+- [HASH_ALGO_SM3_256] = "sm3-256",
++ [HASH_ALGO_SM3_256] = "sm3",
+ [HASH_ALGO_STREEBOG_256] = "streebog256",
+ [HASH_ALGO_STREEBOG_512] = "streebog512",
+ };
+diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
+index 9e2f5a05c066..bad2257356fe 100644
+--- a/drivers/acpi/acpica/evevent.c
++++ b/drivers/acpi/acpica/evevent.c
+@@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
+ handler) (acpi_gbl_fixed_event_handlers[event].context));
+ }
+
++/*******************************************************************************
++ *
++ * FUNCTION: acpi_any_fixed_event_status_set
++ *
++ * PARAMETERS: None
++ *
++ * RETURN: TRUE or FALSE
++ *
++ * DESCRIPTION: Checks the PM status register for active fixed events
++ *
++ ******************************************************************************/
++
++u32 acpi_any_fixed_event_status_set(void)
++{
++ acpi_status status;
++ u32 in_status;
++ u32 in_enable;
++ u32 i;
++
++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable);
++ if (ACPI_FAILURE(status)) {
++ return (FALSE);
++ }
++
++ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status);
++ if (ACPI_FAILURE(status)) {
++ return (FALSE);
++ }
++
++ /*
++ * Check for all possible Fixed Events and dispatch those that are active
++ */
++ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
++
++ /* Both the status and enable bits must be on for this event */
++
++ if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) &&
++ (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) {
++ return (TRUE);
++ }
++ }
++
++ return (FALSE);
++}
++
+ #endif /* !ACPI_REDUCED_HARDWARE */
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 62348ec2a807..827530dae682 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -992,6 +992,13 @@ static bool acpi_s2idle_wake(void)
+ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ return true;
+
++ /*
++ * If the status bit of any enabled fixed event is set, the
++ * wakeup is regarded as valid.
++ */
++ if (acpi_any_fixed_event_status_set())
++ return true;
++
+ /*
+ * If there are no EC events to process and at least one of the
+ * other enabled GPEs is active, the wakeup is regarded as a
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 05c2b32dcc4d..1787e3ad9c44 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -80,6 +80,7 @@ enum board_ids {
+
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
++static void ahci_shutdown_one(struct pci_dev *dev);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -593,6 +594,7 @@ static struct pci_driver ahci_pci_driver = {
+ .id_table = ahci_pci_tbl,
+ .probe = ahci_init_one,
+ .remove = ahci_remove_one,
++ .shutdown = ahci_shutdown_one,
+ .driver = {
+ .pm = &ahci_pci_pm_ops,
+ },
+@@ -1864,6 +1866,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ return 0;
+ }
+
++static void ahci_shutdown_one(struct pci_dev *pdev)
++{
++ ata_pci_shutdown_one(pdev);
++}
++
+ static void ahci_remove_one(struct pci_dev *pdev)
+ {
+ pm_runtime_get_noresume(&pdev->dev);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 84b183a6424e..581595b35573 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6762,6 +6762,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
+ ata_host_detach(host);
+ }
+
++void ata_pci_shutdown_one(struct pci_dev *pdev)
++{
++ struct ata_host *host = pci_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < host->n_ports; i++) {
++ struct ata_port *ap = host->ports[i];
++
++ ap->pflags |= ATA_PFLAG_FROZEN;
++
++ /* Disable port interrupts */
++ if (ap->ops->freeze)
++ ap->ops->freeze(ap);
++
++ /* Stop the port DMA engines */
++ if (ap->ops->port_stop)
++ ap->ops->port_stop(ap);
++ }
++}
++
+ /* move to PCI subsystem */
+ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+ {
+@@ -7382,6 +7402,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+
+ #ifdef CONFIG_PCI
+ EXPORT_SYMBOL_GPL(pci_test_config_bits);
++EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
+ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+ #ifdef CONFIG_PM
+ EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 485865fd0412..f19a03b62365 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
+ /* selects the fdc and drive, and enables the fdc's input/dma. */
+ static void set_fdc(int drive)
+ {
++ unsigned int new_fdc = fdc;
++
+ if (drive >= 0 && drive < N_DRIVE) {
+- fdc = FDC(drive);
++ new_fdc = FDC(drive);
+ current_drive = drive;
+ }
+- if (fdc != 1 && fdc != 0) {
++ if (new_fdc >= N_FDC) {
+ pr_info("bad fdc value\n");
+ return;
+ }
++ fdc = new_fdc;
+ set_dor(fdc, ~0, 8);
+ #if N_FDC > 1
+ set_dor(1 - fdc, ~8, 0);
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 5817dfe5c5d2..2f8026b71933 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -831,6 +831,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
+ return 0;
+ }
+
++ bank->crypto_id = HASH_ALGO__LAST;
++
+ return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
+ }
+
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 66f1b2ac5cde..c27e206a764c 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -760,8 +760,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
+ return;
+ }
+ sdmac->desc = desc = to_sdma_desc(&vd->tx);
+-
+- list_del(&vd->node);
++ /*
++ * Do not delete the node in desc_issued list in cyclic mode, otherwise
++ * the desc allocated will never be freed in vchan_dma_desc_free_list
++ */
++ if (!(sdmac->flags & IMX_DMA_SG_LOOP))
++ list_del(&vd->node);
+
+ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+@@ -1067,6 +1071,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
++ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
+ sdmac->context_loaded = false;
+@@ -1075,19 +1080,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ static int sdma_disable_channel_async(struct dma_chan *chan)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&sdmac->vc.lock, flags);
+
+ sdma_disable_channel(chan);
+
+- if (sdmac->desc) {
+- vchan_terminate_vdesc(&sdmac->desc->vd);
+- sdmac->desc = NULL;
++ if (sdmac->desc)
+ schedule_work(&sdmac->terminate_worker);
+- }
+-
+- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 596722e79a26..2816d0329738 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -3977,11 +3977,13 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ uint64_t clock;
+
++ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
++ amdgpu_gfx_off_ctrl(adev, true);
+ return clock;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0125ea7c4103..d85e1e559c82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4080,11 +4080,13 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ uint64_t clock;
+
++ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
++ amdgpu_gfx_off_ctrl(adev, true);
+ return clock;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 4ccfcdf8f16a..80934ca17260 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -267,7 +267,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
+
+ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+- return adev->clock.spll.reference_freq;
++ u32 reference_clock = adev->clock.spll.reference_freq;
++
++ if (adev->asic_type == CHIP_RAVEN)
++ return reference_clock / 4;
++
++ return reference_clock;
+ }
+
+
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 8a8d605021f0..0454675a44cb 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -294,7 +294,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
+
+ static int tc_aux_wait_busy(struct tc_data *tc)
+ {
+- return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
++ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
+ }
+
+ static int tc_aux_write_data(struct tc_data *tc, const void *data,
+@@ -637,7 +637,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ if (ret)
+ goto err;
+
+- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
++ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(tc->dev, "Timeout waiting for PHY to become ready");
+ return ret;
+@@ -861,7 +861,7 @@ static int tc_wait_link_training(struct tc_data *tc)
+ int ret;
+
+ ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
+- LT_LOOPDONE, 1, 1000);
++ LT_LOOPDONE, 500, 100000);
+ if (ret) {
+ dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
+ return ret;
+@@ -934,7 +934,7 @@ static int tc_main_link_enable(struct tc_data *tc)
+ dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
+
+- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
++ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
+ if (ret) {
+ dev_err(dev, "timeout waiting for phy become ready");
+ return ret;
+diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
+index 0d21402945ab..3317798945e8 100644
+--- a/drivers/gpu/drm/i915/Kconfig
++++ b/drivers/gpu/drm/i915/Kconfig
+@@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
+ help
+ This option enables capturing the GPU state when a hang is detected.
+ This information is vital for triaging hangs and assists in debugging.
+- Please report any hang to
+- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+- for triaging.
++ Please report any hang for triaging according to:
++ https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+
+ If in doubt, say "Y".
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index af50f05f4e9d..272503615378 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -10510,7 +10510,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+ u32 base;
+
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+- base = obj->phys_handle->busaddr;
++ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index 646859fea224..08b35587bc6d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -240,9 +240,6 @@ struct drm_i915_gem_object {
+
+ void *gvt_info;
+ };
+-
+- /** for phys allocated objects */
+- struct drm_dma_handle *phys_handle;
+ };
+
+ static inline struct drm_i915_gem_object *
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+index 768356908160..0cfe9bd76377 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+@@ -21,88 +21,87 @@
+ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+ {
+ struct address_space *mapping = obj->base.filp->f_mapping;
+- struct drm_dma_handle *phys;
+- struct sg_table *st;
+ struct scatterlist *sg;
+- char *vaddr;
++ struct sg_table *st;
++ dma_addr_t dma;
++ void *vaddr;
++ void *dst;
+ int i;
+- int err;
+
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
+
+- /* Always aligning to the object size, allows a single allocation
++ /*
++ * Always aligning to the object size, allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+- phys = drm_pci_alloc(obj->base.dev,
+- roundup_pow_of_two(obj->base.size),
+- roundup_pow_of_two(obj->base.size));
+- if (!phys)
++ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
++ roundup_pow_of_two(obj->base.size),
++ &dma, GFP_KERNEL);
++ if (!vaddr)
+ return -ENOMEM;
+
+- vaddr = phys->vaddr;
++ st = kmalloc(sizeof(*st), GFP_KERNEL);
++ if (!st)
++ goto err_pci;
++
++ if (sg_alloc_table(st, 1, GFP_KERNEL))
++ goto err_st;
++
++ sg = st->sgl;
++ sg->offset = 0;
++ sg->length = obj->base.size;
++
++ sg_assign_page(sg, (struct page *)vaddr);
++ sg_dma_address(sg) = dma;
++ sg_dma_len(sg) = obj->base.size;
++
++ dst = vaddr;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+- char *src;
++ void *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+- if (IS_ERR(page)) {
+- err = PTR_ERR(page);
+- goto err_phys;
+- }
++ if (IS_ERR(page))
++ goto err_st;
+
+ src = kmap_atomic(page);
+- memcpy(vaddr, src, PAGE_SIZE);
+- drm_clflush_virt_range(vaddr, PAGE_SIZE);
++ memcpy(dst, src, PAGE_SIZE);
++ drm_clflush_virt_range(dst, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ put_page(page);
+- vaddr += PAGE_SIZE;
++ dst += PAGE_SIZE;
+ }
+
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+- st = kmalloc(sizeof(*st), GFP_KERNEL);
+- if (!st) {
+- err = -ENOMEM;
+- goto err_phys;
+- }
+-
+- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+- kfree(st);
+- err = -ENOMEM;
+- goto err_phys;
+- }
+-
+- sg = st->sgl;
+- sg->offset = 0;
+- sg->length = obj->base.size;
+-
+- sg_dma_address(sg) = phys->busaddr;
+- sg_dma_len(sg) = obj->base.size;
+-
+- obj->phys_handle = phys;
+-
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+
+-err_phys:
+- drm_pci_free(obj->base.dev, phys);
+-
+- return err;
++err_st:
++ kfree(st);
++err_pci:
++ dma_free_coherent(&obj->base.dev->pdev->dev,
++ roundup_pow_of_two(obj->base.size),
++ vaddr, dma);
++ return -ENOMEM;
+ }
+
+ static void
+ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+ {
++ dma_addr_t dma = sg_dma_address(pages->sgl);
++ void *vaddr = sg_page(pages->sgl);
++
+ __i915_gem_object_release_shmem(obj, pages, false);
+
+ if (obj->mm.dirty) {
+ struct address_space *mapping = obj->base.filp->f_mapping;
+- char *vaddr = obj->phys_handle->vaddr;
++ void *src = vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+@@ -114,15 +113,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ continue;
+
+ dst = kmap_atomic(page);
+- drm_clflush_virt_range(vaddr, PAGE_SIZE);
+- memcpy(dst, vaddr, PAGE_SIZE);
++ drm_clflush_virt_range(src, PAGE_SIZE);
++ memcpy(dst, src, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+ put_page(page);
+- vaddr += PAGE_SIZE;
++
++ src += PAGE_SIZE;
+ }
+ obj->mm.dirty = false;
+ }
+@@ -130,7 +130,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ sg_free_table(pages);
+ kfree(pages);
+
+- drm_pci_free(obj->base.dev, obj->phys_handle);
++ dma_free_coherent(&obj->base.dev->pdev->dev,
++ roundup_pow_of_two(obj->base.size),
++ vaddr, dma);
+ }
+
+ static void phys_release(struct drm_i915_gem_object *obj)
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
+index 22aab8593abf..926272b5a0ca 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
+@@ -250,6 +250,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+ return pos & (ring->size - 1);
+ }
+
++static inline int intel_ring_direction(const struct intel_ring *ring,
++ u32 next, u32 prev)
++{
++ typecheck(typeof(ring->size), next);
++ typecheck(typeof(ring->size), prev);
++ return (next - prev) << ring->wrap;
++}
++
+ static inline bool
+ intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+index 798e1b024406..c77c9518c58b 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -107,6 +107,7 @@ struct intel_ring {
+
+ u32 space;
+ u32 size;
++ u32 wrap;
+ u32 effective_size;
+ };
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 4949b5ad860f..66f6d1a897f2 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -471,12 +471,6 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
+ return desc;
+ }
+
+-static void unwind_wa_tail(struct i915_request *rq)
+-{
+- rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
+- assert_ring_tail_valid(rq->ring, rq->tail);
+-}
+-
+ static struct i915_request *
+ __unwind_incomplete_requests(struct intel_engine_cs *engine)
+ {
+@@ -495,7 +489,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
+ continue; /* XXX */
+
+ __i915_request_unsubmit(rq);
+- unwind_wa_tail(rq);
+
+ /*
+ * Push the request back into the queue for later resubmission.
+@@ -650,13 +643,35 @@ execlists_schedule_out(struct i915_request *rq)
+ i915_request_put(rq);
+ }
+
+-static u64 execlists_update_context(const struct i915_request *rq)
++static u64 execlists_update_context(struct i915_request *rq)
+ {
+ struct intel_context *ce = rq->hw_context;
+- u64 desc;
++ u64 desc = ce->lrc_desc;
++ u32 tail, prev;
+
+- ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+- intel_ring_set_tail(rq->ring, rq->tail);
++ /*
++ * WaIdleLiteRestore:bdw,skl
++ *
++ * We should never submit the context with the same RING_TAIL twice
++ * just in case we submit an empty ring, which confuses the HW.
++ *
++ * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
++ * the normal request to be able to always advance the RING_TAIL on
++ * subsequent resubmissions (for lite restore). Should that fail us,
++ * and we try and submit the same tail again, force the context
++ * reload.
++ *
++ * If we need to return to a preempted context, we need to skip the
++ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
++ * HW has a tendency to ignore us rewinding the TAIL to the end of
++ * an earlier request.
++ */
++ tail = intel_ring_set_tail(rq->ring, rq->tail);
++ prev = ce->lrc_reg_state[CTX_RING_TAIL + 1];
++ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
++ desc |= CTX_DESC_FORCE_RESTORE;
++ ce->lrc_reg_state[CTX_RING_TAIL + 1] = tail;
++ rq->tail = rq->wa_tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+@@ -675,7 +690,6 @@ static u64 execlists_update_context(const struct i915_request *rq)
+ */
+ mb();
+
+- desc = ce->lrc_desc;
+ ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+
+ return desc;
+@@ -919,6 +933,11 @@ last_active(const struct intel_engine_execlists *execlists)
+ return *last;
+ }
+
++#define for_each_waiter(p__, rq__) \
++ list_for_each_entry_lockless(p__, \
++ &(rq__)->sched.waiters_list, \
++ wait_link)
++
+ static void defer_request(struct i915_request *rq, struct list_head * const pl)
+ {
+ LIST_HEAD(list);
+@@ -936,7 +955,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
++ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+@@ -1102,14 +1121,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
+ */
+ __unwind_incomplete_requests(engine);
+
+- /*
+- * If we need to return to the preempted context, we
+- * need to skip the lite-restore and force it to
+- * reload the RING_TAIL. Otherwise, the HW has a
+- * tendency to ignore us rewinding the TAIL to the
+- * end of an earlier request.
+- */
+- last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ last = NULL;
+ } else if (need_timeslice(engine, last) &&
+ !timer_pending(&engine->execlists.timer)) {
+@@ -1150,16 +1161,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
+ if (!list_is_last(&last->sched.link,
+ &engine->active.requests))
+ return;
+-
+- /*
+- * WaIdleLiteRestore:bdw,skl
+- * Apply the wa NOOPs to prevent
+- * ring:HEAD == rq:TAIL as we resubmit the
+- * request. See gen8_emit_fini_breadcrumb() for
+- * where we prepare the padding after the
+- * end of the request.
+- */
+- last->tail = last->wa_tail;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+index bacaa7bb8c9a..eee9fcbe0434 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+@@ -1312,6 +1312,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+ kref_init(&ring->ref);
+
+ ring->size = size;
++ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
++
+ /* Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
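
The tail/prev test added to execlists_update_context() above relies on intel_ring_direction(), which this patch uses but does not define, together with the ring->wrap value computed here as BITS_PER_TYPE(ring->size) - ilog2(size) (32 - 14 = 18 for a 16 KiB ring). A minimal userspace sketch of how such a wrap-aware comparison can be built from that shift; the helper name and exact form are assumptions, not part of this patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch (assumed, mirroring what intel_ring_direction() must do):
 * shifting the raw difference of two ring offsets left by "wrap"
 * moves the modulo-ring-size distance into the sign bit, so the
 * result is negative when next sits behind prev on the ring.
 */
static int ring_direction(uint32_t size, uint32_t next, uint32_t prev)
{
	unsigned int wrap = 32 - __builtin_ctz(size);	/* size is a power of two */

	return (int32_t)((next - prev) << wrap);
}

int main(void)
{
	uint32_t size = 16 * 1024;	/* 16 KiB ring, wrap = 18 */

	printf("%d\n", ring_direction(size, 0x100, 0x080) > 0);	/* 1: ahead */
	printf("%d\n", ring_direction(size, 0x080, 0x100) < 0);	/* 1: behind */
	/* across the wrap point: 0x010 is 0x20 bytes past 0x3ff0 */
	printf("%d\n", ring_direction(size, 0x010, 0x3ff0) > 0);	/* 1: ahead */
	return 0;
}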
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index 4b04af569c05..7dc7bb850d0a 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
+
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ list_del(&mm->ppgtt_mm.list);
++
++ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+ list_del(&mm->ppgtt_mm.lru_list);
++ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
++
+ invalidate_ppgtt_mm(mm);
+ } else {
+ vfree(mm->ggtt_mm.virtual_ggtt);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 98305d987ac1..4d561da3dcea 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -136,7 +136,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
+ {
+- void *vaddr = obj->phys_handle->vaddr + args->offset;
++ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
+
+ /*
+@@ -802,10 +802,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ ret = i915_gem_gtt_pwrite_fast(obj, args);
+
+ if (ret == -EFAULT || ret == -ENOSPC) {
+- if (obj->phys_handle)
+- ret = i915_gem_phys_pwrite(obj, args, file);
+- else
++ if (i915_gem_object_has_struct_page(obj))
+ ret = i915_gem_shmem_pwrite(obj, args);
++ else
++ ret = i915_gem_phys_pwrite(obj, args, file);
+ }
+
+ i915_gem_object_unpin_pages(obj);
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index e284bd76fa86..fe9edbba997c 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -1768,7 +1768,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
+ if (!xchg(&warned, true) &&
+ ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
+ pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+- pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
++ pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
++ pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
+ pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
+ pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
+index 3eba8a2b39c2..0ef205fe5e29 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler.c
++++ b/drivers/gpu/drm/i915/i915_scheduler.c
+@@ -418,8 +418,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+
+ if (!node_signaled(signal)) {
+ INIT_LIST_HEAD(&dep->dfs_link);
+- list_add(&dep->wait_link, &signal->waiters_list);
+- list_add(&dep->signal_link, &node->signalers_list);
+ dep->signaler = signal;
+ dep->waiter = node;
+ dep->flags = flags;
+@@ -429,6 +427,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ !node_started(signal))
+ node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
++ /* All set, now publish. Beware the lockless walkers. */
++ list_add(&dep->signal_link, &node->signalers_list);
++ list_add_rcu(&dep->wait_link, &signal->waiters_list);
++
+ /*
+ * As we do not allow WAIT to preempt inflight requests,
+ * once we have executed a request, along with triggering
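
The reordering in this hunk enforces the init-then-publish rule for lockless list walkers: every field of the dependency must be written before list_add_rcu() makes it reachable through for_each_waiter() in intel_lrc.c. A userspace analog with C11 atomics, not the kernel RCU API; a single writer is assumed, matching the scheduler holding its lock while adding:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* A node must be fully initialised before it becomes visible. */
struct dep {
	int flags;			/* payload, written before publish */
	struct dep *_Atomic next;	/* lockless singly-linked list */
};

static struct dep *_Atomic waiters;

static void publish_dep(struct dep *dep, int flags)
{
	dep->flags = flags;		/* 1. initialise every field */
	dep->next = atomic_load_explicit(&waiters, memory_order_relaxed);
	/* 2. the release store orders the init before the link */
	atomic_store_explicit(&waiters, dep, memory_order_release);
}

static void walk_waiters(void)
{
	struct dep *d;

	/* the acquire load pairs with the release store above */
	for (d = atomic_load_explicit(&waiters, memory_order_acquire);
	     d; d = d->next)
		printf("flags=%d\n", d->flags);
}

int main(void)
{
	static struct dep a, b;

	publish_dep(&a, 1);
	publish_dep(&b, 2);
	walk_waiters();			/* prints flags=2 then flags=1 */
	return 0;
}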
+diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
+index 16acdf7bdbe6..17cfeef35a24 100644
+--- a/drivers/gpu/drm/i915/i915_utils.c
++++ b/drivers/gpu/drm/i915/i915_utils.c
+@@ -8,9 +8,8 @@
+ #include "i915_drv.h"
+ #include "i915_utils.h"
+
+-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+- "providing the dmesg log by booting with drm.debug=0xf"
++#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
++#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
+
+ void
+ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+index 24ab6249083a..6f420cc73dbd 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
++ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
++ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index 5193b6257061..b856e87574fd 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -451,6 +451,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+ asyw->clr.ntfy = armw->ntfy.handle != 0;
+ asyw->clr.sema = armw->sema.handle != 0;
+ asyw->clr.xlut = armw->xlut.handle != 0;
++ if (asyw->clr.xlut && asyw->visible)
++ asyw->set.xlut = asyw->xlut.handle != 0;
+ asyw->clr.csc = armw->csc.valid;
+ if (wndw->func->image_clr)
+ asyw->clr.image = armw->image.handle[0] != 0;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 763cfca886a7..3107b0738e40 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+ as = mmu->as;
+ if (as >= 0) {
+ int en = atomic_inc_return(&mmu->as_count);
+- WARN_ON(en >= NUM_JOB_SLOTS);
++
++ /*
++ * AS can be retained by active jobs or a perfcnt context,
++ * hence the '+ 1' here.
++ */
++ WARN_ON(en >= (NUM_JOB_SLOTS + 1));
+
+ list_move(&mmu->list, &pfdev->as_lru_list);
+ goto out;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+index 684820448be3..6913578d5aa7 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+ struct panfrost_file_priv *user = file_priv->driver_priv;
+ struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
+ struct drm_gem_shmem_object *bo;
+- u32 cfg;
++ u32 cfg, as;
+ int ret;
+
+ if (user == perfcnt->user)
+@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
+
+ perfcnt->user = user;
+
+- /*
+- * Always use address space 0 for now.
+- * FIXME: this needs to be updated when we start using different
+- * address space.
+- */
+- cfg = GPU_PERFCNT_CFG_AS(0) |
++ as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
++ cfg = GPU_PERFCNT_CFG_AS(as) |
+ GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
+
+ /*
+@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
+ perfcnt->buf = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
++ panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
+ pm_runtime_mark_last_busy(pfdev->dev);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b273e421e910..a1a035270cab 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
+ }
+ }
+
++static void
++isert_wait4cmds(struct iscsi_conn *conn)
++{
++ isert_info("iscsi_conn %p\n", conn);
++
++ if (conn->sess) {
++ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
++ target_wait_for_sess_cmds(conn->sess->se_sess);
++ }
++}
++
+ /**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ * unsolicited dataout
+@@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+
+ ib_drain_qp(isert_conn->qp);
+ isert_put_unsol_pending_cmds(conn);
++ isert_wait4cmds(conn);
+ isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
+diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
+index c31e7bc4ccbe..e0b3fa2bb7ab 100644
+--- a/drivers/iommu/qcom_iommu.c
++++ b/drivers/iommu/qcom_iommu.c
+@@ -345,21 +345,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
+ {
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+
+- if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
+- return;
+-
+ iommu_put_dma_cookie(domain);
+
+- /* NOTE: unmap can be called after client device is powered off,
+- * for example, with GPUs or anything involving dma-buf. So we
+- * cannot rely on the device_link. Make sure the IOMMU is on to
+- * avoid unclocked accesses in the TLB inv path:
+- */
+- pm_runtime_get_sync(qcom_domain->iommu->dev);
+-
+- free_io_pgtable_ops(qcom_domain->pgtbl_ops);
+-
+- pm_runtime_put_sync(qcom_domain->iommu->dev);
++ if (qcom_domain->iommu) {
++ /*
++ * NOTE: unmap can be called after client device is powered
++ * off, for example, with GPUs or anything involving dma-buf.
++ * So we cannot rely on the device_link. Make sure the IOMMU
++ * is on to avoid unclocked accesses in the TLB inv path:
++ */
++ pm_runtime_get_sync(qcom_domain->iommu->dev);
++ free_io_pgtable_ops(qcom_domain->pgtbl_ops);
++ pm_runtime_put_sync(qcom_domain->iommu->dev);
++ }
+
+ kfree(qcom_domain);
+ }
+@@ -405,7 +403,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ unsigned i;
+
+- if (!qcom_domain->iommu)
++ if (WARN_ON(!qcom_domain->iommu))
+ return;
+
+ pm_runtime_get_sync(qcom_iommu->dev);
+@@ -418,8 +416,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ ctx->domain = NULL;
+ }
+ pm_runtime_put_sync(qcom_iommu->dev);
+-
+- qcom_domain->iommu = NULL;
+ }
+
+ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index c27ed7363768..8c4507838325 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -4713,12 +4713,12 @@ int e1000e_close(struct net_device *netdev)
+
+ pm_runtime_get_sync(&pdev->dev);
+
+- if (!test_bit(__E1000_DOWN, &adapter->state)) {
++ if (netif_device_present(netdev)) {
+ e1000e_down(adapter, true);
+ e1000_free_irq(adapter);
+
+ /* Link status message must follow this format */
+- pr_info("%s NIC Link is Down\n", adapter->netdev->name);
++ pr_info("%s NIC Link is Down\n", netdev->name);
+ }
+
+ napi_disable(&adapter->napi);
+@@ -6309,10 +6309,14 @@ static int e1000e_pm_freeze(struct device *dev)
+ {
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
++ bool present;
+
++ rtnl_lock();
++
++ present = netif_device_present(netdev);
+ netif_device_detach(netdev);
+
+- if (netif_running(netdev)) {
++ if (present && netif_running(netdev)) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+@@ -6324,6 +6328,8 @@ static int e1000e_pm_freeze(struct device *dev)
+ e1000e_down(adapter, false);
+ e1000_free_irq(adapter);
+ }
++ rtnl_unlock();
++
+ e1000e_reset_interrupt_capability(adapter);
+
+ /* Allow time for pending master requests to run */
+@@ -6571,6 +6577,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
+ __e1000e_disable_aspm(pdev, state, 1);
+ }
+
++static int e1000e_pm_thaw(struct device *dev)
++{
++ struct net_device *netdev = dev_get_drvdata(dev);
++ struct e1000_adapter *adapter = netdev_priv(netdev);
++ int rc = 0;
++
++ e1000e_set_interrupt_capability(adapter);
++
++ rtnl_lock();
++ if (netif_running(netdev)) {
++ rc = e1000_request_irq(adapter);
++ if (rc)
++ goto err_irq;
++
++ e1000e_up(adapter);
++ }
++
++ netif_device_attach(netdev);
++err_irq:
++ rtnl_unlock();
++
++ return rc;
++}
++
+ #ifdef CONFIG_PM
+ static int __e1000_resume(struct pci_dev *pdev)
+ {
+@@ -6638,26 +6668,6 @@ static int __e1000_resume(struct pci_dev *pdev)
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+-static int e1000e_pm_thaw(struct device *dev)
+-{
+- struct net_device *netdev = dev_get_drvdata(dev);
+- struct e1000_adapter *adapter = netdev_priv(netdev);
+-
+- e1000e_set_interrupt_capability(adapter);
+- if (netif_running(netdev)) {
+- u32 err = e1000_request_irq(adapter);
+-
+- if (err)
+- return err;
+-
+- e1000e_up(adapter);
+- }
+-
+- netif_device_attach(netdev);
+-
+- return 0;
+-}
+-
+ static int e1000e_pm_suspend(struct device *dev)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+@@ -6829,16 +6839,11 @@ static void e1000_netpoll(struct net_device *netdev)
+ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+ {
+- struct net_device *netdev = pci_get_drvdata(pdev);
+- struct e1000_adapter *adapter = netdev_priv(netdev);
+-
+- netif_device_detach(netdev);
++ e1000e_pm_freeze(&pdev->dev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+- if (netif_running(netdev))
+- e1000e_down(adapter, true);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+@@ -6904,10 +6909,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
+
+ e1000_init_manageability_pt(adapter);
+
+- if (netif_running(netdev))
+- e1000e_up(adapter);
+-
+- netif_device_attach(netdev);
++ e1000e_pm_thaw(&pdev->dev);
+
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
+ * is up. For all other cases, let the f/w know that the h/w is now
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+index 3a975641f902..20b907dc1e29 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+@@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
+ netdev_err(priv->netdev, err_str);
+
+ if (!reporter)
+- return err_ctx->recover(&err_ctx->ctx);
++ return err_ctx->recover(err_ctx->ctx);
+
+ return devlink_health_report(reporter, err_str, err_ctx);
+ }
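
The one-character fix in this hunk is easy to miss: err_ctx->ctx is already a void *, so &err_ctx->ctx passed the address of the member instead of the context it stores. A small illustration; the struct shape is an assumption loosely mirroring mlx5e_err_ctx:

#include <stdio.h>

struct err_ctx {
	int (*recover)(void *ctx);
	void *ctx;
};

static int recover_rq(void *ctx)
{
	printf("recover called with %p\n", ctx);
	return 0;
}

int main(void)
{
	int rq = 42;	/* stands in for a real receive queue */
	struct err_ctx e = { .recover = recover_rq, .ctx = &rq };

	e.recover(e.ctx);	/* correct: passes &rq */
	e.recover(&e.ctx);	/* the old bug: passes the member's own address */
	return 0;
}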
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+index 7c8796d9743f..a226277b0980 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
+ }
+ }
+
++static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
++{
++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
++ mlx5_wq_ll_reset(&rq->mpwqe.wq);
++ else
++ mlx5_wq_cyc_reset(&rq->wqe.wq);
++}
++
+ /* SW parser related functions */
+
+ struct mlx5e_swp_spec {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 29a5a8c894e3..e5e91cbcbc31 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -723,6 +723,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+ if (!in)
+ return -ENOMEM;
+
++ if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
++ mlx5e_rqwq_reset(rq);
++
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 60fddf8afc99..c6ed4b7f4f97 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -2319,25 +2319,17 @@ out:
+
+ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+ {
+- int err = 0;
+-
+ if (!esw)
+ return -EOPNOTSUPP;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
+- mutex_lock(&esw->state_lock);
+- if (esw->mode != MLX5_ESWITCH_LEGACY) {
+- err = -EOPNOTSUPP;
+- goto out;
+- }
++ if (esw->mode != MLX5_ESWITCH_LEGACY)
++ return -EOPNOTSUPP;
+
+ *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+-
+-out:
+- mutex_unlock(&esw->state_lock);
+- return err;
++ return 0;
+ }
+
+ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+index dd2315ce4441..41e35b341b70 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+@@ -96,6 +96,13 @@ err_db_free:
+ return err;
+ }
+
++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
++{
++ wq->wqe_ctr = 0;
++ wq->cur_sz = 0;
++ mlx5_wq_cyc_update_db_record(wq);
++}
++
+ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *qpc, struct mlx5_wq_qp *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+@@ -194,6 +201,19 @@ err_db_free:
+ return err;
+ }
+
++static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
++{
++ struct mlx5_wqe_srq_next_seg *next_seg;
++ int i;
++
++ for (i = 0; i < wq->fbc.sz_m1; i++) {
++ next_seg = mlx5_wq_ll_get_wqe(wq, i);
++ next_seg->next_wqe_index = cpu_to_be16(i + 1);
++ }
++ next_seg = mlx5_wq_ll_get_wqe(wq, i);
++ wq->tail_next = &next_seg->next_wqe_index;
++}
++
+ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+@@ -201,9 +221,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
+ struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
+- struct mlx5_wqe_srq_next_seg *next_seg;
+ int err;
+- int i;
+
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ if (err) {
+@@ -222,13 +240,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+
+- for (i = 0; i < fbc->sz_m1; i++) {
+- next_seg = mlx5_wq_ll_get_wqe(wq, i);
+- next_seg->next_wqe_index = cpu_to_be16(i + 1);
+- }
+- next_seg = mlx5_wq_ll_get_wqe(wq, i);
+- wq->tail_next = &next_seg->next_wqe_index;
+-
++ mlx5_wq_ll_init_list(wq);
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+@@ -239,6 +251,15 @@ err_db_free:
+ return err;
+ }
+
++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
++{
++ wq->head = 0;
++ wq->wqe_ctr = 0;
++ wq->cur_sz = 0;
++ mlx5_wq_ll_init_list(wq);
++ mlx5_wq_ll_update_db_record(wq);
++}
++
+ void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+ {
+ mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
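
These reset helpers exist so the RST -> RDY transition in mlx5e_modify_rq_state() (en_main.c above) can realign software state with hardware: after reset the device consumes from index 0 again, so stale counters and a stale doorbell record would point past the restarted queue. A toy model, not mlx5 code:

#include <stdint.h>
#include <stdio.h>

struct wq_cyc {
	uint16_t wqe_ctr;	/* next entry software will post */
	uint16_t cur_sz;	/* entries currently outstanding */
	uint16_t db_rec;	/* doorbell record seen by "hardware" */
};

static void wq_post(struct wq_cyc *wq)
{
	wq->wqe_ctr++;
	wq->cur_sz++;
	wq->db_rec = wq->wqe_ctr;	/* mlx5_wq_cyc_update_db_record() */
}

static void wq_reset(struct wq_cyc *wq)
{
	wq->wqe_ctr = 0;	/* mirrors mlx5_wq_cyc_reset() above */
	wq->cur_sz = 0;
	wq->db_rec = wq->wqe_ctr;
}

int main(void)
{
	struct wq_cyc wq = { 0 };

	wq_post(&wq);
	wq_post(&wq);
	wq_reset(&wq);	/* RST -> RDY: hardware restarts at index 0 */
	printf("ctr=%u sz=%u db=%u\n", wq.wqe_ctr, wq.cur_sz, wq.db_rec);
	return 0;
}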
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+index 55791f71a778..5efc038440df 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+@@ -80,10 +80,12 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
+
+ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *qpc, struct mlx5_wq_qp *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
+
+ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 132ade51ee87..aed6354cb271 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -711,6 +711,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ }
+
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
++ kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf) {
+ error = -ENOMEM;
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 74d497d39c5a..c6695354b123 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+ }
+
++static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ /* do not allow direct mmap of the ashmem backing shmem file */
++ return -EPERM;
++}
++
++static unsigned long
++ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
++{
++ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
++}
++
+ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
++ static struct file_operations vmfile_fops;
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+@@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ }
+ vmfile->f_mode |= FMODE_LSEEK;
+ asma->file = vmfile;
++ /*
++ * Override the vmfile's mmap operation so it cannot be remapped,
++ * which would otherwise create a new vma that bypasses the asma
++ * permission checks. get_unmapped_area must be overridden as well
++ * to avoid the VM_BUG_ON check for f_ops modification.
++ */
++ if (!vmfile_fops.mmap) {
++ vmfile_fops = *vmfile->f_op;
++ vmfile_fops.mmap = ashmem_vmfile_mmap;
++ vmfile_fops.get_unmapped_area =
++ ashmem_vmfile_get_unmapped_area;
++ }
++ vmfile->f_op = &vmfile_fops;
+ }
+ get_file(asma->file);
+
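
The vmfile_fops override above is a copy-once pattern: the first mmap copies the backing file's f_op and swaps in the deny handlers, and every later vmfile shares that single static copy. That is safe here because all ashmem backing files share one f_op and the code runs under ashmem_mutex. A userspace sketch with illustrative names:

#include <stdio.h>

struct f_ops {
	int (*mmap)(void);
	int (*read)(void);
};

static int shmem_mmap(void) { return 0; }
static int shmem_read(void) { return 0; }
static int deny_mmap(void) { return -1; }	/* ashmem_vmfile_mmap() */

static const struct f_ops shmem_fops = { shmem_mmap, shmem_read };

static const struct f_ops *override_fops(const struct f_ops *orig)
{
	static struct f_ops vmfile_fops;	/* copied exactly once */

	if (!vmfile_fops.mmap) {
		vmfile_fops = *orig;		/* inherit everything... */
		vmfile_fops.mmap = deny_mmap;	/* ...except mmap */
	}
	return &vmfile_fops;
}

int main(void)
{
	const struct f_ops *ops = override_fops(&shmem_fops);

	printf("mmap -> %d, read -> %d\n", ops->mmap(), ops->read());
	return 0;
}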
+diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
+index 9b19ea9d3fa1..9a3f7c034ab4 100644
+--- a/drivers/staging/greybus/audio_manager.c
++++ b/drivers/staging/greybus/audio_manager.c
+@@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void)
+
+ list_for_each_entry_safe(module, next, &modules_list, list) {
+ list_del(&module->list);
+- kobject_put(&module->kobj);
+ ida_simple_remove(&module_id, module->id);
++ kobject_put(&module->kobj);
+ }
+
+ is_empty = list_empty(&modules_list);
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 9f0418ee7528..630e7d933b10 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -2025,7 +2025,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+ struct ieee_param *param;
+ uint ret = 0;
+
+- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -2812,7 +2812,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+ goto out;
+ }
+
+- if (!p->pointer) {
++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+index b44e902ed338..b6d56cfb0a19 100644
+--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+@@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context)
+ s32 ret;
+ struct adapter *padapter;
+ struct xmit_priv *pxmitpriv;
+- u8 thread_name[20] = "RTWHALXT";
+-
++ u8 thread_name[20];
+
+ ret = _SUCCESS;
+ padapter = context;
+ pxmitpriv = &padapter->xmitpriv;
+
+- rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
++ rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
+ thread_enter(thread_name);
+
+ DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index d1b199e3e5bd..d8d44fd9a92f 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -3379,7 +3379,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+
+ /* down(&ieee->wx_sem); */
+
+- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -4213,7 +4213,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+
+
+ /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
+- if (!p->pointer) {
++ if (!p->pointer || p->length != sizeof(*param)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 3b94e80f1d5e..879ceef517fb 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
+
+ vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+
+- priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
++ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
+ priv->current_rssi = priv->bb_pre_ed_rssi;
+
+ skb_pull(skb, 8);
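
The one-character change fixes a sign error: rx_dbm holds a negative dBm value, so casting it straight to u8 produces a large bogus reading, while negating first stores the magnitude. Worked example:

#include <stdio.h>

int main(void)
{
	long rx_dbm = -65;	/* a typical dBm value from the RF */

	/* old: (u8)rx_dbm + 1 -> 191 + 1 = 192 (nonsense) */
	printf("%u\n", (unsigned char)rx_dbm + 1);
	/* new: (u8)-rx_dbm + 1 -> 65 + 1 = 66 (magnitude + 1) */
	printf("%u\n", (unsigned char)-rx_dbm + 1);
	return 0;
}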
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index c070cb2a6a5b..d19e051f2bc2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ conn->cid);
+
+- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+- return iscsit_add_reject_cmd(cmd,
+- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++ target_get_sess_cmd(&cmd->se_cmd, true);
+
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ scsilun_to_int(&hdr->lun));
+@@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+- return iscsit_add_reject_cmd(cmd,
+- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++ target_get_sess_cmd(&cmd->se_cmd, true);
+
+ /*
+ * TASK_REASSIGN for ERL=2 / connection stays inside of
+@@ -4151,6 +4147,9 @@ int iscsit_close_connection(
+ iscsit_stop_nopin_response_timer(conn);
+ iscsit_stop_nopin_timer(conn);
+
++ if (conn->conn_transport->iscsit_wait_conn)
++ conn->conn_transport->iscsit_wait_conn(conn);
++
+ /*
+ * During Connection recovery drop unacknowledged out of order
+ * commands for this connection, and prepare the other commands
+@@ -4233,11 +4232,6 @@ int iscsit_close_connection(
+ * must wait until they have completed.
+ */
+ iscsit_check_conn_usage_count(conn);
+- target_sess_cmd_list_set_waiting(sess->se_sess);
+- target_wait_for_sess_cmds(sess->se_sess);
+-
+- if (conn->conn_transport->iscsit_wait_conn)
+- conn->conn_transport->iscsit_wait_conn(conn);
+
+ ahash_request_free(conn->conn_tx_hash);
+ if (conn->conn_rx_hash) {
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index eda8b4736c15..d542e26ca56a 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+
+ target_remove_from_state_list(cmd);
+
++ /*
++ * Clear struct se_cmd->se_lun before the handoff to FE.
++ */
++ cmd->se_lun = NULL;
++
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+@@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+ return cmd->se_tfo->check_stop_free(cmd);
+ }
+
++static void transport_lun_remove_cmd(struct se_cmd *cmd)
++{
++ struct se_lun *lun = cmd->se_lun;
++
++ if (!lun)
++ return;
++
++ if (cmpxchg(&cmd->lun_ref_active, true, false))
++ percpu_ref_put(&lun->lun_ref);
++}
++
+ static void target_complete_failure_work(struct work_struct *work)
+ {
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+@@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd)
+
+ WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+
++ transport_lun_remove_cmd(cmd);
++
+ transport_cmd_check_stop_to_fabric(cmd);
+ }
+
+@@ -1695,6 +1713,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
+ se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
++ transport_lun_remove_cmd(se_cmd);
+ transport_cmd_check_stop_to_fabric(se_cmd);
+ }
+
+@@ -1885,6 +1904,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ goto queue_full;
+
+ check_stop:
++ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+@@ -2182,6 +2202,7 @@ queue_status:
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
+ return;
+ }
++ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ }
+
+@@ -2276,6 +2297,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ if (ret)
+ goto queue_full;
+
++ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+@@ -2301,6 +2323,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ if (ret)
+ goto queue_full;
+
++ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+@@ -2336,6 +2359,7 @@ queue_rsp:
+ if (ret)
+ goto queue_full;
+
++ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+@@ -2371,6 +2395,7 @@ queue_status:
+ break;
+ }
+
++ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+@@ -2697,6 +2722,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+ */
+ if (cmd->state_active)
+ target_remove_from_state_list(cmd);
++
++ if (cmd->se_lun)
++ transport_lun_remove_cmd(cmd);
+ }
+ if (aborted)
+ cmd->free_compl = &compl;
+@@ -2768,9 +2796,6 @@ static void target_release_cmd_kref(struct kref *kref)
+ struct completion *abrt_compl = se_cmd->abrt_compl;
+ unsigned long flags;
+
+- if (se_cmd->lun_ref_active)
+- percpu_ref_put(&se_cmd->se_lun->lun_ref);
+-
+ if (se_sess) {
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ list_del_init(&se_cmd->se_cmd_list);
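
transport_lun_remove_cmd() above uses cmpxchg() on lun_ref_active so that, no matter how many completion and abort paths reach it, the LUN reference is dropped exactly once. A userspace analog with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lun_ref_active = true;
static atomic_int lun_ref = 1;

static void lun_remove(void)
{
	bool expected = true;

	/* only the caller that flips true -> false drops the ref */
	if (atomic_compare_exchange_strong(&lun_ref_active, &expected, false))
		atomic_fetch_sub(&lun_ref, 1);	/* percpu_ref_put() */
}

int main(void)
{
	lun_remove();
	lun_remove();	/* a racing second call is a no-op */
	printf("ref=%d\n", atomic_load(&lun_ref));	/* ref=0, not -1 */
	return 0;
}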
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index c5974c9af841..e53932d27ac5 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -274,6 +274,12 @@ out:
+ return ret;
+ }
+
++static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
++ size_t bytes)
++{
++ return -EPERM;
++}
++
+ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+ {
+@@ -319,6 +325,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+ config.read_only = true;
+ } else {
+ config.name = "nvm_non_active";
++ config.reg_read = tb_switch_nvm_no_read;
+ config.reg_write = tb_switch_nvm_write;
+ config.root_only = true;
+ }
+diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
+index d1cdd2ab8b4c..d367803e2044 100644
+--- a/drivers/tty/serdev/serdev-ttyport.c
++++ b/drivers/tty/serdev/serdev-ttyport.c
+@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *parent,
+ struct tty_driver *drv, int idx)
+ {
+- const struct tty_port_client_operations *old_ops;
+ struct serdev_controller *ctrl;
+ struct serport *serport;
+ int ret;
+@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+
+ ctrl->ops = &ctrl_ops;
+
+- old_ops = port->client_ops;
+ port->client_ops = &client_ops;
+ port->client_data = ctrl;
+
+@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+
+ err_reset_data:
+ port->client_data = NULL;
+- port->client_ops = old_ops;
++ port->client_ops = &tty_port_default_client_ops;
+ serdev_controller_put(ctrl);
+
+ return ERR_PTR(ret);
+@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
+ return -ENODEV;
+
+ serdev_controller_remove(ctrl);
+- port->client_ops = NULL;
+ port->client_data = NULL;
++ port->client_ops = &tty_port_default_client_ops;
+ serdev_controller_put(ctrl);
+
+ return 0;
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index 0438d9a905ce..6ba2efde7252 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -379,7 +379,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
+ port.port.line = rc;
+
+ port.port.irq = irq_of_parse_and_map(np, 0);
+- port.port.irqflags = IRQF_SHARED;
+ port.port.handle_irq = aspeed_vuart_handle_irq;
+ port.port.iotype = UPIO_MEM;
+ port.port.type = PORT_16550A;
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index e682390ce0de..28bdbd7b4ab2 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ struct hlist_head *h;
+ struct hlist_node *n;
+ struct irq_info *i;
+- int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
++ int ret;
+
+ mutex_lock(&hash_mutex);
+
+@@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ INIT_LIST_HEAD(&up->list);
+ i->head = &up->list;
+ spin_unlock_irq(&i->lock);
+- irq_flags |= up->port.irqflags;
+ ret = request_irq(up->port.irq, serial8250_interrupt,
+- irq_flags, up->port.name, i);
++ up->port.irqflags, up->port.name, i);
+ if (ret < 0)
+ serial_do_unlink(i, up);
+ }
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 0826cfdbd406..9ba31701a372 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -172,7 +172,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+
+ port->type = type;
+ port->uartclk = clk;
+- port->irqflags |= IRQF_SHARED;
+
+ if (of_property_read_bool(np, "no-loopback-test"))
+ port->flags |= UPF_SKIP_TEST;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 8407166610ce..2c65c775bf5a 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2192,6 +2192,10 @@ int serial8250_do_startup(struct uart_port *port)
+ }
+ }
+
++ /* Check if we need to have shared IRQs */
++ if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
++ up->port.irqflags |= IRQF_SHARED;
++
+ if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
+ unsigned char iir1;
+ /*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 1ba9bc667e13..8a909d556185 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -574,7 +574,8 @@ static void atmel_stop_tx(struct uart_port *port)
+ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+
+ if (atmel_uart_is_half_duplex(port))
+- atmel_start_rx(port);
++ if (!atomic_read(&atmel_port->tasklet_shutdown))
++ atmel_start_rx(port);
+
+ }
+
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 9d8c660dc289..22d8705cd5cd 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -603,7 +603,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
+
+ sport->tx_bytes = uart_circ_chars_pending(xmit);
+
+- if (xmit->tail < xmit->head) {
++ if (xmit->tail < xmit->head || xmit->head == 0) {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ } else {
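
The added head == 0 test fixes the segment count for a circular buffer whose pending data ends exactly at the buffer boundary: that is still one contiguous run, not a wrapped pair. A compact model of the condition:

#include <stdio.h>

/* 1 scatter segment if the pending data is contiguous, else 2. */
static int tx_nents(unsigned int head, unsigned int tail)
{
	return (tail < head || head == 0) ? 1 : 2;
}

int main(void)
{
	printf("%d\n", tx_nents(100, 10));	/* 1: [10, 100) */
	printf("%d\n", tx_nents(10, 100));	/* 2: [100, end) + [0, 10) */
	printf("%d\n", tx_nents(0, 100));	/* 1: [100, end) -- the fixed case */
	return 0;
}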
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index 14c6306bc462..f98a79172ad2 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -125,6 +125,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
+ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+ static void qcom_geni_serial_stop_rx(struct uart_port *uport);
++static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
+
+ static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 32000000, 48000000, 64000000, 80000000,
+@@ -615,7 +616,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+ u32 irq_en;
+ u32 status;
+ struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+- u32 irq_clear = S_CMD_DONE_EN;
++ u32 s_irq_status;
+
+ irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+@@ -631,10 +632,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+ return;
+
+ geni_se_cancel_s_cmd(&port->se);
+- qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+- S_GENI_CMD_CANCEL, false);
++ qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
++ S_CMD_CANCEL_EN, true);
++ /*
++ * If a timeout occurs, the secondary engine remains active
++ * and the abort sequence is executed.
++ */
++ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
++ /* Flush the Rx buffer */
++ if (s_irq_status & S_RX_FIFO_LAST_EN)
++ qcom_geni_serial_handle_rx(uport, true);
++ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
++
+ status = readl(uport->membase + SE_GENI_STATUS);
+- writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ if (status & S_GENI_CMD_ACTIVE)
+ qcom_geni_serial_abort_rx(uport);
+ }
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index 044c3cbdcfa4..ea80bf872f54 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
+ }
+ }
+
+-static const struct tty_port_client_operations default_client_ops = {
++const struct tty_port_client_operations tty_port_default_client_ops = {
+ .receive_buf = tty_port_default_receive_buf,
+ .write_wakeup = tty_port_default_wakeup,
+ };
++EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
+
+ void tty_port_init(struct tty_port *port)
+ {
+@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
+ spin_lock_init(&port->lock);
+ port->close_delay = (50 * HZ) / 100;
+ port->closing_wait = (3000 * HZ) / 100;
+- port->client_ops = &default_client_ops;
++ port->client_ops = &tty_port_default_client_ops;
+ kref_init(&port->kref);
+ }
+ EXPORT_SYMBOL(tty_port_init);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 78732feaf65b..44d974d4159f 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -29,6 +29,8 @@
+ #include <linux/console.h>
+ #include <linux/tty_flip.h>
+
++#include <linux/sched/signal.h>
++
+ /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
+ #define isspace(c) ((c) == ' ')
+
+@@ -350,6 +352,7 @@ int paste_selection(struct tty_struct *tty)
+ unsigned int count;
+ struct tty_ldisc *ld;
+ DECLARE_WAITQUEUE(wait, current);
++ int ret = 0;
+
+ console_lock();
+ poke_blanked_console();
+@@ -363,6 +366,10 @@ int paste_selection(struct tty_struct *tty)
+ add_wait_queue(&vc->paste_wait, &wait);
+ while (sel_buffer && sel_buffer_lth > pasted) {
+ set_current_state(TASK_INTERRUPTIBLE);
++ if (signal_pending(current)) {
++ ret = -EINTR;
++ break;
++ }
+ if (tty_throttled(tty)) {
+ schedule();
+ continue;
+@@ -378,6 +385,6 @@ int paste_selection(struct tty_struct *tty)
+
+ tty_buffer_unlock_exclusive(&vc->port);
+ tty_ldisc_deref(ld);
+- return 0;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(paste_selection);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 34aa39d1aed9..3b4ccc2a30c1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
+ WARN_CONSOLE_UNLOCKED();
+
+ set_origin(vc);
+- if (vc->vc_sw->con_flush_scrollback)
++ if (vc->vc_sw->con_flush_scrollback) {
+ vc->vc_sw->con_flush_scrollback(vc);
+- else
++ } else if (con_is_visible(vc)) {
++ /*
++ * When no con_flush_scrollback method is provided, the legacy
++ * way of flushing the scrollback buffer is to use a side effect
++ * of the con_switch method. Do this only on the foreground
++ * console: in that case background consoles have no scrollback
++ * buffer, and we obviously don't want to switch to them.
++ */
++ hide_cursor(vc);
+ vc->vc_sw->con_switch(vc);
++ set_cursor(vc);
++ }
+ }
+
+ /*
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 8b0ed139592f..ee6c91ef1f6c 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
+ return -EINVAL;
+
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
++ struct vc_data *vcp;
++
+ if (!vc_cons[i].d)
+ continue;
+ console_lock();
+- if (v.v_vlin)
+- vc_cons[i].d->vc_scan_lines = v.v_vlin;
+- if (v.v_clin)
+- vc_cons[i].d->vc_font.height = v.v_clin;
+- vc_cons[i].d->vc_resize_user = 1;
+- vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
++ vcp = vc_cons[i].d;
++ if (vcp) {
++ if (v.v_vlin)
++ vcp->vc_scan_lines = v.v_vlin;
++ if (v.v_clin)
++ vcp->vc_font.height = v.v_clin;
++ vcp->vc_resize_user = 1;
++ vc_resize(vcp, v.v_cols, v.v_rows);
++ }
+ console_unlock();
+ }
+ break;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 3e94259406d7..c68217b7dace 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
+ {
++ struct usb_device *udev = to_usb_device(ddev);
+ unsigned char *buffer0 = buffer;
+ struct usb_endpoint_descriptor *d;
+ struct usb_host_endpoint *endpoint;
+@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
+
++ /* Ignore blacklisted endpoints */
++ if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
++ if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
++ dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
++ cfgno, inum, asnum,
++ d->bEndpointAddress);
++ goto skip_to_next_endpoint_or_interface_descriptor;
++ }
++ }
++
+ endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ ++ifp->desc.bNumEndpoints;
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 4ac74b354801..f381faa10f15 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -37,7 +37,9 @@
+ #include "otg_whitelist.h"
+
+ #define USB_VENDOR_GENESYS_LOGIC 0x05e3
++#define USB_VENDOR_SMSC 0x0424
+ #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
++#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
+
+ #define USB_TP_TRANSMISSION_DELAY 40 /* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
+@@ -1216,11 +1218,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ #ifdef CONFIG_PM
+ udev->reset_resume = 1;
+ #endif
+- /* Don't set the change_bits when the device
+- * was powered off.
+- */
+- if (test_bit(port1, hub->power_bits))
+- set_bit(port1, hub->change_bits);
+
+ } else {
+ /* The power session is gone; tell hub_wq */
+@@ -1730,6 +1727,10 @@ static void hub_disconnect(struct usb_interface *intf)
+ kfree(hub->buffer);
+
+ pm_suspend_ignore_children(&intf->dev, false);
++
++ if (hub->quirk_disable_autosuspend)
++ usb_autopm_put_interface(intf);
++
+ kref_put(&hub->kref, hub_release);
+ }
+
+@@ -1862,6 +1863,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
+ hub->quirk_check_port_auto_suspend = 1;
+
++ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
++ hub->quirk_disable_autosuspend = 1;
++ usb_autopm_get_interface(intf);
++ }
++
+ if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
+ return 0;
+
+@@ -5484,6 +5490,10 @@ out_hdev_lock:
+ }
+
+ static const struct usb_device_id hub_id_table[] = {
++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
++ .idVendor = USB_VENDOR_SMSC,
++ .bInterfaceClass = USB_CLASS_HUB,
++ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_INT_CLASS,
+ .idVendor = USB_VENDOR_GENESYS_LOGIC,
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index a9e24e4b8df1..a97dd1ba964e 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -61,6 +61,7 @@ struct usb_hub {
+ unsigned quiescing:1;
+ unsigned disconnected:1;
+ unsigned in_reset:1;
++ unsigned quirk_disable_autosuspend:1;
+
+ unsigned quirk_check_port_auto_suspend:1;
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6b6413073584..2b24336a72e5 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x0904, 0x6103), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
++ /* Sound Devices USBPre2 */
++ { USB_DEVICE(0x0926, 0x0202), .driver_info =
++ USB_QUIRK_ENDPOINT_BLACKLIST },
++
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* INTEL VALUE SSD */
+ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* novation SoundControl XL */
++ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ { } /* terminating entry must be last */
+ };
+
+@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ { } /* terminating entry must be last */
+ };
+
++/*
++ * Entries for blacklisted endpoints that should be ignored when parsing
++ * configuration descriptors.
++ *
++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
++ */
++static const struct usb_device_id usb_endpoint_blacklist[] = {
++ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
++ { }
++};
++
++bool usb_endpoint_is_blacklisted(struct usb_device *udev,
++ struct usb_host_interface *intf,
++ struct usb_endpoint_descriptor *epd)
++{
++ const struct usb_device_id *id;
++ unsigned int address;
++
++ for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
++ if (!usb_match_device(udev, id))
++ continue;
++
++ if (!usb_match_one_id_intf(udev, intf, id))
++ continue;
++
++ address = id->driver_info;
++ if (address == epd->bEndpointAddress)
++ return true;
++ }
++
++ return false;
++}
++
+ static bool usb_match_any_interface(struct usb_device *udev,
+ const struct usb_device_id *id)
+ {
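
The blacklist table reuses driver_info to carry the endpoint address to skip (0x85 on interface 1 of the Sound Devices USBPre2). A simplified model of the lookup; these are not the real USB core matching structs:

#include <stdbool.h>
#include <stdio.h>

struct ep_quirk {
	unsigned short vid, pid;
	unsigned char iface;
	unsigned char ep_address;	/* stored in .driver_info upstream */
};

static const struct ep_quirk blacklist[] = {
	{ 0x0926, 0x0202, 1, 0x85 },	/* Sound Devices USBPre2 */
	{ 0 }
};

static bool ep_is_blacklisted(unsigned short vid, unsigned short pid,
			      unsigned char iface, unsigned char ep)
{
	const struct ep_quirk *q;

	for (q = blacklist; q->vid; q++)
		if (q->vid == vid && q->pid == pid &&
		    q->iface == iface && q->ep_address == ep)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", ep_is_blacklisted(0x0926, 0x0202, 1, 0x85));	/* 1 */
	printf("%d\n", ep_is_blacklisted(0x0926, 0x0202, 1, 0x81));	/* 0 */
	return 0;
}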
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index cf4783cf661a..3ad0ee57e859 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
+ extern void usb_detect_quirks(struct usb_device *udev);
+ extern void usb_detect_interface_quirks(struct usb_device *udev);
+ extern void usb_release_quirk_list(void);
++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
++ struct usb_host_interface *intf,
++ struct usb_endpoint_descriptor *epd);
+ extern int usb_remove_device(struct usb_device *udev);
+
+ extern int usb_get_device_descriptor(struct usb_device *dev,
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index a9133773b89e..7fd0900a9cb0 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ else
+ packets = 1; /* send one packet if length is zero. */
+
+- if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
+- dev_err(hsotg->dev, "req length > maxpacket*mc\n");
+- return;
+- }
+-
+ if (dir_in && index != 0)
+ if (hs_ep->isochronous)
+ epsize = DXEPTSIZ_MC(packets);
+@@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ req->actual = 0;
+ req->status = -EINPROGRESS;
+
++ /* Don't queue an ISOC request if its length is greater than mps*mc */
++ if (hs_ep->isochronous &&
++ req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
++ dev_err(hs->dev, "req length > maxpacket*mc\n");
++ return -EINVAL;
++ }
++
+ /* In DDMA mode for ISOC's don't queue request if length greater
+ * than descriptor limits.
+ */
+@@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ struct dwc2_hsotg_ep *ep;
+ __le16 reply;
++ u16 status;
+ int ret;
+
+ dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+@@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+- /*
+- * bit 0 => self powered
+- * bit 1 => remote wakeup
+- */
+- reply = cpu_to_le16(0);
++ status = 1 << USB_DEVICE_SELF_POWERED;
++ status |= hsotg->remote_wakeup_allowed <<
++ USB_DEVICE_REMOTE_WAKEUP;
++ reply = cpu_to_le16(status);
+ break;
+
+ case USB_RECIP_INTERFACE:
+@@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ case USB_RECIP_DEVICE:
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+- hsotg->remote_wakeup_allowed = 1;
++ if (set)
++ hsotg->remote_wakeup_allowed = 1;
++ else
++ hsotg->remote_wakeup_allowed = 0;
+ break;
+
+ case USB_DEVICE_TEST_MODE:
+@@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ return -EINVAL;
+
+ hsotg->test_mode = wIndex >> 8;
+- ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+- if (ret) {
+- dev_err(hsotg->dev,
+- "%s: failed to send reply\n", __func__);
+- return ret;
+- }
+ break;
+ default:
+ return -ENOENT;
+ }
++
++ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
++ if (ret) {
++ dev_err(hsotg->dev,
++ "%s: failed to send reply\n", __func__);
++ return ret;
++ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
+index 9baabed87d61..f2c97058a00b 100644
+--- a/drivers/usb/dwc3/debug.h
++++ b/drivers/usb/dwc3/debug.h
+@@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
+ u8 epnum = event->endpoint_number;
+ size_t len;
+ int status;
+- int ret;
+
+- ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
++ len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
+ (epnum & 1) ? "in" : "out");
+- if (ret < 0)
+- return "UNKNOWN";
+
+ status = event->status;
+
+ switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+- len = strlen(str);
+- snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
++ len += scnprintf(str + len, size - len,
++ "Transfer Complete (%c%c%c)",
+ status & DEPEVT_STATUS_SHORT ? 'S' : 's',
+ status & DEPEVT_STATUS_IOC ? 'I' : 'i',
+ status & DEPEVT_STATUS_LST ? 'L' : 'l');
+
+- len = strlen(str);
+-
+ if (epnum <= 1)
+- snprintf(str + len, size - len, " [%s]",
++ scnprintf(str + len, size - len, " [%s]",
+ dwc3_ep0_state_string(ep0state));
+ break;
+ case DWC3_DEPEVT_XFERINPROGRESS:
+- len = strlen(str);
+-
+- snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
++ scnprintf(str + len, size - len,
++ "Transfer In Progress [%d] (%c%c%c)",
+ event->parameters,
+ status & DEPEVT_STATUS_SHORT ? 'S' : 's',
+ status & DEPEVT_STATUS_IOC ? 'I' : 'i',
+ status & DEPEVT_STATUS_LST ? 'M' : 'm');
+ break;
+ case DWC3_DEPEVT_XFERNOTREADY:
+- len = strlen(str);
+-
+- snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
++ len += scnprintf(str + len, size - len,
++ "Transfer Not Ready [%d]%s",
+ event->parameters,
+ status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
+ " (Active)" : " (Not Active)");
+
+- len = strlen(str);
+-
+ /* Control Endpoints */
+ if (epnum <= 1) {
+ int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+
+ switch (phase) {
+ case DEPEVT_STATUS_CONTROL_DATA:
+- snprintf(str + ret, size - ret,
++ scnprintf(str + len, size - len,
+ " [Data Phase]");
+ break;
+ case DEPEVT_STATUS_CONTROL_STATUS:
+- snprintf(str + ret, size - ret,
++ scnprintf(str + len, size - len,
+ " [Status Phase]");
+ }
+ }
+ break;
+ case DWC3_DEPEVT_RXTXFIFOEVT:
+- snprintf(str + ret, size - ret, "FIFO");
++ scnprintf(str + len, size - len, "FIFO");
+ break;
+ case DWC3_DEPEVT_STREAMEVT:
+ status = event->status;
+
+ switch (status) {
+ case DEPEVT_STREAMEVT_FOUND:
+- snprintf(str + ret, size - ret, " Stream %d Found",
++ scnprintf(str + len, size - len, " Stream %d Found",
+ event->parameters);
+ break;
+ case DEPEVT_STREAMEVT_NOTFOUND:
+ default:
+- snprintf(str + ret, size - ret, " Stream Not Found");
++ scnprintf(str + len, size - len, " Stream Not Found");
+ break;
+ }
+
+ break;
+ case DWC3_DEPEVT_EPCMDCMPLT:
+- snprintf(str + ret, size - ret, "Endpoint Command Complete");
++ scnprintf(str + len, size - len, "Endpoint Command Complete");
+ break;
+ default:
+- snprintf(str, size, "UNKNOWN");
++ scnprintf(str + len, size - len, "UNKNOWN");
+ }
+
+ return str;
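
The rewrite above swaps snprintf() for scnprintf() everywhere the event string is built up piecewise: snprintf() returns the length the output would have had, which overruns the buffer when fed back as the next offset, while scnprintf() returns what was actually written. A userspace stand-in (the kernel provides scnprintf() natively):

#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	/* never report more than was actually written */
	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i < 0 ? 0 : i;
}

int main(void)
{
	char buf[16];
	size_t len;

	/* snprintf() would return 23 here and push the next write
	 * past the 16-byte buffer; scnprintf() caps the offset. */
	len = scnprintf(buf, sizeof(buf), "Transfer Complete (%c%c%c)",
			'S', 'I', 'L');
	len += scnprintf(buf + len, sizeof(buf) - len, " [%s]", "Setup");
	printf("%zu: %s\n", len, buf);
	return 0;
}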
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8b95be897078..e0cb1c2d5675 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2426,7 +2426,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
+ if (event->status & DEPEVT_STATUS_SHORT && !chain)
+ return 1;
+
+- if (event->status & DEPEVT_STATUS_IOC)
++ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
++ (trb->ctrl & DWC3_TRB_CTRL_LST))
+ return 1;
+
+ return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 5ec54b69c29c..0d45d7a4f949 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
+ val = CONFIG_USB_GADGET_VBUS_DRAW;
+ if (!val)
+ return 0;
+- switch (speed) {
+- case USB_SPEED_SUPER:
+- return DIV_ROUND_UP(val, 8);
+- default:
++ if (speed < USB_SPEED_SUPER)
+ return DIV_ROUND_UP(val, 2);
+- }
++ else
++ return DIV_ROUND_UP(val, 8);
+ }
+
+ static int config_buf(struct usb_configuration *config,
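
encode_bMaxPower() above now states the spec's units directly: bMaxPower is in 2 mA units up to high speed and 8 mA units from SuperSpeed on, and DIV_ROUND_UP keeps the advertised budget at least as large as the configured draw. Worked example for a 500 mA budget:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int draw_ma = 500;	/* CONFIG_USB_GADGET_VBUS_DRAW */

	printf("HS bMaxPower = %u\n", DIV_ROUND_UP(draw_ma, 2));	/* 250 */
	printf("SS bMaxPower = %u\n", DIV_ROUND_UP(draw_ma, 8));	/* 63 */
	return 0;
}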
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 7a3a29e5e9d2..af92b2576fe9 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
+ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ u16 wLength)
+ {
++ struct xhci_port_cap *port_cap = NULL;
+ int i, ssa_count;
+ u32 temp;
+ u16 desc_size, ssp_cap_size, ssa_size = 0;
+@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
+
+ /* does xhci support USB 3.1 Enhanced SuperSpeed */
+- if (xhci->usb3_rhub.min_rev >= 0x01) {
++ for (i = 0; i < xhci->num_port_caps; i++) {
++ if (xhci->port_caps[i].maj_rev == 0x03 &&
++ xhci->port_caps[i].min_rev >= 0x01) {
++ usb3_1 = true;
++ port_cap = &xhci->port_caps[i];
++ break;
++ }
++ }
++
++ if (usb3_1) {
+ /* does xhci provide a PSI table for SSA speed attributes? */
+- if (xhci->usb3_rhub.psi_count) {
++ if (port_cap->psi_count) {
+ /* two SSA entries for each unique PSI ID, RX and TX */
+- ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
++ ssa_count = port_cap->psi_uid_count * 2;
+ ssa_size = ssa_count * sizeof(u32);
+ ssp_cap_size -= 16; /* skip copying the default SSA */
+ }
+ desc_size += ssp_cap_size;
+- usb3_1 = true;
+ }
+ memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
+
+@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ }
+
+ /* If PSI table exists, add the custom speed attributes from it */
+- if (usb3_1 && xhci->usb3_rhub.psi_count) {
++ if (usb3_1 && port_cap->psi_count) {
+ u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
+ int offset;
+
+@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+
+ /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
+ bm_attrib = (ssa_count - 1) & 0x1f;
+- bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
++ bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
+ put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
+
+ if (wLength < desc_size + ssa_size)
+@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ * USB 3.1 requires two SSA entries (RX and TX) for every link
+ */
+ offset = desc_size;
+- for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
+- psi = xhci->usb3_rhub.psi[i];
++ for (i = 0; i < port_cap->psi_count; i++) {
++ psi = port_cap->psi[i];
+ psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+ psi_exp = XHCI_EXT_PORT_PSIE(psi);
+ psi_mant = XHCI_EXT_PORT_PSIM(psi);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 3b1388fa2f36..884c601bfa15 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ /* Allow 3 retries for everything but isoc, set CErr = 3 */
+ if (!usb_endpoint_xfer_isoc(&ep->desc))
+ err_count = 3;
+- /* Some devices get this wrong */
+- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
+- max_packet = 512;
++ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
++ if (usb_endpoint_xfer_bulk(&ep->desc)) {
++ if (udev->speed == USB_SPEED_HIGH)
++ max_packet = 512;
++ if (udev->speed == USB_SPEED_FULL) {
++ max_packet = rounddown_pow_of_two(max_packet);
++ max_packet = clamp_val(max_packet, 8, 64);
++ }
++ }
+ /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
+ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+ avg_trb_len = 8;
+@@ -1909,17 +1915,17 @@ no_bw:
+ xhci->usb3_rhub.num_ports = 0;
+ xhci->num_active_eps = 0;
+ kfree(xhci->usb2_rhub.ports);
+- kfree(xhci->usb2_rhub.psi);
+ kfree(xhci->usb3_rhub.ports);
+- kfree(xhci->usb3_rhub.psi);
+ kfree(xhci->hw_ports);
+ kfree(xhci->rh_bw);
+ kfree(xhci->ext_caps);
++ for (i = 0; i < xhci->num_port_caps; i++)
++ kfree(xhci->port_caps[i].psi);
++ kfree(xhci->port_caps);
++ xhci->num_port_caps = 0;
+
+ xhci->usb2_rhub.ports = NULL;
+- xhci->usb2_rhub.psi = NULL;
+ xhci->usb3_rhub.ports = NULL;
+- xhci->usb3_rhub.psi = NULL;
+ xhci->hw_ports = NULL;
+ xhci->rh_bw = NULL;
+ xhci->ext_caps = NULL;
+@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ u8 major_revision, minor_revision;
+ struct xhci_hub *rhub;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
++ struct xhci_port_cap *port_cap;
+
+ temp = readl(addr);
+ major_revision = XHCI_EXT_PORT_MAJOR(temp);
+@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ /* WTF? "Valid values are ‘1’ to MaxPorts" */
+ return;
+
+- rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
+- if (rhub->psi_count) {
+- rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
+- GFP_KERNEL, dev_to_node(dev));
+- if (!rhub->psi)
+- rhub->psi_count = 0;
++ port_cap = &xhci->port_caps[xhci->num_port_caps++];
++ if (xhci->num_port_caps > max_caps)
++ return;
++
++ port_cap->maj_rev = major_revision;
++ port_cap->min_rev = minor_revision;
++ port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
+
+- rhub->psi_uid_count++;
+- for (i = 0; i < rhub->psi_count; i++) {
+- rhub->psi[i] = readl(addr + 4 + i);
++ if (port_cap->psi_count) {
++ port_cap->psi = kcalloc_node(port_cap->psi_count,
++ sizeof(*port_cap->psi),
++ GFP_KERNEL, dev_to_node(dev));
++ if (!port_cap->psi)
++ port_cap->psi_count = 0;
++
++ port_cap->psi_uid_count++;
++ for (i = 0; i < port_cap->psi_count; i++) {
++ port_cap->psi[i] = readl(addr + 4 + i);
+
+ /* count unique ID values, two consecutive entries can
+ * have the same ID if link is assymetric
+ */
+- if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
+- XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
+- rhub->psi_uid_count++;
++ if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
++ XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
++ port_cap->psi_uid_count++;
+
+ xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
+- XHCI_EXT_PORT_PSIV(rhub->psi[i]),
+- XHCI_EXT_PORT_PSIE(rhub->psi[i]),
+- XHCI_EXT_PORT_PLT(rhub->psi[i]),
+- XHCI_EXT_PORT_PFD(rhub->psi[i]),
+- XHCI_EXT_PORT_LP(rhub->psi[i]),
+- XHCI_EXT_PORT_PSIM(rhub->psi[i]));
++ XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
++ XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
++ XHCI_EXT_PORT_PLT(port_cap->psi[i]),
++ XHCI_EXT_PORT_PFD(port_cap->psi[i]),
++ XHCI_EXT_PORT_LP(port_cap->psi[i]),
++ XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
+ }
+ }
+ /* cache usb2 port capabilities */
+@@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ continue;
+ }
+ hw_port->rhub = rhub;
++ hw_port->port_cap = port_cap;
+ rhub->num_ports++;
+ }
+ /* FIXME: Should we disable ports not in the Extended Capabilities? */
+@@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+ if (!xhci->ext_caps)
+ return -ENOMEM;
+
++ xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
++ flags, dev_to_node(dev));
++ if (!xhci->port_caps)
++ return -ENOMEM;
++
+ offset = cap_start;
+
+ while (offset) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4917c5b033fa..5e9b537df631 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -49,6 +49,7 @@
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
+ #define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
++#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
+
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+@@ -187,7 +188,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
++ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
+ xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+@@ -302,6 +304,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return 0;
+
++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++ xhci_pme_acpi_rtd3_enable(pdev);
++
+ xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+
+ /* Find any debug ports */
+@@ -359,9 +364,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
+- if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+- xhci_pme_acpi_rtd3_enable(dev);
+-
+ /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ pm_runtime_put_noidle(&dev->dev);
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 4a2fe56940bd..f7a190fb2353 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2740,6 +2740,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
+ return 1;
+ }
+
++/*
++ * Update Event Ring Dequeue Pointer:
++ * - When all events have finished
++ * - To avoid "Event Ring Full Error" condition
++ */
++static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
++ union xhci_trb *event_ring_deq)
++{
++ u64 temp_64;
++ dma_addr_t deq;
++
++ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
++ /* If necessary, update the HW's version of the event ring deq ptr. */
++ if (event_ring_deq != xhci->event_ring->dequeue) {
++ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
++ xhci->event_ring->dequeue);
++ if (deq == 0)
++ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
++ /*
++ * Per 4.9.4, Software writes to the ERDP register shall
++ * always advance the Event Ring Dequeue Pointer value.
++ */
++ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
++ ((u64) deq & (u64) ~ERST_PTR_MASK))
++ return;
++
++ /* Update HC event ring dequeue pointer */
++ temp_64 &= ERST_PTR_MASK;
++ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
++ }
++
++ /* Clear the event handler busy flag (RW1C) */
++ temp_64 |= ERST_EHB;
++ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
++}
++
+ /*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
+@@ -2751,9 +2787,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ union xhci_trb *event_ring_deq;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+- dma_addr_t deq;
+ u64 temp_64;
+ u32 status;
++ int event_loop = 0;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Check if the xHC generated the interrupt, or the irq is shared */
+@@ -2807,24 +2843,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ /* FIXME this should be a delayed service routine
+ * that clears the EHB.
+ */
+- while (xhci_handle_event(xhci) > 0) {}
+-
+- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+- /* If necessary, update the HW's version of the event ring deq ptr. */
+- if (event_ring_deq != xhci->event_ring->dequeue) {
+- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+- xhci->event_ring->dequeue);
+- if (deq == 0)
+- xhci_warn(xhci, "WARN something wrong with SW event "
+- "ring dequeue ptr.\n");
+- /* Update HC event ring dequeue pointer */
+- temp_64 &= ERST_PTR_MASK;
+- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
++ while (xhci_handle_event(xhci) > 0) {
++ if (event_loop++ < TRBS_PER_SEGMENT / 2)
++ continue;
++ xhci_update_erst_dequeue(xhci, event_ring_deq);
++ event_loop = 0;
+ }
+
+- /* Clear the event handler busy flag (RW1C); event ring is empty. */
+- temp_64 |= ERST_EHB;
+- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
++ xhci_update_erst_dequeue(xhci, event_ring_deq);
+ ret = IRQ_HANDLED;
+
+ out:
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 973d665052a2..98b98a0cd2a8 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1702,12 +1702,20 @@ struct xhci_bus_state {
+ * Intel Lynx Point LP xHCI host.
+ */
+ #define XHCI_MAX_REXIT_TIMEOUT_MS 20
++struct xhci_port_cap {
++ u32 *psi; /* array of protocol speed ID entries */
++ u8 psi_count;
++ u8 psi_uid_count;
++ u8 maj_rev;
++ u8 min_rev;
++};
+
+ struct xhci_port {
+ __le32 __iomem *addr;
+ int hw_portnum;
+ int hcd_portnum;
+ struct xhci_hub *rhub;
++ struct xhci_port_cap *port_cap;
+ };
+
+ struct xhci_hub {
+@@ -1719,9 +1727,6 @@ struct xhci_hub {
+ /* supported prococol extended capabiliy values */
+ u8 maj_rev;
+ u8 min_rev;
+- u32 *psi; /* array of protocol speed ID entries */
+- u8 psi_count;
+- u8 psi_uid_count;
+ };
+
+ /* There is one xhci_hcd structure per controller */
+@@ -1880,6 +1885,9 @@ struct xhci_hcd {
+ /* cached usb2 extened protocol capabilites */
+ u32 *ext_caps;
+ unsigned int num_ext_caps;
++ /* cached extended protocol port capabilities */
++ struct xhci_port_cap *port_caps;
++ unsigned int num_port_caps;
+ /* Compliance Mode Recovery Data */
+ struct timer_list comp_mode_recovery_timer;
+ u32 port_status_u0;
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index dce44fbf031f..dce20301e367 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -33,6 +33,14 @@
+ #define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
+ /* full speed iowarrior */
+ #define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
++/* fuller speed iowarrior */
++#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
++#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
++#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
++
++/* OEMed devices */
++#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
++#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
+
+ /* Get a minor range for your devices from the usb maintainer */
+ #ifdef CONFIG_USB_DYNAMIC_MINORS
+@@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = {
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
++ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
+ {} /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, iowarrior_ids);
+@@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file,
+ }
+ switch (dev->product_id) {
+ case USB_DEVICE_ID_CODEMERCS_IOW24:
++ case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
+ case USB_DEVICE_ID_CODEMERCS_IOWPV1:
+ case USB_DEVICE_ID_CODEMERCS_IOWPV2:
+ case USB_DEVICE_ID_CODEMERCS_IOW40:
+@@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file,
+ goto exit;
+ break;
+ case USB_DEVICE_ID_CODEMERCS_IOW56:
++ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
++ case USB_DEVICE_ID_CODEMERCS_IOW28:
++ case USB_DEVICE_ID_CODEMERCS_IOW28L:
++ case USB_DEVICE_ID_CODEMERCS_IOW100:
+ /* The IOW56 uses asynchronous IO and more urbs */
+ if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
+ /* Wait until we are below the limit for submitted urbs */
+@@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
+ switch (cmd) {
+ case IOW_WRITE:
+ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
++ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
+ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
+ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
+ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
+@@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface,
+ goto error;
+ }
+
+- if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
++ if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
+ res = usb_find_last_int_out_endpoint(iface_desc,
+ &dev->int_out_endpoint);
+ if (res) {
+@@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface,
+ /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
+ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
+ if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
+- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
++ ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
++ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
+ /* IOWarrior56 has wMaxPacketSize different from report size */
+ dev->report_size = 7;
+
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 475b9c692827..bb2198496f42 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -45,6 +45,7 @@ struct uas_dev_info {
+ struct scsi_cmnd *cmnd[MAX_CMNDS];
+ spinlock_t lock;
+ struct work_struct work;
++ struct work_struct scan_work; /* for async scanning */
+ };
+
+ enum {
+@@ -114,6 +115,17 @@ out:
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ }
+
++static void uas_scan_work(struct work_struct *work)
++{
++ struct uas_dev_info *devinfo =
++ container_of(work, struct uas_dev_info, scan_work);
++ struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
++
++ dev_dbg(&devinfo->intf->dev, "starting scan\n");
++ scsi_scan_host(shost);
++ dev_dbg(&devinfo->intf->dev, "scan complete\n");
++}
++
+ static void uas_add_work(struct uas_cmd_info *cmdinfo)
+ {
+ struct scsi_pointer *scp = (void *)cmdinfo;
+@@ -983,6 +995,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ init_usb_anchor(&devinfo->data_urbs);
+ spin_lock_init(&devinfo->lock);
+ INIT_WORK(&devinfo->work, uas_do_work);
++ INIT_WORK(&devinfo->scan_work, uas_scan_work);
+
+ result = uas_configure_endpoints(devinfo);
+ if (result)
+@@ -999,7 +1012,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ if (result)
+ goto free_streams;
+
+- scsi_scan_host(shost);
++ /* Submit the delayed_work for SCSI-device scanning */
++ schedule_work(&devinfo->scan_work);
++
+ return result;
+
+ free_streams:
+@@ -1167,6 +1182,12 @@ static void uas_disconnect(struct usb_interface *intf)
+ usb_kill_anchored_urbs(&devinfo->data_urbs);
+ uas_zap_pending(devinfo, DID_NO_CONNECT);
+
++ /*
++ * Prevent SCSI scanning (if it hasn't started yet)
++ * or wait for the SCSI-scanning routine to stop.
++ */
++ cancel_work_sync(&devinfo->scan_work);
++
+ scsi_remove_host(shost);
+ uas_free_streams(devinfo);
+ scsi_host_put(shost);
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 8b9919c26095..456a164364a2 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ * cpu.
+ */
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+- _cond_resched();
++ local_irq_enable();
++ cond_resched();
++ local_irq_disable();
+ __this_cpu_write(xen_in_preemptible_hcall, true);
+ }
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index b0ccca5d08b5..5cdd1b51285b 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3203,6 +3203,7 @@ retry_root_backup:
+ if (IS_ERR(fs_info->fs_root)) {
+ err = PTR_ERR(fs_info->fs_root);
+ btrfs_warn(fs_info, "failed to read fs tree: %d", err);
++ fs_info->fs_root = NULL;
+ goto fail_qgroup;
+ }
+
+@@ -4293,6 +4294,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ cond_resched();
+ spin_lock(&delayed_refs->lock);
+ }
++ btrfs_qgroup_destroy_extent_records(trans);
+
+ spin_unlock(&delayed_refs->lock);
+
+@@ -4518,7 +4520,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ wake_up(&fs_info->transaction_wait);
+
+ btrfs_destroy_delayed_inodes(fs_info);
+- btrfs_assert_delayed_root_empty(fs_info);
+
+ btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
+ EXTENT_DIRTY);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index dc50605ecbda..47ecf7216b3e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4411,6 +4411,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
+
+ ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
+ offset, ins, 1);
++ if (ret)
++ btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1);
+ btrfs_put_block_group(block_group);
+ return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index b83eef445db3..50feb01f27f3 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4734,6 +4734,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ u64 bytes_deleted = 0;
+ bool be_nice = false;
+ bool should_throttle = false;
++ const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
++ struct extent_state *cached_state = NULL;
+
+ BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
+
+@@ -4750,6 +4752,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ return -ENOMEM;
+ path->reada = READA_BACK;
+
++ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
++ lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
++ &cached_state);
++
+ /*
+ * We want to drop from the next block forward in case this new size is
+ * not block aligned since we will be keeping the last block of the
+@@ -4786,7 +4792,6 @@ search_again:
+ goto out;
+ }
+
+- path->leave_spinning = 1;
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+@@ -4938,7 +4943,6 @@ delete:
+ root == fs_info->tree_root)) {
+ struct btrfs_ref ref = { 0 };
+
+- btrfs_set_path_blocking(path);
+ bytes_deleted += extent_num_bytes;
+
+ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
+@@ -5014,6 +5018,8 @@ out:
+ if (!ret && last_size > new_size)
+ last_size = new_size;
+ btrfs_ordered_update_i_size(inode, last_size, NULL);
++ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
++ (u64)-1, &cached_state);
+ }
+
+ btrfs_free_path(path);
+@@ -10464,6 +10470,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_key ins;
+ u64 cur_offset = start;
++ u64 clear_offset = start;
+ u64 i_size;
+ u64 cur_bytes;
+ u64 last_alloc = (u64)-1;
+@@ -10498,6 +10505,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+ btrfs_end_transaction(trans);
+ break;
+ }
++
++ /*
++ * We've reserved this space, and thus converted it from
++ * ->bytes_may_use to ->bytes_reserved. Any error that happens
++ * from here on out we will only need to clear our reservation
++ * for the remaining unreserved area, so advance our
++ * clear_offset by our extent size.
++ */
++ clear_offset += ins.offset;
+ btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+
+ last_alloc = ins.offset;
+@@ -10578,9 +10594,9 @@ next:
+ if (own_trans)
+ btrfs_end_transaction(trans);
+ }
+- if (cur_offset < end)
+- btrfs_free_reserved_data_space(inode, NULL, cur_offset,
+- end - cur_offset + 1);
++ if (clear_offset < end)
++ btrfs_free_reserved_data_space(inode, NULL, clear_offset,
++ end - clear_offset + 1);
+ return ret;
+ }
+
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 6240a5a1f2c0..00e1ef4f7979 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -690,10 +690,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+ }
+ btrfs_start_ordered_extent(inode, ordered, 1);
+ end = ordered->file_offset;
++ /*
++ * If the ordered extent had an error save the error but don't
++ * exit without waiting first for all other ordered extents in
++ * the range to complete.
++ */
+ if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ ret = -EIO;
+ btrfs_put_ordered_extent(ordered);
+- if (ret || end == 0 || end == start)
++ if (end == 0 || end == start)
+ break;
+ end--;
+ }
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 50517221638a..286c8c11c8d3 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -4018,3 +4018,16 @@ out:
+ }
+ return ret;
+ }
++
++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
++{
++ struct btrfs_qgroup_extent_record *entry;
++ struct btrfs_qgroup_extent_record *next;
++ struct rb_root *root;
++
++ root = &trans->delayed_refs.dirty_extent_root;
++ rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
++ ulist_free(entry->old_roots);
++ kfree(entry);
++ }
++}
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 46ba7bd2961c..17e8ac992c50 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
+ u64 last_snapshot);
+ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct extent_buffer *eb);
++void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
+
+ #endif
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index ceffec752234..98b6903e3938 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -51,6 +51,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
+ BUG_ON(!list_empty(&transaction->list));
+ WARN_ON(!RB_EMPTY_ROOT(
+ &transaction->delayed_refs.href_root.rb_root));
++ WARN_ON(!RB_EMPTY_ROOT(
++ &transaction->delayed_refs.dirty_extent_root));
+ if (transaction->delayed_refs.pending_csums)
+ btrfs_err(transaction->fs_info,
+ "pending csums is %llu",
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index f91db24bbf3b..a064b408d841 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
+ struct extent_crypt_result ecr;
+ int rc = 0;
+
+- BUG_ON(!crypt_stat || !crypt_stat->tfm
+- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
++ if (!crypt_stat || !crypt_stat->tfm
++ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
++ return -EINVAL;
++
+ if (unlikely(ecryptfs_verbosity > 0)) {
+ ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+ crypt_stat->key_size);
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 216fbe6a4837..4dc09638de8f 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
+ printk(KERN_WARNING "Tag 1 packet contains key larger "
+ "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
+ rc = -EINVAL;
+- goto out;
++ goto out_free;
+ }
+ memcpy((*new_auth_tok)->session_key.encrypted_key,
+ &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
+diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
+index d668e60b85b5..c05ca39aa449 100644
+--- a/fs/ecryptfs/messaging.c
++++ b/fs/ecryptfs/messaging.c
+@@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void)
+ * ecryptfs_message_buf_len),
+ GFP_KERNEL);
+ if (!ecryptfs_msg_ctx_arr) {
++ kfree(ecryptfs_daemon_hash);
+ rc = -ENOMEM;
+ goto out;
+ }
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 0b202e00d93f..5aba67a504cf 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct buffer_head *bh_p;
+
+ if (block_group >= ngroups) {
+ ext4_error(sb, "block_group >= groups_count - block_group = %u,"
+@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+
+ group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+ offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+- if (!sbi->s_group_desc[group_desc]) {
++ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
++ /*
++ * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
++ * the pointer being dereferenced won't be dereferenced again. By
++ * looking at the usage in add_new_gdb() the value isn't modified,
++ * just the pointer, and so it remains valid.
++ */
++ if (!bh_p) {
+ ext4_error(sb, "Group descriptor not loaded - "
+ "block_group = %u, group_desc = %u, desc = %u",
+ block_group, group_desc, offset);
+@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ }
+
+ desc = (struct ext4_group_desc *)(
+- (__u8 *)sbi->s_group_desc[group_desc]->b_data +
++ (__u8 *)bh_p->b_data +
+ offset * EXT4_DESC_SIZE(sb));
+ if (bh)
+- *bh = sbi->s_group_desc[group_desc];
++ *bh = bh_p;
+ return desc;
+ }
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e2f65b565c1f..d576addfdd03 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1396,7 +1396,7 @@ struct ext4_sb_info {
+ loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
+ struct buffer_head * s_sbh; /* Buffer containing the super block */
+ struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
+- struct buffer_head **s_group_desc;
++ struct buffer_head * __rcu *s_group_desc;
+ unsigned int s_mount_opt;
+ unsigned int s_mount_opt2;
+ unsigned int s_mount_flags;
+@@ -1458,7 +1458,7 @@ struct ext4_sb_info {
+ #endif
+
+ /* for buddy allocator */
+- struct ext4_group_info ***s_group_info;
++ struct ext4_group_info ** __rcu *s_group_info;
+ struct inode *s_buddy_cache;
+ spinlock_t s_md_lock;
+ unsigned short *s_mb_offsets;
+@@ -1508,7 +1508,7 @@ struct ext4_sb_info {
+ unsigned int s_extent_max_zeroout_kb;
+
+ unsigned int s_log_groups_per_flex;
+- struct flex_groups *s_flex_groups;
++ struct flex_groups * __rcu *s_flex_groups;
+ ext4_group_t s_flex_groups_allocated;
+
+ /* workqueue for reserved extent conversions (buffered io) */
+@@ -1548,8 +1548,11 @@ struct ext4_sb_info {
+ struct ratelimit_state s_warning_ratelimit_state;
+ struct ratelimit_state s_msg_ratelimit_state;
+
+- /* Barrier between changing inodes' journal flags and writepages ops. */
+- struct percpu_rw_semaphore s_journal_flag_rwsem;
++ /*
++ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
++ * or EXTENTS flag.
++ */
++ struct percpu_rw_semaphore s_writepages_rwsem;
+ struct dax_device *s_daxdev;
+ };
+
+@@ -1569,6 +1572,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
+ ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
+ }
+
++/*
++ * Returns: sbi->field[index]
++ * Used to access an array element from the following sbi fields which require
++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
++ * - s_group_desc
++ * - s_group_info
++ * - s_flex_group
++ */
++#define sbi_array_rcu_deref(sbi, field, index) \
++({ \
++ typeof(*((sbi)->field)) _v; \
++ rcu_read_lock(); \
++ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
++ rcu_read_unlock(); \
++ _v; \
++})
++
+ /*
+ * Inode dynamic state flags
+ */
+@@ -2666,6 +2686,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
+ extern bool ext4_empty_dir(struct inode *inode);
+
+ /* resize.c */
++extern void ext4_kvfree_array_rcu(void *to_free);
+ extern int ext4_group_add(struct super_block *sb,
+ struct ext4_new_group_data *input);
+ extern int ext4_group_extend(struct super_block *sb,
+@@ -2913,13 +2934,13 @@ static inline
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ ext4_group_t group)
+ {
+- struct ext4_group_info ***grp_info;
++ struct ext4_group_info **grp_info;
+ long indexv, indexh;
+ BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
+- grp_info = EXT4_SB(sb)->s_group_info;
+ indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+ indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+- return grp_info[indexv][indexh];
++ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++ return grp_info[indexh];
+ }
+
+ /*
+@@ -2969,7 +2990,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
+ !inode_is_locked(inode));
+ down_write(&EXT4_I(inode)->i_data_sem);
+ if (newsize > EXT4_I(inode)->i_disksize)
+- EXT4_I(inode)->i_disksize = newsize;
++ WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
+
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 564e2ceb8417..a6288730210e 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -325,11 +325,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
+
+ percpu_counter_inc(&sbi->s_freeinodes_counter);
+ if (sbi->s_log_groups_per_flex) {
+- ext4_group_t f = ext4_flex_group(sbi, block_group);
++ struct flex_groups *fg;
+
+- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
++ fg = sbi_array_rcu_deref(sbi, s_flex_groups,
++ ext4_flex_group(sbi, block_group));
++ atomic_inc(&fg->free_inodes);
+ if (is_directory)
+- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
++ atomic_dec(&fg->used_dirs);
+ }
+ BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+ fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+@@ -365,12 +367,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+ int flex_size, struct orlov_stats *stats)
+ {
+ struct ext4_group_desc *desc;
+- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
+
+ if (flex_size > 1) {
+- stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
+- stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
++ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
++ s_flex_groups, g);
++ stats->free_inodes = atomic_read(&fg->free_inodes);
++ stats->free_clusters = atomic64_read(&fg->free_clusters);
++ stats->used_dirs = atomic_read(&fg->used_dirs);
+ return;
+ }
+
+@@ -1051,7 +1054,8 @@ got:
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t f = ext4_flex_group(sbi, group);
+
+- atomic_inc(&sbi->s_flex_groups[f].used_dirs);
++ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
++ f)->used_dirs);
+ }
+ }
+ if (ext4_has_group_desc_csum(sb)) {
+@@ -1074,7 +1078,8 @@ got:
+
+ if (sbi->s_log_groups_per_flex) {
+ flex_group = ext4_flex_group(sbi, group);
+- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
++ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
++ flex_group)->free_inodes);
+ }
+
+ inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 76a38ef5f226..70ef4a714b33 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2573,7 +2573,7 @@ update_disksize:
+ * truncate are avoided by checking i_size under i_data_sem.
+ */
+ disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
+- if (disksize > EXT4_I(inode)->i_disksize) {
++ if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
+ int err2;
+ loff_t i_size;
+
+@@ -2734,7 +2734,7 @@ static int ext4_writepages(struct address_space *mapping,
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
+- percpu_down_read(&sbi->s_journal_flag_rwsem);
++ percpu_down_read(&sbi->s_writepages_rwsem);
+ trace_ext4_writepages(inode, wbc);
+
+ /*
+@@ -2955,7 +2955,7 @@ unplug:
+ out_writepages:
+ trace_ext4_writepages_result(inode, wbc, ret,
+ nr_to_write - wbc->nr_to_write);
+- percpu_up_read(&sbi->s_journal_flag_rwsem);
++ percpu_up_read(&sbi->s_writepages_rwsem);
+ return ret;
+ }
+
+@@ -2970,13 +2970,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
+- percpu_down_read(&sbi->s_journal_flag_rwsem);
++ percpu_down_read(&sbi->s_writepages_rwsem);
+ trace_ext4_writepages(inode, wbc);
+
+ ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+ trace_ext4_writepages_result(inode, wbc, ret,
+ nr_to_write - wbc->nr_to_write);
+- percpu_up_read(&sbi->s_journal_flag_rwsem);
++ percpu_up_read(&sbi->s_writepages_rwsem);
+ return ret;
+ }
+
+@@ -6185,7 +6185,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ }
+ }
+
+- percpu_down_write(&sbi->s_journal_flag_rwsem);
++ percpu_down_write(&sbi->s_writepages_rwsem);
+ jbd2_journal_lock_updates(journal);
+
+ /*
+@@ -6202,7 +6202,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ err = jbd2_journal_flush(journal);
+ if (err < 0) {
+ jbd2_journal_unlock_updates(journal);
+- percpu_up_write(&sbi->s_journal_flag_rwsem);
++ percpu_up_write(&sbi->s_writepages_rwsem);
+ return err;
+ }
+ ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+@@ -6210,7 +6210,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ ext4_set_aops(inode);
+
+ jbd2_journal_unlock_updates(journal);
+- percpu_up_write(&sbi->s_journal_flag_rwsem);
++ percpu_up_write(&sbi->s_writepages_rwsem);
+
+ if (val)
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index a3e2767bdf2f..c76ffc259d19 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ unsigned size;
+- struct ext4_group_info ***new_groupinfo;
++ struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
+
+ size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ EXT4_DESC_PER_BLOCK_BITS(sb);
+@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+ ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
+ return -ENOMEM;
+ }
+- if (sbi->s_group_info) {
+- memcpy(new_groupinfo, sbi->s_group_info,
++ rcu_read_lock();
++ old_groupinfo = rcu_dereference(sbi->s_group_info);
++ if (old_groupinfo)
++ memcpy(new_groupinfo, old_groupinfo,
+ sbi->s_group_info_size * sizeof(*sbi->s_group_info));
+- kvfree(sbi->s_group_info);
+- }
+- sbi->s_group_info = new_groupinfo;
++ rcu_read_unlock();
++ rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
+ sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
++ if (old_groupinfo)
++ ext4_kvfree_array_rcu(old_groupinfo);
+ ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
+ sbi->s_group_info_size);
+ return 0;
+@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ {
+ int i;
+ int metalen = 0;
++ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_info **meta_group_info;
+ struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ "for a buddy group");
+ goto exit_meta_group_info;
+ }
+- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
+- meta_group_info;
++ rcu_read_lock();
++ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
++ rcu_read_unlock();
+ }
+
+- meta_group_info =
+- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
++ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
+ i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+
+ meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
+@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ exit_group_info:
+ /* If a meta_group_info table has been allocated, release it now */
+ if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
+- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
++ struct ext4_group_info ***group_info;
++
++ rcu_read_lock();
++ group_info = rcu_dereference(sbi->s_group_info);
++ kfree(group_info[idx]);
++ group_info[idx] = NULL;
++ rcu_read_unlock();
+ }
+ exit_meta_group_info:
+ return -ENOMEM;
+@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int err;
+ struct ext4_group_desc *desc;
++ struct ext4_group_info ***group_info;
+ struct kmem_cache *cachep;
+
+ err = ext4_mb_alloc_groupinfo(sb, ngroups);
+@@ -2507,11 +2517,16 @@ err_freebuddy:
+ while (i-- > 0)
+ kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+ i = sbi->s_group_info_size;
++ rcu_read_lock();
++ group_info = rcu_dereference(sbi->s_group_info);
+ while (i-- > 0)
+- kfree(sbi->s_group_info[i]);
++ kfree(group_info[i]);
++ rcu_read_unlock();
+ iput(sbi->s_buddy_cache);
+ err_freesgi:
+- kvfree(sbi->s_group_info);
++ rcu_read_lock();
++ kvfree(rcu_dereference(sbi->s_group_info));
++ rcu_read_unlock();
+ return -ENOMEM;
+ }
+
+@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ ext4_group_t i;
+ int num_meta_group_infos;
+- struct ext4_group_info *grinfo;
++ struct ext4_group_info *grinfo, ***group_info;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+
+@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
+ num_meta_group_infos = (ngroups +
+ EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ EXT4_DESC_PER_BLOCK_BITS(sb);
++ rcu_read_lock();
++ group_info = rcu_dereference(sbi->s_group_info);
+ for (i = 0; i < num_meta_group_infos; i++)
+- kfree(sbi->s_group_info[i]);
+- kvfree(sbi->s_group_info);
++ kfree(group_info[i]);
++ kvfree(group_info);
++ rcu_read_unlock();
+ }
+ kfree(sbi->s_mb_offsets);
+ kfree(sbi->s_mb_maxs);
+@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ ext4_group_t flex_group = ext4_flex_group(sbi,
+ ac->ac_b_ex.fe_group);
+ atomic64_sub(ac->ac_b_ex.fe_len,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ &sbi_array_rcu_deref(sbi, s_flex_groups,
++ flex_group)->free_clusters);
+ }
+
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -4914,7 +4933,8 @@ do_more:
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+ atomic64_add(count_clusters,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ &sbi_array_rcu_deref(sbi, s_flex_groups,
++ flex_group)->free_clusters);
+ }
+
+ /*
+@@ -5071,7 +5091,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+ atomic64_add(clusters_freed,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ &sbi_array_rcu_deref(sbi, s_flex_groups,
++ flex_group)->free_clusters);
+ }
+
+ ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index b1e4d359f73b..be4ee3dcc5cf 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
+
+ int ext4_ext_migrate(struct inode *inode)
+ {
++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ handle_t *handle;
+ int retval = 0, i;
+ __le32 *i_data;
+@@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode)
+ */
+ return retval;
+
++ percpu_down_write(&sbi->s_writepages_rwsem);
++
+ /*
+ * Worst case we can touch the allocation bitmaps, a bgd
+ * block, and a block to link in the orphan list. We do need
+@@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode)
+
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+- return retval;
++ goto out_unlock;
+ }
+ goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
+ EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
+@@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
+ if (IS_ERR(tmp_inode)) {
+ retval = PTR_ERR(tmp_inode);
+ ext4_journal_stop(handle);
+- return retval;
++ goto out_unlock;
+ }
+ i_size_write(tmp_inode, i_size_read(inode));
+ /*
+@@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
+ */
+ ext4_orphan_del(NULL, tmp_inode);
+ retval = PTR_ERR(handle);
+- goto out;
++ goto out_tmp_inode;
+ }
+
+ ei = EXT4_I(inode);
+@@ -595,10 +598,11 @@ err_out:
+ /* Reset the extent details */
+ ext4_ext_tree_init(handle, tmp_inode);
+ ext4_journal_stop(handle);
+-out:
++out_tmp_inode:
+ unlock_new_inode(tmp_inode);
+ iput(tmp_inode);
+-
++out_unlock:
++ percpu_up_write(&sbi->s_writepages_rwsem);
+ return retval;
+ }
+
+@@ -608,7 +612,8 @@ out:
+ int ext4_ind_migrate(struct inode *inode)
+ {
+ struct ext4_extent_header *eh;
+- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++ struct ext4_super_block *es = sbi->s_es;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_extent *ex;
+ unsigned int i, len;
+@@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode)
+ if (test_opt(inode->i_sb, DELALLOC))
+ ext4_alloc_da_blocks(inode);
+
++ percpu_down_write(&sbi->s_writepages_rwsem);
++
+ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
+- if (IS_ERR(handle))
+- return PTR_ERR(handle);
++ if (IS_ERR(handle)) {
++ ret = PTR_ERR(handle);
++ goto out_unlock;
++ }
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ret = ext4_ext_check_inode(inode);
+@@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode)
+ errout:
+ ext4_journal_stop(handle);
+ up_write(&EXT4_I(inode)->i_data_sem);
++out_unlock:
++ percpu_up_write(&sbi->s_writepages_rwsem);
+ return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 94d84910dc1e..a564d0289a70 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1507,6 +1507,7 @@ restart:
+ /*
+ * We deal with the read-ahead logic here.
+ */
++ cond_resched();
+ if (ra_ptr >= ra_max) {
+ /* Refill the readahead buffer */
+ ra_ptr = 0;
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index c0e9aef376a7..080e25f6ef56 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -17,6 +17,33 @@
+
+ #include "ext4_jbd2.h"
+
++struct ext4_rcu_ptr {
++ struct rcu_head rcu;
++ void *ptr;
++};
++
++static void ext4_rcu_ptr_callback(struct rcu_head *head)
++{
++ struct ext4_rcu_ptr *ptr;
++
++ ptr = container_of(head, struct ext4_rcu_ptr, rcu);
++ kvfree(ptr->ptr);
++ kfree(ptr);
++}
++
++void ext4_kvfree_array_rcu(void *to_free)
++{
++ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
++
++ if (ptr) {
++ ptr->ptr = to_free;
++ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
++ return;
++ }
++ synchronize_rcu();
++ kvfree(to_free);
++}
++
+ int ext4_resize_begin(struct super_block *sb)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ brelse(gdb);
+ goto out;
+ }
+- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
+- gdb->b_size);
++ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
++ s_group_desc, j)->b_data, gdb->b_size);
+ set_buffer_uptodate(gdb);
+
+ err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+@@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+ }
+ brelse(dind);
+
+- o_group_desc = EXT4_SB(sb)->s_group_desc;
++ rcu_read_lock();
++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+ memcpy(n_group_desc, o_group_desc,
+ EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++ rcu_read_unlock();
+ n_group_desc[gdb_num] = gdb_bh;
+- EXT4_SB(sb)->s_group_desc = n_group_desc;
++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ EXT4_SB(sb)->s_gdb_count++;
+- kvfree(o_group_desc);
++ ext4_kvfree_array_rcu(o_group_desc);
+
+ le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
+ err = ext4_handle_dirty_super(handle, sb);
+@@ -929,9 +958,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ return err;
+ }
+
+- o_group_desc = EXT4_SB(sb)->s_group_desc;
++ rcu_read_lock();
++ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+ memcpy(n_group_desc, o_group_desc,
+ EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++ rcu_read_unlock();
+ n_group_desc[gdb_num] = gdb_bh;
+
+ BUFFER_TRACE(gdb_bh, "get_write_access");
+@@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ return err;
+ }
+
+- EXT4_SB(sb)->s_group_desc = n_group_desc;
++ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ EXT4_SB(sb)->s_gdb_count++;
+- kvfree(o_group_desc);
++ ext4_kvfree_array_rcu(o_group_desc);
+ return err;
+ }
+
+@@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
+ * use non-sparse filesystems anymore. This is already checked above.
+ */
+ if (gdb_off) {
+- gdb_bh = sbi->s_group_desc[gdb_num];
++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++ gdb_num);
+ BUFFER_TRACE(gdb_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, gdb_bh);
+
+@@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
+ /*
+ * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
+ */
+- gdb_bh = sbi->s_group_desc[gdb_num];
++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
+ /* Update group descriptor block for new group */
+ gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
+ gdb_off * EXT4_DESC_SIZE(sb));
+@@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb,
+ percpu_counter_read(&sbi->s_freeclusters_counter));
+ if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group;
++ struct flex_groups *fg;
++
+ flex_group = ext4_flex_group(sbi, group_data[0].group);
++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+ atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ &fg->free_clusters);
+ atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+- &sbi->s_flex_groups[flex_group].free_inodes);
++ &fg->free_inodes);
+ }
+
+ /*
+@@ -1519,7 +1554,8 @@ exit_journal:
+ for (; gdb_num <= gdb_num_end; gdb_num++) {
+ struct buffer_head *gdb_bh;
+
+- gdb_bh = sbi->s_group_desc[gdb_num];
++ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++ gdb_num);
+ if (old_gdb == gdb_bh->b_blocknr)
+ continue;
+ update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 914230e63054..3ca604807839 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -970,6 +970,8 @@ static void ext4_put_super(struct super_block *sb)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
++ struct buffer_head **group_desc;
++ struct flex_groups **flex_groups;
+ int aborted = 0;
+ int i, err;
+
+@@ -1000,15 +1002,23 @@ static void ext4_put_super(struct super_block *sb)
+ if (!sb_rdonly(sb))
+ ext4_commit_super(sb, 1);
+
++ rcu_read_lock();
++ group_desc = rcu_dereference(sbi->s_group_desc);
+ for (i = 0; i < sbi->s_gdb_count; i++)
+- brelse(sbi->s_group_desc[i]);
+- kvfree(sbi->s_group_desc);
+- kvfree(sbi->s_flex_groups);
++ brelse(group_desc[i]);
++ kvfree(group_desc);
++ flex_groups = rcu_dereference(sbi->s_flex_groups);
++ if (flex_groups) {
++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++ kvfree(flex_groups[i]);
++ kvfree(flex_groups);
++ }
++ rcu_read_unlock();
+ percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
+ percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ #ifdef CONFIG_QUOTA
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(get_qf_name(sb, sbi, i));
+@@ -2332,8 +2342,8 @@ done:
+ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- struct flex_groups *new_groups;
+- int size;
++ struct flex_groups **old_groups, **new_groups;
++ int size, i;
+
+ if (!sbi->s_log_groups_per_flex)
+ return 0;
+@@ -2342,22 +2352,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ if (size <= sbi->s_flex_groups_allocated)
+ return 0;
+
+- size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+- new_groups = kvzalloc(size, GFP_KERNEL);
++ new_groups = kvzalloc(roundup_pow_of_two(size *
++ sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
+ if (!new_groups) {
+- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+- size / (int) sizeof(struct flex_groups));
++ ext4_msg(sb, KERN_ERR,
++ "not enough memory for %d flex group pointers", size);
+ return -ENOMEM;
+ }
+-
+- if (sbi->s_flex_groups) {
+- memcpy(new_groups, sbi->s_flex_groups,
+- (sbi->s_flex_groups_allocated *
+- sizeof(struct flex_groups)));
+- kvfree(sbi->s_flex_groups);
++ for (i = sbi->s_flex_groups_allocated; i < size; i++) {
++ new_groups[i] = kvzalloc(roundup_pow_of_two(
++ sizeof(struct flex_groups)),
++ GFP_KERNEL);
++ if (!new_groups[i]) {
++ for (i--; i >= sbi->s_flex_groups_allocated; i--)
++ kvfree(new_groups[i]);
++ kvfree(new_groups);
++ ext4_msg(sb, KERN_ERR,
++ "not enough memory for %d flex groups", size);
++ return -ENOMEM;
++ }
+ }
+- sbi->s_flex_groups = new_groups;
+- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
++ rcu_read_lock();
++ old_groups = rcu_dereference(sbi->s_flex_groups);
++ if (old_groups)
++ memcpy(new_groups, old_groups,
++ (sbi->s_flex_groups_allocated *
++ sizeof(struct flex_groups *)));
++ rcu_read_unlock();
++ rcu_assign_pointer(sbi->s_flex_groups, new_groups);
++ sbi->s_flex_groups_allocated = size;
++ if (old_groups)
++ ext4_kvfree_array_rcu(old_groups);
+ return 0;
+ }
+
+@@ -2365,6 +2390,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_desc *gdp = NULL;
++ struct flex_groups *fg;
+ ext4_group_t flex_group;
+ int i, err;
+
+@@ -2382,12 +2408,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ gdp = ext4_get_group_desc(sb, i, NULL);
+
+ flex_group = ext4_flex_group(sbi, i);
+- atomic_add(ext4_free_inodes_count(sb, gdp),
+- &sbi->s_flex_groups[flex_group].free_inodes);
++ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
++ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
+ atomic64_add(ext4_free_group_clusters(sb, gdp),
+- &sbi->s_flex_groups[flex_group].free_clusters);
+- atomic_add(ext4_used_dirs_count(sb, gdp),
+- &sbi->s_flex_groups[flex_group].used_dirs);
++ &fg->free_clusters);
++ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
+ }
+
+ return 1;
+@@ -2961,7 +2986,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
+ return 0;
+ }
+
+-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
++#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
+ if (!readonly && (ext4_has_feature_quota(sb) ||
+ ext4_has_feature_project(sb))) {
+ ext4_msg(sb, KERN_ERR,
+@@ -3586,9 +3611,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
+ char *orig_data = kstrdup(data, GFP_KERNEL);
+- struct buffer_head *bh;
++ struct buffer_head *bh, **group_desc;
+ struct ext4_super_block *es = NULL;
+ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
++ struct flex_groups **flex_groups;
+ ext4_fsblk_t block;
+ ext4_fsblk_t sb_block = get_sb_block(&data);
+ ext4_fsblk_t logical_sb_block;
+@@ -4242,9 +4268,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ goto failed_mount;
+ }
+ }
+- sbi->s_group_desc = kvmalloc_array(db_count,
+- sizeof(struct buffer_head *),
+- GFP_KERNEL);
++ rcu_assign_pointer(sbi->s_group_desc,
++ kvmalloc_array(db_count,
++ sizeof(struct buffer_head *),
++ GFP_KERNEL));
+ if (sbi->s_group_desc == NULL) {
+ ext4_msg(sb, KERN_ERR, "not enough memory");
+ ret = -ENOMEM;
+@@ -4260,14 +4287,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ }
+
+ for (i = 0; i < db_count; i++) {
++ struct buffer_head *bh;
++
+ block = descriptor_loc(sb, logical_sb_block, i);
+- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
+- if (!sbi->s_group_desc[i]) {
++ bh = sb_bread_unmovable(sb, block);
++ if (!bh) {
+ ext4_msg(sb, KERN_ERR,
+ "can't read group descriptor %d", i);
+ db_count = i;
+ goto failed_mount2;
+ }
++ rcu_read_lock();
++ rcu_dereference(sbi->s_group_desc)[i] = bh;
++ rcu_read_unlock();
+ }
+ sbi->s_gdb_count = db_count;
+ if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+@@ -4553,7 +4585,7 @@ no_journal:
+ err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ GFP_KERNEL);
+ if (!err)
+- err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
++ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
+
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "insufficient memory");
+@@ -4641,13 +4673,19 @@ failed_mount7:
+ ext4_unregister_li_request(sb);
+ failed_mount6:
+ ext4_mb_release(sb);
+- if (sbi->s_flex_groups)
+- kvfree(sbi->s_flex_groups);
++ rcu_read_lock();
++ flex_groups = rcu_dereference(sbi->s_flex_groups);
++ if (flex_groups) {
++ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++ kvfree(flex_groups[i]);
++ kvfree(flex_groups);
++ }
++ rcu_read_unlock();
+ percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
+ percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
++ percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ failed_mount5:
+ ext4_ext_release(sb);
+ ext4_release_system_zone(sb);
+@@ -4676,9 +4714,12 @@ failed_mount3:
+ if (sbi->s_mmp_tsk)
+ kthread_stop(sbi->s_mmp_tsk);
+ failed_mount2:
++ rcu_read_lock();
++ group_desc = rcu_dereference(sbi->s_group_desc);
+ for (i = 0; i < db_count; i++)
+- brelse(sbi->s_group_desc[i]);
+- kvfree(sbi->s_group_desc);
++ brelse(group_desc[i]);
++ kvfree(group_desc);
++ rcu_read_unlock();
+ failed_mount:
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 709671faaed6..ed9a551882cf 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -882,11 +882,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
+ mutex_unlock(&ctx->uring_lock);
+ }
+
+-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+- long min)
++static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
++ long min)
+ {
+ int iters = 0, ret = 0;
+
++ /*
++ * We disallow the app entering submit/complete with polling, but we
++ * still need to lock the ring to prevent racing with polled issue
++ * that got punted to a workqueue.
++ */
++ mutex_lock(&ctx->uring_lock);
+ do {
+ int tmin = 0;
+
+@@ -922,21 +928,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ ret = 0;
+ } while (min && !*nr_events && !need_resched());
+
+- return ret;
+-}
+-
+-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+- long min)
+-{
+- int ret;
+-
+- /*
+- * We disallow the app entering submit/complete with polling, but we
+- * still need to lock the ring to prevent racing with polled issue
+- * that got punted to a workqueue.
+- */
+- mutex_lock(&ctx->uring_lock);
+- ret = __io_iopoll_check(ctx, nr_events, min);
+ mutex_unlock(&ctx->uring_lock);
+ return ret;
+ }
+@@ -2721,7 +2712,7 @@ static int io_sq_thread(void *data)
+ */
+ mutex_lock(&ctx->uring_lock);
+ if (!list_empty(&ctx->poll_list))
+- __io_iopoll_check(ctx, &nr_events, 0);
++ io_iopoll_getevents(ctx, &nr_events, 0);
+ else
+ inflight = 0;
+ mutex_unlock(&ctx->uring_lock);
+@@ -2740,16 +2731,6 @@ static int io_sq_thread(void *data)
+
+ to_submit = io_sqring_entries(ctx);
+ if (!to_submit) {
+- /*
+- * We're polling. If we're within the defined idle
+- * period, then let us spin without work before going
+- * to sleep.
+- */
+- if (inflight || !time_after(jiffies, timeout)) {
+- cond_resched();
+- continue;
+- }
+-
+ /*
+ * Drop cur_mm before scheduling, we can't hold it for
+ * long periods (or over schedule()). Do this before
+@@ -2762,6 +2743,16 @@ static int io_sq_thread(void *data)
+ cur_mm = NULL;
+ }
+
++ /*
++ * We're polling. If we're within the defined idle
++ * period, then let us spin without work before going
++ * to sleep.
++ */
++ if (inflight || !time_after(jiffies, timeout)) {
++ cond_resched();
++ continue;
++ }
++
+ prepare_to_wait(&ctx->sqo_wait, &wait,
+ TASK_INTERRUPTIBLE);
+
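+/*
+ * Illustrative sketch, not io_uring itself: the refactor above folds
+ * the __locked helper back into io_iopoll_check() so uring_lock wraps
+ * the whole reap loop in one place. Generic shape of the result
+ * (my_ctx and my_poll_once are placeholders):
+ */
+struct my_ctx {
+        struct mutex lock;
+};
+
+static int my_poll_once(struct my_ctx *ctx, unsigned int *nr_events);
+
+static int my_poll_check(struct my_ctx *ctx, unsigned int *nr_events,
+                         long min)
+{
+        int ret = 0;
+
+        mutex_lock(&ctx->lock);         /* exclude punted polled issue */
+        do {
+                ret = my_poll_once(ctx, nr_events);
+                if (ret < 0)
+                        break;
+        } while (min && *nr_events < min && !need_resched());
+        mutex_unlock(&ctx->lock);
+        return ret;
+}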
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 3930c68a9c20..b17f05ae6011 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -865,8 +865,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
+ char *frozen_buffer = NULL;
+ unsigned long start_lock, time_lock;
+
+- if (is_handle_aborted(handle))
+- return -EROFS;
+ journal = transaction->t_journal;
+
+ jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
+@@ -1118,6 +1116,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+ struct journal_head *jh;
+ int rc;
+
++ if (is_handle_aborted(handle))
++ return -EROFS;
++
+ if (jbd2_write_access_granted(handle, bh, false))
+ return 0;
+
+@@ -1255,6 +1256,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+ struct journal_head *jh;
+ char *committed_data = NULL;
+
++ if (is_handle_aborted(handle))
++ return -EROFS;
++
+ if (jbd2_write_access_granted(handle, bh, true))
+ return 0;
+
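+/*
+ * Illustrative sketch, not the jbd2 code: the hunks above hoist the
+ * is_handle_aborted() test out of do_get_write_access() and into each
+ * exported entry point, so an aborted handle fails fast with -EROFS
+ * before any journal_head work is done. Shape of such a guard
+ * (my_do_get_access stands in for the internal helper):
+ */
+int my_get_write_access(handle_t *handle, struct buffer_head *bh)
+{
+        if (is_handle_aborted(handle))
+                return -EROFS;  /* reject before touching bh state */
+
+        return my_do_get_access(handle, bh);
+}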
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index d1fdf26ccb33..4010c42e40bd 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -749,6 +749,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
++ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
+
+ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_get_gpe_device(u32 gpe_index,
+diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
+index 94f047a8a845..d7c403d0dd27 100644
+--- a/include/linux/intel-svm.h
++++ b/include/linux/intel-svm.h
+@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
+ BUG();
+ }
+
+-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
++static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+ {
+ return -EINVAL;
+ }
+diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
+index aba5ada373d6..e85f714a623e 100644
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -191,7 +191,7 @@ enum {
+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
+
+ /* Irq domain name was allocated in __irq_domain_add() */
+- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
++ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
+
+ /* Irq domain is an IPI domain with virq per cpu */
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index fa0c3dae2094..c44e4cfbcb16 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -1220,6 +1220,7 @@ struct pci_bits {
+ };
+
+ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
++extern void ata_pci_shutdown_one(struct pci_dev *pdev);
+ extern void ata_pci_remove_one(struct pci_dev *pdev);
+
+ #ifdef CONFIG_PM
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index bfa4e2ee94a9..bd5fe0e907e8 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -225,6 +225,8 @@ struct tty_port_client_operations {
+ void (*write_wakeup)(struct tty_port *port);
+ };
+
++extern const struct tty_port_client_operations tty_port_default_client_ops;
++
+ struct tty_port {
+ struct tty_bufhead buf; /* Locked internally */
+ struct tty_struct *tty; /* Back pointer */
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index a1be64c9940f..22c1f579afe3 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -69,4 +69,7 @@
+ /* Hub needs extra delay after resetting its port. */
+ #define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+
++/* device has blacklisted endpoints */
++#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index 533f56733ba8..b71b5c4f418c 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -627,7 +627,6 @@ struct iscsi_reject {
+ #define ISCSI_REASON_BOOKMARK_INVALID 9
+ #define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
+ #define ISCSI_REASON_NEGOTIATION_RESET 11
+-#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
+
+ /* Max. number of Key=Value pairs in a text message */
+ #define MAX_KEY_VALUE_PAIRS 8192
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index 40ab20439fee..a36b7227a15a 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
+ struct list_head list; /* list of all substream for given stream */
+ int stream; /* direction */
+ int number; /* substream number */
+- unsigned int opened: 1, /* open flag */
+- append: 1, /* append flag (merge more streams) */
+- active_sensing: 1; /* send active sensing when close */
++ bool opened; /* open flag */
++ bool append; /* append flag (merge more streams) */
++ bool active_sensing; /* send active sensing when close */
+ int use_count; /* use counter (for output) */
+ size_t bytes;
+ struct snd_rawmidi *rmidi;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index ec97a7072413..fe12ea8dd2b3 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -2368,11 +2368,9 @@ void exit_sem(struct task_struct *tsk)
+ ipc_assert_locked_object(&sma->sem_perm);
+ list_del(&un->list_id);
+
+- /* we are the last process using this ulp, acquiring ulp->lock
+- * isn't required. Besides that, we are also protected against
+- * IPC_RMID as we hold sma->sem_perm lock now
+- */
++ spin_lock(&ulp->lock);
+ list_del_rcu(&un->list_proc);
++ spin_unlock(&ulp->lock);
+
+ /* perform adjustments registered in un */
+ for (i = 0; i < sma->sem_nsems; i++) {
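+/*
+ * Illustrative sketch, not the ipc/sem code: the fix above re-takes
+ * ulp->lock around list_del_rcu() because the "we are the last user"
+ * assumption was unsafe against concurrent list walkers. Generic
+ * writer-side deletion from an RCU list (hypothetical my_entry):
+ */
+struct my_entry {
+        struct list_head node;
+        struct rcu_head rcu;
+};
+
+static void my_entry_del(struct my_entry *e, spinlock_t *lock)
+{
+        spin_lock(lock);        /* serialize against other writers */
+        list_del_rcu(&e->node);
+        spin_unlock(lock);
+        kfree_rcu(e, rcu);      /* free only after readers drain */
+}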
+diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
+index 5b9da0954a27..3668a0bc18ec 100644
+--- a/kernel/bpf/offload.c
++++ b/kernel/bpf/offload.c
+@@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+
+ ulen = info->jited_prog_len;
+ info->jited_prog_len = aux->offload->jited_len;
+- if (info->jited_prog_len & ulen) {
++ if (info->jited_prog_len && ulen) {
+ uinsns = u64_to_user_ptr(info->jited_prog_insns);
+ ulen = min_t(u32, info->jited_prog_len, ulen);
+ if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
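+/*
+ * Note on the one-character fix above: '&' bitwise-ANDs the two
+ * lengths, so the copy_to_user() branch was skipped whenever their bit
+ * patterns happened not to overlap; '&&' tests that both are non-zero.
+ * For example, with jited_prog_len == 4 and ulen == 2:
+ *
+ *        4 & 2  == 0     (branch wrongly not taken)
+ *        4 && 2 == 1     (branch correctly taken)
+ */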
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index 3924fbe829d4..c9d8eb7f5c02 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq,
+
+ extern bool irq_can_set_affinity_usr(unsigned int irq);
+
+-extern int irq_select_affinity_usr(unsigned int irq);
+-
+ extern void irq_set_thread_affinity(struct irq_desc *desc);
+
+ extern int irq_do_set_affinity(struct irq_data *data,
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 1753486b440c..55b080101a20 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -442,23 +442,9 @@ int irq_setup_affinity(struct irq_desc *desc)
+ {
+ return irq_select_affinity(irq_desc_get_irq(desc));
+ }
+-#endif
++#endif /* CONFIG_AUTO_IRQ_AFFINITY */
++#endif /* CONFIG_SMP */
+
+-/*
+- * Called when a bogus affinity is set via /proc/irq
+- */
+-int irq_select_affinity_usr(unsigned int irq)
+-{
+- struct irq_desc *desc = irq_to_desc(irq);
+- unsigned long flags;
+- int ret;
+-
+- raw_spin_lock_irqsave(&desc->lock, flags);
+- ret = irq_setup_affinity(desc);
+- raw_spin_unlock_irqrestore(&desc->lock, flags);
+- return ret;
+-}
+-#endif
+
+ /**
+ * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index cfc4f088a0e7..f5958c55406f 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
+ return show_irq_affinity(AFFINITY_LIST, m);
+ }
+
++#ifndef CONFIG_AUTO_IRQ_AFFINITY
++static inline int irq_select_affinity_usr(unsigned int irq)
++{
++ /*
++ * If the interrupt is started up already then this fails. The
++ * interrupt is assigned to an online CPU already. There is no
++ * point to move it around randomly. Tell user space that the
++ * selected mask is bogus.
++ *
++ * If not then any change to the affinity is pointless because the
++ * startup code invokes irq_setup_affinity() which will select
++ * an online CPU anyway.
++ */
++ return -EINVAL;
++}
++#else
++/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
++static inline int irq_select_affinity_usr(unsigned int irq)
++{
++ return irq_select_affinity(irq);
++}
++#endif
+
+ static ssize_t write_irq_affinity(int type, struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index ce8f6748678a..9154e745f097 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
+ if (static_branch_likely(&psi_disabled))
+ return -EOPNOTSUPP;
+
++ if (!nbytes)
++ return -EINVAL;
++
+ buf_size = min(nbytes, sizeof(buf));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index ed717dd08ff3..81c69c08d1d1 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
+ return true;
+ if (stack_slabs[depot_index] == NULL) {
+ stack_slabs[depot_index] = *prealloc;
++ *prealloc = NULL;
+ } else {
+- stack_slabs[depot_index + 1] = *prealloc;
++ /* If this is the last depot slab, do not touch the next one. */
++ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
++ stack_slabs[depot_index + 1] = *prealloc;
++ *prealloc = NULL;
++ }
+ /*
+ * This smp_store_release pairs with smp_load_acquire() from
+ * |next_slab_inited| above and in stack_depot_save().
+ */
+ smp_store_release(&next_slab_inited, 1);
+ }
+- *prealloc = NULL;
+ return true;
+ }
+
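+/*
+ * Illustrative sketch, not the stackdepot code: the fix above only
+ * consumes *prealloc when it is actually stored, and bounds-checks the
+ * index before touching the slab after the current one. Generic
+ * bounds-checked hand-off (MY_MAX_SLABS and slabs[] are placeholders):
+ */
+        if (idx + 1 < MY_MAX_SLABS && !slabs[idx + 1]) {
+                slabs[idx + 1] = *prealloc;
+                *prealloc = NULL;       /* consumed only on store */
+        }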
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index b5b4e310fe70..ae9044bc9f80 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -418,8 +418,10 @@ int memcg_expand_shrinker_maps(int new_id)
+ if (mem_cgroup_is_root(memcg))
+ continue;
+ ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
+- if (ret)
++ if (ret) {
++ mem_cgroup_iter_break(NULL, memcg);
+ goto unlock;
++ }
+ }
+ unlock:
+ if (!ret)
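+/*
+ * Note on the memcg fix above: mem_cgroup_iter() pins the current
+ * position with a css reference, so bailing out of the walk without
+ * mem_cgroup_iter_break() leaks it. Canonical early-exit shape, using
+ * for_each_mem_cgroup() as defined locally in mm/memcontrol.c
+ * (do_work is a placeholder):
+ */
+        for_each_mem_cgroup(memcg) {
+                ret = do_work(memcg);
+                if (ret) {
+                        mem_cgroup_iter_break(NULL, memcg);
+                        break;
+                }
+        }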
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 4390dbea4aa5..514cc19c5916 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -195,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+ bool downgraded = false;
+ LIST_HEAD(uf);
+
+- brk = untagged_addr(brk);
+-
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+@@ -1583,8 +1581,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
+ struct file *file = NULL;
+ unsigned long retval;
+
+- addr = untagged_addr(addr);
+-
+ if (!(flags & MAP_ANONYMOUS)) {
+ audit_mmap_fd(fd, flags);
+ file = fget(fd);
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 1fc8a29fbe3f..1d98281f7204 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -607,7 +607,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ LIST_HEAD(uf_unmap);
+
+ addr = untagged_addr(addr);
+- new_addr = untagged_addr(new_addr);
+
+ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
+ return ret;
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 69b41b6046a5..a5e5c1c3a2a8 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -884,7 +884,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
+ * Poison uninitialized struct pages in order to catch invalid flags
+ * combinations.
+ */
+- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
++ page_init_poison(memmap, sizeof(struct page) * nr_pages);
+
+ ms = __nr_to_section(section_nr);
+ set_section_nid(section_nr, nid);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index e7f10c4b40f0..7fde5f904c8d 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2530,10 +2530,13 @@ out:
+ /*
+ * Scan types proportional to swappiness and
+ * their relative recent reclaim efficiency.
+- * Make sure we don't miss the last page
+- * because of a round-off error.
++ * Make sure we don't miss the last page on
++ * the offlined memory cgroups because of a
++ * round-off error.
+ */
+- scan = DIV64_U64_ROUND_UP(scan * fraction[file],
++ scan = mem_cgroup_online(memcg) ?
++ div64_u64(scan * fraction[file], denominator) :
++ DIV64_U64_ROUND_UP(scan * fraction[file],
+ denominator);
+ break;
+ case SCAN_FILE:
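+/*
+ * Worked example for the vmscan hunk above: rounding up is only safe
+ * for offlined memcgs, where missing the last page would strand it;
+ * for online memcgs it turns a should-be-zero scan target into one
+ * page and skews proportional reclaim. With scan * fraction[file] == 3
+ * and denominator == 10:
+ *
+ *        div64_u64(3, 10)          -> 0  (online: skip this round)
+ *        DIV64_U64_ROUND_UP(3, 10) -> 1  (offline: force progress)
+ */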
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index ced3fc8fad7c..6520d9ec1297 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -851,6 +851,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
+ }
+
++#define HASHLIMIT_MAX_SIZE 1048576
++
+ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ struct xt_hashlimit_htable **hinfo,
+ struct hashlimit_cfg3 *cfg,
+@@ -861,6 +863,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+
+ if (cfg->gc_interval == 0 || cfg->expire == 0)
+ return -EINVAL;
++ if (cfg->size > HASHLIMIT_MAX_SIZE) {
++ cfg->size = HASHLIMIT_MAX_SIZE;
++ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
++ }
++ if (cfg->max > HASHLIMIT_MAX_SIZE) {
++ cfg->max = HASHLIMIT_MAX_SIZE;
++ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
++ }
+ if (par->family == NFPROTO_IPV4) {
+ if (cfg->srcmask > 32 || cfg->dstmask > 32)
+ return -EINVAL;
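+/*
+ * Note on the hashlimit hunk above: oversized user-supplied table
+ * parameters are clamped with a ratelimited warning rather than
+ * rejected with -EINVAL, so existing rulesets keep loading while the
+ * kernel bounds its allocation. Generic clamp-and-warn shape
+ * (MY_MAX is illustrative):
+ */
+        if (cfg->size > MY_MAX) {
+                cfg->size = MY_MAX;
+                pr_info_ratelimited("size too large, truncated to %u\n",
+                                    cfg->size);
+        }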
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index dbdbc4f18b5e..c9f34b0a11df 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
+ }
+
+ /*
+- * Final call destruction under RCU.
++ * Final call destruction - but must be done in process context.
+ */
+-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++static void rxrpc_destroy_call(struct work_struct *work)
+ {
+- struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++ struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
+ struct rxrpc_net *rxnet = call->rxnet;
+
+ rxrpc_put_connection(call->conn);
+@@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+ wake_up_var(&rxnet->nr_calls);
+ }
+
++/*
++ * Final call destruction under RCU.
++ */
++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++{
++ struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++
++ if (in_softirq()) {
++ INIT_WORK(&call->processor, rxrpc_destroy_call);
++ if (!rxrpc_queue_work(&call->processor))
++ BUG();
++ } else {
++ rxrpc_destroy_call(&call->processor);
++ }
++}
++
+ /*
+ * clean up a call
+ */
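+/*
+ * Illustrative sketch, not the rxrpc code: RCU callbacks may run in
+ * softirq context, where the sleeping teardown done at final
+ * destruction is not safe, so the callback above bounces the work to
+ * process context. Generic shape (my_obj and my_free_work are
+ * placeholders):
+ */
+struct my_obj {
+        struct rcu_head rcu;
+        struct work_struct work;
+};
+
+static void my_free_work(struct work_struct *work)
+{
+        kfree(container_of(work, struct my_obj, work));
+}
+
+static void my_free_rcu(struct rcu_head *rcu)
+{
+        struct my_obj *obj = container_of(rcu, struct my_obj, rcu);
+
+        if (in_softirq()) {     /* cannot do sleeping teardown here */
+                INIT_WORK(&obj->work, my_free_work);
+                schedule_work(&obj->work);
+        } else {
+                my_free_work(&obj->work);
+        }
+}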
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 6d9592f0ae1d..cc93157fa950 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
+ event->queue = queue;
+ event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
+ if (real_time) {
+- event->time.time = snd_seq_timer_get_cur_time(q->timer);
++ event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
+ event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
+ } else {
+ event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
+@@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
+ tmr = queue->timer;
+ status->events = queue->tickq->cells + queue->timeq->cells;
+
+- status->time = snd_seq_timer_get_cur_time(tmr);
++ status->time = snd_seq_timer_get_cur_time(tmr, true);
+ status->tick = snd_seq_timer_get_cur_tick(tmr);
+
+ status->running = tmr->running;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index caf68bf42f13..71a6ea62c3be 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+ {
+ unsigned long flags;
+ struct snd_seq_event_cell *cell;
++ snd_seq_tick_time_t cur_tick;
++ snd_seq_real_time_t cur_time;
+
+ if (q == NULL)
+ return;
+@@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+
+ __again:
+ /* Process tick queue... */
++ cur_tick = snd_seq_timer_get_cur_tick(q->timer);
+ for (;;) {
+- cell = snd_seq_prioq_cell_out(q->tickq,
+- &q->timer->tick.cur_tick);
++ cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
+ if (!cell)
+ break;
+ snd_seq_dispatch_event(cell, atomic, hop);
+ }
+
+ /* Process time queue... */
++ cur_time = snd_seq_timer_get_cur_time(q->timer, false);
+ for (;;) {
+- cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++ cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
+ if (!cell)
+ break;
+ snd_seq_dispatch_event(cell, atomic, hop);
+@@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client)
+ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ {
+ struct snd_seq_queue *q = queueptr(queueid);
++ unsigned long flags;
+
+ if (q == NULL)
+ return -EINVAL;
+@@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ return -EPERM;
+ }
+
++ spin_lock_irqsave(&q->owner_lock, flags);
+ q->locked = locked ? 1 : 0;
+ q->owner = client;
++ spin_unlock_irqrestore(&q->owner_lock, flags);
+ queue_access_unlock(q);
+ queuefree(q);
+
+@@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client)
+ unsigned long flags;
+ int i;
+ struct snd_seq_queue *q;
++ bool matched;
+
+ for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ if ((q = queueptr(i)) == NULL)
+ continue;
+ spin_lock_irqsave(&q->owner_lock, flags);
+- if (q->owner == client)
++ matched = (q->owner == client);
++ if (matched)
+ q->klocked = 1;
+ spin_unlock_irqrestore(&q->owner_lock, flags);
+- if (q->owner == client) {
++ if (matched) {
+ if (q->timer->running)
+ snd_seq_timer_stop(q->timer);
+ snd_seq_timer_reset(q->timer);
+@@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
+ int i, bpm;
+ struct snd_seq_queue *q;
+ struct snd_seq_timer *tmr;
++ bool locked;
++ int owner;
+
+ for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ if ((q = queueptr(i)) == NULL)
+@@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
+ else
+ bpm = 0;
+
++ spin_lock_irq(&q->owner_lock);
++ locked = q->locked;
++ owner = q->owner;
++ spin_unlock_irq(&q->owner_lock);
++
+ snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
+- snd_iprintf(buffer, "owned by client : %d\n", q->owner);
+- snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
++ snd_iprintf(buffer, "owned by client : %d\n", owner);
++ snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
+ snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
+ snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
+ snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 3bc6095df44d..0b43fc5fe349 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -422,14 +422,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+ }
+
+ /* return current 'real' time. use timeofday() to get better granularity. */
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++ bool adjust_ktime)
+ {
+ snd_seq_real_time_t cur_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ cur_time = tmr->cur_time;
+- if (tmr->running) {
++ if (adjust_ktime && tmr->running) {
+ struct timespec64 tm;
+
+ ktime_get_ts64(&tm);
+@@ -446,7 +447,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ high PPQ values) */
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
+ {
+- return tmr->tick.cur_tick;
++ snd_seq_tick_time_t cur_tick;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tmr->lock, flags);
++ cur_tick = tmr->tick.cur_tick;
++ spin_unlock_irqrestore(&tmr->lock, flags);
++ return cur_tick;
+ }
+
+
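+/*
+ * Illustrative sketch, not the ALSA sequencer code: the hunks above
+ * snapshot timer fields under the same spinlock the updater holds
+ * instead of reading them racily. Generic locked-getter shape
+ * (my_timer is a placeholder):
+ */
+struct my_timer {
+        spinlock_t lock;
+        u64 cur;
+};
+
+static u64 my_timer_get_cur(struct my_timer *t)
+{
+        unsigned long flags;
+        u64 v;
+
+        spin_lock_irqsave(&t->lock, flags);
+        v = t->cur;             /* consistent multi-word read */
+        spin_unlock_irqrestore(&t->lock, flags);
+        return v;
+}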
+diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
+index 66c3e344eae3..4bec57df8158 100644
+--- a/sound/core/seq/seq_timer.h
++++ b/sound/core/seq/seq_timer.h
+@@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
+ int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
+ int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
+ int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++ bool adjust_ktime);
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
+
+ extern int seq_default_timer_class;
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index 886cb7811bd6..2efee794cac6 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
+ if (spk_alloc & (1 << i))
+- j += snprintf(buf + j, buflen - j, " %s",
++ j += scnprintf(buf + j, buflen - j, " %s",
+ cea_speaker_allocation_names[i]);
+ }
+ buf[j] = '\0'; /* necessary when j == 0 */
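+/*
+ * Note on this and the following snprintf -> scnprintf hunks:
+ * snprintf() returns the length that *would* have been written, so
+ * "j += snprintf(buf + j, buflen - j, ...)" can push j past buflen and
+ * make the next buflen - j wrap to a huge size_t; scnprintf() returns
+ * the bytes actually stored, keeping j bounded. Small demonstration:
+ */
+        char buf[8];
+        int j = 0;
+
+        j += scnprintf(buf + j, sizeof(buf) - j, "%s", "0123456789");
+        /* j == 7 here (truncated fit); snprintf would have reported 10 */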
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index a2fb19129219..6cb72336433a 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
+ if (pcm & (AC_SUPPCM_BITS_8 << i))
+- j += snprintf(buf + j, buflen - j, " %d", bits[i]);
++ j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
+
+ buf[j] = '\0'; /* necessary when j == 0 */
+ }
+diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
+index d081fb2880a0..82cf1da2ff12 100644
+--- a/sound/pci/hda/hda_eld.c
++++ b/sound/pci/hda/hda_eld.c
+@@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
+ if (pcm & (1 << i))
+- j += snprintf(buf + j, buflen - j, " %d",
++ j += scnprintf(buf + j, buflen - j, " %d",
+ alsa_rates[i]);
+
+ buf[j] = '\0'; /* necessary when j == 0 */
+diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
+index fcc34417cbce..6dbe99131bc4 100644
+--- a/sound/pci/hda/hda_sysfs.c
++++ b/sound/pci/hda/hda_sysfs.c
+@@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev,
+ int i, len = 0;
+ mutex_lock(&codec->user_mutex);
+ snd_array_for_each(&codec->init_verbs, i, v) {
+- len += snprintf(buf + len, PAGE_SIZE - len,
++ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "0x%02x 0x%03x 0x%04x\n",
+ v->nid, v->verb, v->param);
+ }
+@@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev,
+ int i, len = 0;
+ mutex_lock(&codec->user_mutex);
+ snd_array_for_each(&codec->hints, i, hint) {
+- len += snprintf(buf + len, PAGE_SIZE - len,
++ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%s = %s\n", hint->key, hint->val);
+ }
+ mutex_unlock(&codec->user_mutex);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f162e607fc6c..4f78b40831d8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2447,7 +2447,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
++ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
++ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
+index d1dc8e6366dc..71f2d42188c4 100644
+--- a/sound/soc/atmel/Kconfig
++++ b/sound/soc/atmel/Kconfig
+@@ -10,11 +10,11 @@ config SND_ATMEL_SOC
+ if SND_ATMEL_SOC
+
+ config SND_ATMEL_SOC_PDC
+- tristate
++ bool
+ depends on HAS_DMA
+
+ config SND_ATMEL_SOC_DMA
+- tristate
++ bool
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
+ config SND_ATMEL_SOC_SSC
+diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
+index 1f6890ed3738..c7d2989791be 100644
+--- a/sound/soc/atmel/Makefile
++++ b/sound/soc/atmel/Makefile
+@@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
+ snd-soc-atmel-i2s-objs := atmel-i2s.o
+ snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o
+
+-obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
+-obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
++# pdc and dma both need to be built-in if any user of
++# ssc is built-in.
++ifdef CONFIG_SND_ATMEL_SOC_PDC
++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o
++endif
++ifdef CONFIG_SND_ATMEL_SOC_DMA
++obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o
++endif
+ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
+ obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o
+ obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index b517e4bc1b87..41b83ecaf008 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -1019,12 +1019,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ &fsl_sai_dai, 1);
+ if (ret)
+- return ret;
++ goto err_pm_disable;
+
+- if (sai->soc_data->use_imx_pcm)
+- return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
+- else
+- return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
++ if (sai->soc_data->use_imx_pcm) {
++ ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
++ if (ret)
++ goto err_pm_disable;
++ } else {
++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
++ if (ret)
++ goto err_pm_disable;
++ }
++
++ return ret;
++
++err_pm_disable:
++ pm_runtime_disable(&pdev->dev);
++
++ return ret;
+ }
+
+ static int fsl_sai_remove(struct platform_device *pdev)
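+/*
+ * Illustrative sketch, not the fsl_sai driver itself: the probe fix
+ * above routes every late failure through a label that undoes the
+ * earlier pm_runtime_enable(). Canonical goto-unwind shape for a
+ * probe() function (my_register is a placeholder):
+ */
+static int my_probe(struct platform_device *pdev)
+{
+        int ret;
+
+        pm_runtime_enable(&pdev->dev);
+
+        ret = my_register(pdev);
+        if (ret)
+                goto err_pm_disable;
+
+        return 0;
+
+err_pm_disable:
+        pm_runtime_disable(&pdev->dev);
+        return ret;
+}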
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b6378f025836..935b5375ecc5 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3888,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
+ runtime->rate = params_rate(params);
+
+ out:
+- if (ret < 0)
+- kfree(runtime);
+-
+ kfree(params);
+ return ret;
+ }
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 1923b0c36bce..3f645200d3a5 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -443,6 +443,10 @@ struct snd_soc_dai_driver skl_dai[] = {
+ .name = "iDisp3 Pin",
+ .ops = &hda_link_dai_ops,
+ },
++{
++ .name = "iDisp4 Pin",
++ .ops = &hda_link_dai_ops,
++},
+ {
+ .name = "Analog CPU DAI",
+ .ops = &hda_link_dai_ops,
+diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
+index 55798bc8eae2..686561df8e13 100644
+--- a/sound/soc/sunxi/sun8i-codec.c
++++ b/sound/soc/sunxi/sun8i-codec.c
+@@ -80,6 +80,7 @@
+
+ #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12)
+ #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
++#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
+@@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ return -EINVAL;
+ }
+ regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
+- BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
++ SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
+ value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
+
+ return 0;
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+index 07f5b462c2ef..aa43e0bd210c 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+@@ -3,6 +3,11 @@
+
+ #include "test_progs.h"
+
++#define TCP_REPAIR 19 /* TCP sock is under repair right now */
++
++#define TCP_REPAIR_ON 1
++#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
++
+ static int connected_socket_v4(void)
+ {
+ struct sockaddr_in addr = {