author     Mike Pagano <mpagano@gentoo.org>  2022-01-20 05:00:40 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2022-01-20 05:00:40 -0500
commit     8ccc4e665a43df9a1d94143ccfa1a96a5ebcd056 (patch)
tree       5889676bfc42e5abce04e2790ec37917ecccf8ac /1172_linux-5.4.173.patch
parent     Linux patch 5.4.172 (diff)
download   linux-patches-8ccc4e665a43df9a1d94143ccfa1a96a5ebcd056.tar.gz
           linux-patches-8ccc4e665a43df9a1d94143ccfa1a96a5ebcd056.tar.bz2
           linux-patches-8ccc4e665a43df9a1d94143ccfa1a96a5ebcd056.zip
Linux patch 5.4.173 (5.4-177)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1172_linux-5.4.173.patch')
-rw-r--r--  1172_linux-5.4.173.patch  788
1 file changed, 788 insertions, 0 deletions
diff --git a/1172_linux-5.4.173.patch b/1172_linux-5.4.173.patch
new file mode 100644
index 00000000..bba402e5
--- /dev/null
+++ b/1172_linux-5.4.173.patch
@@ -0,0 +1,788 @@
+diff --git a/Makefile b/Makefile
+index 2f914dd223c81..cb9e6cd0d0249 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 172
++SUBLEVEL = 173
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+@@ -1022,7 +1022,7 @@ HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
+ ifdef CONFIG_STACK_VALIDATION
+ has_libelf := $(call try-run,\
+- echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
++ echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+ ifeq ($(has_libelf),1)
+ objtool_target := tools/objtool FORCE
+ else
+diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
+index 3b69a76d341e7..1626dfc6f6ce6 100644
+--- a/arch/arm/kernel/perf_callchain.c
++++ b/arch/arm/kernel/perf_callchain.c
+@@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail,
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct frame_tail __user *tail;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr,
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stackframe fr;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
+
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+- return perf_guest_cbs->get_guest_ip();
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++ if (guest_cbs && guest_cbs->is_in_guest())
++ return guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+ }
+
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ int misc = 0;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+- if (perf_guest_cbs->is_user_mode())
++ if (guest_cbs && guest_cbs->is_in_guest()) {
++ if (guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index fc388eb60e0b7..64cce0c8560ab 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -743,6 +743,7 @@ config SWP_EMULATE
+ config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ depends on ARCH_SUPPORTS_BIG_ENDIAN
++ depends on !LD_IS_LLD
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+ Note that your board must be properly built and your board
+diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
+index b0e03e052dd1d..b84ec4ce7d8dc 100644
+--- a/arch/arm64/kernel/perf_callchain.c
++++ b/arch/arm64/kernel/perf_callchain.c
+@@ -102,7 +102,9 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -147,9 +149,10 @@ static int callchain_trace(struct stackframe *frame, void *data)
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stackframe frame;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+- return perf_guest_cbs->get_guest_ip();
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++ if (guest_cbs && guest_cbs->is_in_guest())
++ return guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+ }
+
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ int misc = 0;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+- if (perf_guest_cbs->is_user_mode())
++ if (guest_cbs && guest_cbs->is_in_guest()) {
++ if (guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
+index ab55e98ee8f62..35318a635a5fa 100644
+--- a/arch/csky/kernel/perf_callchain.c
++++ b/arch/csky/kernel/perf_callchain.c
+@@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ unsigned long fp = 0;
+
+ /* C-SKY does not support virtualization. */
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
++ if (guest_cbs && guest_cbs->is_in_guest())
+ return;
+
+ fp = regs->regs[4];
+@@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stackframe fr;
+
+ /* C-SKY does not support virtualization. */
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ pr_warn("C-SKY does not support perf in guest mode!");
+ return;
+ }
+diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c
+index 334c2a6cec23d..8a4f9babb1646 100644
+--- a/arch/nds32/kernel/perf_event_cpu.c
++++ b/arch/nds32/kernel/perf_event_cpu.c
+@@ -1363,6 +1363,7 @@ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ unsigned long fp = 0;
+ unsigned long gp = 0;
+ unsigned long lp = 0;
+@@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+
+ leaf_fp = 0;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -1479,9 +1480,10 @@ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stackframe fr;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+@@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
+ /* However, NDS32 does not support virtualization */
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+- return perf_guest_cbs->get_guest_ip();
++ if (guest_cbs && guest_cbs->is_in_guest())
++ return guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+ }
+
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ int misc = 0;
+
+ /* However, NDS32 does not support virtualization */
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+- if (perf_guest_cbs->is_user_mode())
++ if (guest_cbs && guest_cbs->is_in_guest()) {
++ if (guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
+index 8d2804f05cf93..22a93009362d7 100644
+--- a/arch/riscv/kernel/perf_callchain.c
++++ b/arch/riscv/kernel/perf_callchain.c
+@@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ unsigned long fp = 0;
+
+ /* RISC-V does not support perf in guest mode. */
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
++ if (guest_cbs && guest_cbs->is_in_guest())
+ return;
+
+ fp = regs->s0;
+@@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task,
+ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
+ /* RISC-V does not support perf in guest mode. */
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ pr_warn("RISC-V does not support perf in guest mode!");
+ return;
+ }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index fd73a8aa89d23..8be5750fe5ac3 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1982,6 +1982,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
+ return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
+ }
+
++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
++{
++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
++
++ return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
++}
++
+ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index b286818d8d54d..49dc00d82e5ea 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4205,10 +4205,15 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
+ spin_lock(&vcpu->kvm->arch.start_stop_lock);
+ online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
+
+- /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
++ /*
++ * Set the VCPU to STOPPED and THEN clear the interrupt flag,
++ * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
++ * have been fully processed. This will ensure that the VCPU
++ * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
++ */
++ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
+ kvm_s390_clear_stop_irq(vcpu);
+
+- kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
+ __disable_ibs_on_vcpu(vcpu);
+
+ for (i = 0; i < online_vcpus; i++) {
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 63d94a5253a8f..d497d3e58784b 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -373,6 +373,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm);
+ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
+ extern struct kvm_device_ops kvm_flic_ops;
+ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
++int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
+ void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
+ int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
+ void __user *buf, int len);
+diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
+index 683036c1c92a8..3dc921e853b6e 100644
+--- a/arch/s390/kvm/sigp.c
++++ b/arch/s390/kvm/sigp.c
+@@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
+ if (!dst_vcpu)
+ return SIGP_CC_NOT_OPERATIONAL;
+
++ /*
++ * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
++ * are processed asynchronously. Until the affected VCPU finishes
++ * its work and calls back into KVM to clear the (RESTART or STOP)
++ * interrupt, we need to return any new non-reset orders "busy".
++ *
++ * This is important because a single VCPU could issue:
++ * 1) SIGP STOP $DESTINATION
++ * 2) SIGP SENSE $DESTINATION
++ *
++ * If the SIGP SENSE would not be rejected as "busy", it could
++ * return an incorrect answer as to whether the VCPU is STOPPED
++ * or OPERATING.
++ */
++ if (order_code != SIGP_INITIAL_CPU_RESET &&
++ order_code != SIGP_CPU_RESET) {
++ /*
++ * Lockless check. Both SIGP STOP and SIGP (RE)START
++ * properly synchronize everything while processing
++ * their orders, while the guest cannot observe a
++ * difference when issuing other orders from two
++ * different VCPUs.
++ */
++ if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
++ kvm_s390_is_restart_irq_pending(dst_vcpu))
++ return SIGP_CC_BUSY;
++ }
++
+ switch (order_code) {
+ case SIGP_SENSE:
+ vcpu->stat.instruction_sigp_sense++;
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 00bccb4d17722..e4f7ac28dcf2b 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2366,10 +2366,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
+ void
+ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct unwind_state state;
+ unsigned long addr;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* TODO: We don't support guest os callchain now */
+ return;
+ }
+@@ -2475,10 +2476,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ void
+ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ struct stack_frame frame;
+ const unsigned long __user *fp;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
++ if (guest_cbs && guest_cbs->is_in_guest()) {
+ /* TODO: We don't support guest os callchain now */
+ return;
+ }
+@@ -2562,18 +2564,21 @@ static unsigned long code_segment_base(struct pt_regs *regs)
+
+ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ {
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+- return perf_guest_cbs->get_guest_ip();
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
++
++ if (guest_cbs && guest_cbs->is_in_guest())
++ return guest_cbs->get_guest_ip();
+
+ return regs->ip + code_segment_base(regs);
+ }
+
+ unsigned long perf_misc_flags(struct pt_regs *regs)
+ {
++ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ int misc = 0;
+
+- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+- if (perf_guest_cbs->is_user_mode())
++ if (guest_cbs && guest_cbs->is_in_guest()) {
++ if (guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 70758f99c9e47..b33540e1efa88 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2333,6 +2333,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
+ {
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++ struct perf_guest_info_callbacks *guest_cbs;
+ int bit;
+ int handled = 0;
+
+@@ -2386,9 +2387,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
+ */
+ if (__test_and_clear_bit(55, (unsigned long *)&status)) {
+ handled++;
+- if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
+- perf_guest_cbs->handle_intel_pt_intr))
+- perf_guest_cbs->handle_intel_pt_intr();
++
++ guest_cbs = perf_get_guest_cbs();
++ if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
++ guest_cbs->handle_intel_pt_intr))
++ guest_cbs->handle_intel_pt_intr();
+ else
+ intel_pt_interrupt();
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0aaf40be956ff..1f7dfa5aa42da 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1218,7 +1218,7 @@ static const u32 msrs_to_save_all[] = {
+ MSR_IA32_UMWAIT_CONTROL,
+
+ MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
+- MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
++ MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
+ MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
+ MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
+diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
+index 30d0523014e0d..5e9b00711357b 100644
+--- a/drivers/base/devtmpfs.c
++++ b/drivers/base/devtmpfs.c
+@@ -25,6 +25,7 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/kthread.h>
++#include <linux/fs_context.h>
+ #include <uapi/linux/mount.h>
+ #include "base.h"
+
+@@ -62,8 +63,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
+ const char *dev_name, void *data)
+ {
+ struct super_block *s = mnt->mnt_sb;
++ int err;
++
+ atomic_inc(&s->s_active);
+ down_write(&s->s_umount);
++ err = reconfigure_single(s, flags, data);
++ if (err < 0) {
++ deactivate_locked_super(s);
++ return ERR_PTR(err);
++ }
+ return dget(s->s_root);
+ }
+
+diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
+index 59db70fb45614..314b9bb78e437 100644
+--- a/drivers/firmware/qemu_fw_cfg.c
++++ b/drivers/firmware/qemu_fw_cfg.c
+@@ -385,9 +385,7 @@ static void fw_cfg_sysfs_cache_cleanup(void)
+ struct fw_cfg_sysfs_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+- /* will end up invoking fw_cfg_sysfs_cache_delist()
+- * via each object's release() method (i.e. destructor)
+- */
++ fw_cfg_sysfs_cache_delist(entry);
+ kobject_put(&entry->kobj);
+ }
+ }
+@@ -445,7 +443,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+ {
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+- fw_cfg_sysfs_cache_delist(entry);
+ kfree(entry);
+ }
+
+@@ -598,20 +595,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
+ /* set file entry information */
+ entry->size = be32_to_cpu(f->size);
+ entry->select = be16_to_cpu(f->select);
+- memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
++ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+
+ /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+ fw_cfg_sel_ko, "%d", entry->select);
+- if (err) {
+- kobject_put(&entry->kobj);
+- return err;
+- }
++ if (err)
++ goto err_put_entry;
+
+ /* add raw binary content access */
+ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+ if (err)
+- goto err_add_raw;
++ goto err_del_entry;
+
+ /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
+@@ -620,9 +615,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
+ fw_cfg_sysfs_cache_enlist(entry);
+ return 0;
+
+-err_add_raw:
++err_del_entry:
+ kobject_del(&entry->kobj);
+- kfree(entry);
++err_put_entry:
++ kobject_put(&entry->kobj);
+ return err;
+ }
+
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index 96b85d66e7a87..fe58723fc5ac7 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
+ if (ep == NULL)
+ return -EIO;
+
++ /* Reject broken descriptors. */
++ if (usb_endpoint_maxp(&ep->desc) == 0)
++ return -EIO;
++
+ ret = uvc_init_video_bulk(stream, ep, gfp_flags);
+ }
+
+diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
+index a7e47e068ad9b..7769a9b556c70 100644
+--- a/drivers/mtd/chips/Kconfig
++++ b/drivers/mtd/chips/Kconfig
+@@ -55,12 +55,14 @@ choice
+ LITTLE_ENDIAN_BYTE, if the bytes are reversed.
+
+ config MTD_CFI_NOSWAP
++ depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN
+ bool "NO"
+
+ config MTD_CFI_BE_BYTE_SWAP
+ bool "BIG_ENDIAN_BYTE"
+
+ config MTD_CFI_LE_BYTE_SWAP
++ depends on !ARCH_IXP4XX
+ bool "LITTLE_ENDIAN_BYTE"
+
+ endchoice
+diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
+index bc82305ebb4c2..ffbf4f6cb9cfe 100644
+--- a/drivers/mtd/maps/Kconfig
++++ b/drivers/mtd/maps/Kconfig
+@@ -303,7 +303,7 @@ config MTD_DC21285
+
+ config MTD_IXP4XX
+ tristate "CFI Flash device mapped on Intel IXP4xx based systems"
+- depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
++ depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS
+ help
+ This enables MTD access to flash devices on platforms based
+ on Intel's IXP4xx family of network processors such as the
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+index f070f25bb735a..df7a14320fd29 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+@@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ _initpabias(hw);
+ rtl92c_dm_init(hw);
+ exit:
++ local_irq_disable();
+ local_irq_restore(flags);
+ return err;
+ }
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 138b5b4d621d2..a2367c7aef5b3 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -585,7 +585,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ param->key);
+ }
+
+- if (len > PAGE_SIZE - 2 - size)
++ if (size + len + 2 > PAGE_SIZE)
+ return invalf(fc, "VFS: Legacy: Cumulative options too large");
+ if (strchr(param->key, ',') ||
+ (param->type == fs_value_is_string &&
+diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
+index 2bb916d68576f..023b9bc54b7ce 100644
+--- a/fs/orangefs/orangefs-bufmap.c
++++ b/fs/orangefs/orangefs-bufmap.c
+@@ -179,7 +179,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
+ {
+ kfree(bufmap->page_array);
+ kfree(bufmap->desc_array);
+- kfree(bufmap->buffer_index_array);
++ bitmap_free(bufmap->buffer_index_array);
+ kfree(bufmap);
+ }
+
+@@ -229,8 +229,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+ bufmap->desc_size = user_desc->size;
+ bufmap->desc_shift = ilog2(bufmap->desc_size);
+
+- bufmap->buffer_index_array =
+- kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
++ bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
+ if (!bufmap->buffer_index_array)
+ goto out_free_bufmap;
+
+@@ -253,7 +252,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+ out_free_desc_array:
+ kfree(bufmap->desc_array);
+ out_free_index_array:
+- kfree(bufmap->buffer_index_array);
++ bitmap_free(bufmap->buffer_index_array);
+ out_free_bufmap:
+ kfree(bufmap);
+ out:
+diff --git a/fs/super.c b/fs/super.c
+index 877532baf513d..b289356f302fc 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1470,8 +1470,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
+ }
+ EXPORT_SYMBOL(mount_nodev);
+
+-static int reconfigure_single(struct super_block *s,
+- int flags, void *data)
++int reconfigure_single(struct super_block *s,
++ int flags, void *data)
+ {
+ struct fs_context *fc;
+ int ret;
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index ba8a58754340d..ebcb91a57e865 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -135,6 +135,8 @@ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+ extern int vfs_get_tree(struct fs_context *fc);
+ extern void put_fs_context(struct fs_context *fc);
+ extern void fc_drop_locked(struct fs_context *fc);
++int reconfigure_single(struct super_block *s,
++ int flags, void *data);
+
+ /*
+ * sget() wrappers to be called from the ->get_tree() op.
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 68ccc5b1913b4..b7ac395513c0f 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1175,7 +1175,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
+ enum perf_bpf_event_type type,
+ u16 flags);
+
+-extern struct perf_guest_info_callbacks *perf_guest_cbs;
++extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
++static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
++{
++ /*
++ * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading
++ * the callbacks between a !NULL check and dereferences, to ensure
++ * pending stores/changes to the callback pointers are visible before a
++ * non-NULL perf_guest_cbs is visible to readers, and to prevent a
++ * module from unloading callbacks while readers are active.
++ */
++ return rcu_dereference(perf_guest_cbs);
++}
+ extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6ffe3d3e7b06d..7e124f9abb18b 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6045,18 +6045,25 @@ static void perf_pending_event(struct irq_work *entry)
+ * Later on, we might change it to a list if there is
+ * another virtualization implementation supporting the callbacks.
+ */
+-struct perf_guest_info_callbacks *perf_guest_cbs;
++struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+ int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+ {
+- perf_guest_cbs = cbs;
++ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
++ return -EBUSY;
++
++ rcu_assign_pointer(perf_guest_cbs, cbs);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
+
+ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+ {
+- perf_guest_cbs = NULL;
++ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
++ return -EINVAL;
++
++ rcu_assign_pointer(perf_guest_cbs, NULL);
++ synchronize_rcu();
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 022799479a722..d06205626cd54 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1926,6 +1926,7 @@ enum {
+ ALC887_FIXUP_ASUS_BASS,
+ ALC887_FIXUP_BASS_CHMAP,
+ ALC1220_FIXUP_GB_DUAL_CODECS,
++ ALC1220_FIXUP_GB_X570,
+ ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_CLEVO_PB51ED,
+ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
+@@ -2115,6 +2116,29 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
+ }
+ }
+
++static void alc1220_fixup_gb_x570(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ static const hda_nid_t conn1[] = { 0x0c };
++ static const struct coef_fw gb_x570_coefs[] = {
++ WRITE_COEF(0x1a, 0x01c1),
++ WRITE_COEF(0x1b, 0x0202),
++ WRITE_COEF(0x43, 0x3005),
++ {}
++ };
++
++ switch (action) {
++ case HDA_FIXUP_ACT_PRE_PROBE:
++ snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1);
++ snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1);
++ break;
++ case HDA_FIXUP_ACT_INIT:
++ alc_process_coef_fw(codec, gb_x570_coefs);
++ break;
++ }
++}
++
+ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+@@ -2417,6 +2441,10 @@ static const struct hda_fixup alc882_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc1220_fixup_gb_dual_codecs,
+ },
++ [ALC1220_FIXUP_GB_X570] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc1220_fixup_gb_x570,
++ },
+ [ALC1220_FIXUP_CLEVO_P950] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc1220_fixup_clevo_p950,
+@@ -2519,7 +2547,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+- SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
++ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
+ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
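
The common thread in the perf hunks above is that every call site now takes a single snapshot of perf_guest_cbs through the new perf_get_guest_cbs() helper, instead of re-reading the global pointer between the NULL check and each dereference. The userspace sketch below illustrates that pattern only; C11 atomics stand in for the kernel's rcu_dereference()/rcu_assign_pointer(), and the guest_cbs struct and all function names here are hypothetical stand-ins, not code from the patch.

/*
 * Illustrative stand-in for the "snapshot the callback pointer once" pattern
 * used by the backported perf_guest_cbs change. Plain C11 atomics replace the
 * kernel's RCU primitives; all names are made up for this example.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct guest_cbs {
	int (*is_in_guest)(void);
	unsigned long (*get_guest_ip)(void);
};

/* Shared by publisher and readers; NULL means "no guest callbacks". */
static _Atomic(struct guest_cbs *) guest_cbs_ptr = NULL;

/* Analogue of perf_get_guest_cbs(): load the pointer exactly once. */
static struct guest_cbs *get_guest_cbs(void)
{
	return atomic_load_explicit(&guest_cbs_ptr, memory_order_acquire);
}

static unsigned long instruction_pointer_example(unsigned long regs_ip)
{
	/*
	 * One snapshot: the NULL check and the calls below use the same
	 * value, so an unregistration racing with this reader cannot turn a
	 * passed check into a NULL dereference on a second read.
	 */
	struct guest_cbs *cbs = get_guest_cbs();

	if (cbs && cbs->is_in_guest())
		return cbs->get_guest_ip();

	return regs_ip;
}

/* Example callbacks a hypothetical hypervisor module might register. */
static int demo_is_in_guest(void) { return 1; }
static unsigned long demo_guest_ip(void) { return 0xdeadbeefUL; }

int main(void)
{
	static struct guest_cbs demo = {
		.is_in_guest  = demo_is_in_guest,
		.get_guest_ip = demo_guest_ip,
	};

	printf("no callbacks:   ip=%#lx\n", instruction_pointer_example(0x1000));

	/* Publish with release ordering, mirroring rcu_assign_pointer(). */
	atomic_store_explicit(&guest_cbs_ptr, &demo, memory_order_release);
	printf("with callbacks: ip=%#lx\n", instruction_pointer_example(0x1000));

	/*
	 * Unregister. The kernel additionally waits (synchronize_rcu()) so no
	 * reader still holds a snapshot before the callbacks may be freed;
	 * that grace period is not modelled here.
	 */
	atomic_store_explicit(&guest_cbs_ptr, NULL, memory_order_release);
	return 0;
}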