author    Mike Pagano <mpagano@gentoo.org>  2023-08-16 12:59:51 -0400
committer Mike Pagano <mpagano@gentoo.org>  2023-08-16 12:59:51 -0400
commit    40d5c3cfeaa1523339c66bc7bd5e5091545d2c9e (patch)
tree      21f3b033760f4a94d51817a15c8228a815025a29 /1253_linux-5.4.254.patch
parent    Linux patch 5.4.253 (diff)
download  linux-patches-40d5c3cfeaa1523339c66bc7bd5e5091545d2c9e.tar.gz
          linux-patches-40d5c3cfeaa1523339c66bc7bd5e5091545d2c9e.tar.bz2
          linux-patches-40d5c3cfeaa1523339c66bc7bd5e5091545d2c9e.zip
Linux patch 5.4.254
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
Diffstat (limited to '1253_linux-5.4.254.patch')
-rw-r--r--  1253_linux-5.4.254.patch  1852
1 file changed, 1852 insertions, 0 deletions
diff --git a/1253_linux-5.4.254.patch b/1253_linux-5.4.254.patch
new file mode 100644
index 00000000..a0d8af4d
--- /dev/null
+++ b/1253_linux-5.4.254.patch
@@ -0,0 +1,1852 @@
+diff --git a/Makefile b/Makefile
+index a0e3daefb01d9..bf7299823095f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 253
++SUBLEVEL = 254
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
+index 5d4c76a77a9f3..f1072b10913c1 100644
+--- a/arch/alpha/kernel/setup.c
++++ b/arch/alpha/kernel/setup.c
+@@ -394,8 +394,7 @@ setup_memory(void *kernel_end)
+ extern void setup_memory(void *);
+ #endif /* !CONFIG_DISCONTIGMEM */
+
+-int __init
+-page_is_ram(unsigned long pfn)
++int page_is_ram(unsigned long pfn)
+ {
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index 3613cfb83c6dc..8d7a4a2caf552 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -222,8 +222,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+- if (end >= TASK_SIZE_MAX)
+- end = TASK_SIZE_MAX;
++ if (end >= DEFAULT_MAP_WINDOW)
++ end = DEFAULT_MAP_WINDOW;
+ end -= len;
+
+ if (end > start) {
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 537c0dd4c3d4a..9cbd86cf0deba 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -986,4 +986,6 @@ enum taa_mitigations {
+ TAA_MITIGATION_TSX_DISABLED,
+ };
+
++extern bool gds_ucode_mitigated(void);
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 11f09df72f51a..fcffee447ba1f 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -72,6 +72,7 @@ static const int amd_erratum_1054[] =
+ static const int amd_zenbleed[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
++ AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
+ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index fcfe891c1e8e5..0c0c2cb038ad2 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -450,8 +450,6 @@ static bool pku_disabled;
+
+ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
+ {
+- struct pkru_state *pk;
+-
+ /* check the boot processor, plus compile options for PKU: */
+ if (!cpu_feature_enabled(X86_FEATURE_PKU))
+ return;
+@@ -462,9 +460,6 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
+ return;
+
+ cr4_set_bits(X86_CR4_PKE);
+- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
+- if (pk)
+- pk->pkru = init_pkru_value;
+ /*
+ * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
+ * cpuid bit to be set. We need to ensure that we
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 2ee3da99bc1d7..8dd1d1c81c791 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -226,8 +226,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
+
+ u64 __read_mostly host_xcr0;
+
+-extern bool gds_ucode_mitigated(void);
+-
+ struct kmem_cache *x86_fpu_cache;
+ EXPORT_SYMBOL_GPL(x86_fpu_cache);
+
+diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
+index c6f84c0b5d7a5..ca77af96033b9 100644
+--- a/arch/x86/mm/pkeys.c
++++ b/arch/x86/mm/pkeys.c
+@@ -10,7 +10,6 @@
+
+ #include <asm/cpufeature.h> /* boot_cpu_has, ... */
+ #include <asm/mmu_context.h> /* vma_pkey() */
+-#include <asm/fpu/internal.h> /* init_fpstate */
+
+ int __execute_only_pkey(struct mm_struct *mm)
+ {
+@@ -154,7 +153,6 @@ static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
+ static ssize_t init_pkru_write_file(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+ {
+- struct pkru_state *pk;
+ char buf[32];
+ ssize_t len;
+ u32 new_init_pkru;
+@@ -177,10 +175,6 @@ static ssize_t init_pkru_write_file(struct file *file,
+ return -EINVAL;
+
+ WRITE_ONCE(init_pkru_value, new_init_pkru);
+- pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
+- if (!pk)
+- return -EINVAL;
+- pk->pkru = new_init_pkru;
+ return count;
+ }
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 1c39cd12b755a..233c313376797 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -6555,6 +6555,7 @@ err_init_binder_device_failed:
+
+ err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
++ binder_alloc_shrinker_exit();
+
+ return ret;
+ }
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 7e48ed7c9c8e8..f766e889d2416 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -1037,6 +1037,12 @@ int binder_alloc_shrinker_init(void)
+ return ret;
+ }
+
++void binder_alloc_shrinker_exit(void)
++{
++ unregister_shrinker(&binder_shrinker);
++ list_lru_destroy(&binder_alloc_lru);
++}
++
+ /**
+ * check_buffer() - verify that buffer/offset is safe to access
+ * @alloc: binder_alloc for this proc
+diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
+index 288d0f478aa38..02a19afd9506c 100644
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -122,6 +122,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ int is_async);
+ extern void binder_alloc_init(struct binder_alloc *alloc);
+ extern int binder_alloc_shrinker_init(void);
++extern void binder_alloc_shrinker_exit(void);
+ extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+ extern struct binder_buffer *
+ binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
+index e12b754e6398d..60d3c5f09ad67 100644
+--- a/drivers/dma/mcf-edma.c
++++ b/drivers/dma/mcf-edma.c
+@@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- chans = pdata->dma_channels;
++ if (!pdata->dma_channels) {
++ dev_info(&pdev->dev, "setting default channel number to 64");
++ chans = 64;
++ } else {
++ chans = pdata->dma_channels;
++ }
++
+ len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
+ mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!mcf_edma)
+@@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
+ mcf_edma->drvdata = &mcf_data;
+ mcf_edma->big_endian = 1;
+
+- if (!mcf_edma->n_chans) {
+- dev_info(&pdev->dev, "setting default channel number to 64");
+- mcf_edma->n_chans = 64;
+- }
+-
+ mutex_init(&mcf_edma->fsl_edma_mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 3008ab258fa8e..1d7d4b8d810a5 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -402,6 +402,12 @@ enum desc_status {
+ * of a channel can be BUSY at any time.
+ */
+ BUSY,
++ /*
++ * Pause was called while descriptor was BUSY. Due to hardware
++ * limitations, only termination is possible for descriptors
++ * that have been paused.
++ */
++ PAUSED,
+ /*
+ * Sitting on the channel work_list but xfer done
+ * by PL330 core
+@@ -2035,7 +2041,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
+ list_for_each_entry(desc, &pch->work_list, node) {
+
+ /* If already submitted */
+- if (desc->status == BUSY)
++ if (desc->status == BUSY || desc->status == PAUSED)
+ continue;
+
+ ret = pl330_submit_req(pch->thread, desc);
+@@ -2322,6 +2328,7 @@ static int pl330_pause(struct dma_chan *chan)
+ {
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ struct pl330_dmac *pl330 = pch->dmac;
++ struct dma_pl330_desc *desc;
+ unsigned long flags;
+
+ pm_runtime_get_sync(pl330->ddma.dev);
+@@ -2331,6 +2338,10 @@ static int pl330_pause(struct dma_chan *chan)
+ _stop(pch->thread);
+ spin_unlock(&pl330->lock);
+
++ list_for_each_entry(desc, &pch->work_list, node) {
++ if (desc->status == BUSY)
++ desc->status = PAUSED;
++ }
+ spin_unlock_irqrestore(&pch->lock, flags);
+ pm_runtime_mark_last_busy(pl330->ddma.dev);
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
+@@ -2421,7 +2432,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ else if (running && desc == running)
+ transferred =
+ pl330_get_current_xferred_count(pch, desc);
+- else if (desc->status == BUSY)
++ else if (desc->status == BUSY || desc->status == PAUSED)
+ /*
+ * Busy but not running means either just enqueued,
+ * or finished and not yet marked done
+@@ -2438,6 +2449,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ case DONE:
+ ret = DMA_COMPLETE;
+ break;
++ case PAUSED:
++ ret = DMA_PAUSED;
++ break;
+ case PREP:
+ case BUSY:
+ ret = DMA_IN_PROGRESS;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 21a36cf889746..4e28e89b51dc0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -955,7 +955,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ /* Determine display colour depth for everything except LVDS now,
+ * DP requires this before mode_valid() is called.
+ */
+- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
++ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
+ /* Find the native mode if this is a digital panel, if we didn't
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+index 478b4723d0f9c..c88ceb486f3b3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+@@ -121,6 +121,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
+
+ extern const struct gf100_grctx_func gk110_grctx;
+ void gk110_grctx_generate_r419eb0(struct gf100_gr *);
++void gk110_grctx_generate_r419f78(struct gf100_gr *);
+
+ extern const struct gf100_grctx_func gk110b_grctx;
+ extern const struct gf100_grctx_func gk208_grctx;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+index 304e9d268bad4..f894f82548242 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+@@ -916,7 +916,9 @@ static void
+ gk104_grctx_generate_r419f78(struct gf100_gr *gr)
+ {
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+- nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
++
++ /* bit 3 set disables loads in fp helper invocations, we need it enabled */
++ nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
+ }
+
+ void
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+index 86547cfc38dce..e88740d4e54d4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
+ nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
+ }
+
++void
++gk110_grctx_generate_r419f78(struct gf100_gr *gr)
++{
++ struct nvkm_device *device = gr->base.engine.subdev.device;
++
++ /* bit 3 set disables loads in fp helper invocations, we need it enabled */
++ nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
++}
++
+ const struct gf100_grctx_func
+ gk110_grctx = {
+ .main = gf100_grctx_generate_main,
+@@ -852,4 +861,5 @@ gk110_grctx = {
+ .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ .r418800 = gk104_grctx_generate_r418800,
+ .r419eb0 = gk110_grctx_generate_r419eb0,
++ .r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+index ebb947bd1446b..086e4d49e1121 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+@@ -101,4 +101,5 @@ gk110b_grctx = {
+ .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ .r418800 = gk104_grctx_generate_r418800,
+ .r419eb0 = gk110_grctx_generate_r419eb0,
++ .r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+index 4d40512b5c998..0bf438c3f7cbc 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+@@ -566,4 +566,5 @@ gk208_grctx = {
+ .dist_skip_table = gf117_grctx_generate_dist_skip_table,
+ .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ .r418800 = gk104_grctx_generate_r418800,
++ .r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+index 0b3964e6b36e2..acdf0932a99e1 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+@@ -991,4 +991,5 @@ gm107_grctx = {
+ .r406500 = gm107_grctx_generate_r406500,
+ .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
+ .r419e00 = gm107_grctx_generate_r419e00,
++ .r419f78 = gk110_grctx_generate_r419f78,
+ };
+diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+index b4f394f058636..44553eccb30d0 100644
+--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
++++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+@@ -99,7 +99,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
+ platform_set_drvdata(pdev, indio_dev);
+
+ state->ec = ec->ec_dev;
+- state->msg = devm_kzalloc(&pdev->dev,
++ state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
+ max((u16)sizeof(struct ec_params_motion_sense),
+ state->ec->max_response), GFP_KERNEL);
+ if (!state->msg)
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 10924f1220720..65d6bf34614c8 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -12191,6 +12191,7 @@ static void free_cntrs(struct hfi1_devdata *dd)
+
+ if (dd->synth_stats_timer.function)
+ del_timer_sync(&dd->synth_stats_timer);
++ cancel_work_sync(&dd->update_cntr_work);
+ ppd = (struct hfi1_pportdata *)(dd + 1);
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ kfree(ppd->cntrs);
+diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
+index fa09d511a8eda..baf31258f5c90 100644
+--- a/drivers/isdn/mISDN/dsp.h
++++ b/drivers/isdn/mISDN/dsp.h
+@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
+ extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
+ extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
+ extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
+-extern void dsp_cmx_send(void *arg);
++extern void dsp_cmx_send(struct timer_list *arg);
+ extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
+ extern int dsp_cmx_del_conf_member(struct dsp *dsp);
+ extern int dsp_cmx_del_conf(struct dsp_conf *conf);
+diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
+index 6d2088fbaf69c..1b73af5013976 100644
+--- a/drivers/isdn/mISDN/dsp_cmx.c
++++ b/drivers/isdn/mISDN/dsp_cmx.c
+@@ -1625,7 +1625,7 @@ static u16 dsp_count; /* last sample count */
+ static int dsp_count_valid; /* if we have last sample count */
+
+ void
+-dsp_cmx_send(void *arg)
++dsp_cmx_send(struct timer_list *arg)
+ {
+ struct dsp_conf *conf;
+ struct dsp_conf_member *member;
+diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
+index 038e72a84b334..5b954012e3948 100644
+--- a/drivers/isdn/mISDN/dsp_core.c
++++ b/drivers/isdn/mISDN/dsp_core.c
+@@ -1200,7 +1200,7 @@ static int __init dsp_init(void)
+ }
+
+ /* set sample timer */
+- timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
++ timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
+ dsp_spl_tl.expires = jiffies + dsp_tics;
+ dsp_spl_jiffies = dsp_spl_tl.expires;
+ add_timer(&dsp_spl_tl);
+diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
+index 52307dce08ba8..0c392b41a858a 100644
+--- a/drivers/mmc/host/moxart-mmc.c
++++ b/drivers/mmc/host/moxart-mmc.c
+@@ -339,13 +339,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
+ return;
+ }
+ for (len = 0; len < remain && len < host->fifo_width;) {
+- /* SCR data must be read in big endian. */
+- if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
+- *sgp = ioread32be(host->base +
+- REG_DATA_WINDOW);
+- else
+- *sgp = ioread32(host->base +
+- REG_DATA_WINDOW);
++ *sgp = ioread32(host->base + REG_DATA_WINDOW);
+ sgp++;
+ len += 4;
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index dcaefb47d1f2d..afd327e88cf5e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4451,7 +4451,9 @@ void bond_setup(struct net_device *bond_dev)
+
+ bond_dev->hw_features = BOND_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_CTAG_RX |
+- NETIF_F_HW_VLAN_CTAG_FILTER;
++ NETIF_F_HW_VLAN_CTAG_FILTER |
++ NETIF_F_HW_VLAN_STAG_RX |
++ NETIF_F_HW_VLAN_STAG_FILTER;
+
+ bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ bond_dev->features |= bond_dev->hw_features;
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index bc313d85fe13a..ce1ab8d59eb00 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -873,12 +873,22 @@ static int ibmvnic_login(struct net_device *netdev)
+
+ static void release_login_buffer(struct ibmvnic_adapter *adapter)
+ {
++ if (!adapter->login_buf)
++ return;
++
++ dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
++ adapter->login_buf_sz, DMA_TO_DEVICE);
+ kfree(adapter->login_buf);
+ adapter->login_buf = NULL;
+ }
+
+ static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
+ {
++ if (!adapter->login_rsp_buf)
++ return;
++
++ dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
++ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
+ kfree(adapter->login_rsp_buf);
+ adapter->login_rsp_buf = NULL;
+ }
+@@ -4298,11 +4308,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
+ struct ibmvnic_login_buffer *login = adapter->login_buf;
+ int i;
+
+- dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
+- DMA_TO_DEVICE);
+- dma_unmap_single(dev, adapter->login_rsp_buf_token,
+- adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
+-
+ /* If the number of queues requested can't be allocated by the
+ * server, the login response will return with code 1. We will need
+ * to resend the login buffer with fewer queues requested.
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+index 61fcfd8b39b4a..7fa805bb0e38c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -211,8 +211,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
+ host_total_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_total_vfs);
+ kvfree(out);
+- if (host_total_vfs)
+- return host_total_vfs;
++ return host_total_vfs;
+ }
+
+ done:
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 309a0dd16bdc1..51cc8768d910c 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1672,7 +1672,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
+ if (zerocopy)
+ return false;
+
+- if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
++ if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
+ return false;
+
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index d5d7b2f98edc9..6de507d88f5f6 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -905,6 +905,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ goto out_cleanup_connect_q;
+
+ if (!new) {
++ nvme_start_freeze(&ctrl->ctrl);
+ nvme_start_queues(&ctrl->ctrl);
+ if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+ /*
+@@ -913,6 +914,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ * to be safe.
+ */
+ ret = -ENODEV;
++ nvme_unfreeze(&ctrl->ctrl);
+ goto out_wait_freeze_timed_out;
+ }
+ blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
+@@ -958,7 +960,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+ {
+ if (ctrl->ctrl.queue_count > 1) {
+- nvme_start_freeze(&ctrl->ctrl);
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 4250081595c14..61296032ce6de 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1707,6 +1707,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ goto out_cleanup_connect_q;
+
+ if (!new) {
++ nvme_start_freeze(ctrl);
+ nvme_start_queues(ctrl);
+ if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+ /*
+@@ -1715,6 +1716,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ * to be safe.
+ */
+ ret = -ENODEV;
++ nvme_unfreeze(ctrl);
+ goto out_wait_freeze_timed_out;
+ }
+ blk_mq_update_nr_hw_queues(ctrl->tagset,
+@@ -1837,7 +1839,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ if (ctrl->queue_count <= 1)
+ return;
+ blk_mq_quiesce_queue(ctrl->admin_q);
+- nvme_start_freeze(ctrl);
+ nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
+index 0068963bb933b..0b7e9b1a89466 100644
+--- a/drivers/scsi/53c700.c
++++ b/drivers/scsi/53c700.c
+@@ -1581,7 +1581,7 @@ NCR_700_intr(int irq, void *dev_id)
+ printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
+ #endif
+ resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
+- } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
++ } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
+ dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
+ int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
+ int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
+diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
+index 898a0bdf8df67..711252e52d8e1 100644
+--- a/drivers/scsi/raid_class.c
++++ b/drivers/scsi/raid_class.c
+@@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
+ return 0;
+
+ err_out:
++ put_device(&rc->dev);
+ list_del(&rc->node);
+ rd->component_count--;
+ put_device(component_dev);
+diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
+index 5b313226f11c6..0330890cc55d7 100644
+--- a/drivers/scsi/scsi_proc.c
++++ b/drivers/scsi/scsi_proc.c
+@@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ size_t length, loff_t *ppos)
+ {
+ int host, channel, id, lun;
+- char *buffer, *p;
++ char *buffer, *end, *p;
+ int err;
+
+ if (!buf || length > PAGE_SIZE)
+@@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ goto out;
+
+ err = -EINVAL;
+- if (length < PAGE_SIZE)
+- buffer[length] = '\0';
+- else if (buffer[PAGE_SIZE-1])
+- goto out;
++ if (length < PAGE_SIZE) {
++ end = buffer + length;
++ *end = '\0';
++ } else {
++ end = buffer + PAGE_SIZE - 1;
++ if (*end)
++ goto out;
++ }
+
+ /*
+ * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
+@@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ if (!strncmp("scsi add-single-device", buffer, 22)) {
+ p = buffer + 23;
+
+- host = simple_strtoul(p, &p, 0);
+- channel = simple_strtoul(p + 1, &p, 0);
+- id = simple_strtoul(p + 1, &p, 0);
+- lun = simple_strtoul(p + 1, &p, 0);
++ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
++ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+
+ err = scsi_add_single_device(host, channel, id, lun);
+
+@@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
+ } else if (!strncmp("scsi remove-single-device", buffer, 25)) {
+ p = buffer + 26;
+
+- host = simple_strtoul(p, &p, 0);
+- channel = simple_strtoul(p + 1, &p, 0);
+- id = simple_strtoul(p + 1, &p, 0);
+- lun = simple_strtoul(p + 1, &p, 0);
++ host = (p < end) ? simple_strtoul(p, &p, 0) : 0;
++ channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++ id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
++ lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
+
+ err = scsi_remove_single_device(host, channel, id, lun);
+ }
+diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
+index 7cf871323b2c4..c445853c623e2 100644
+--- a/drivers/scsi/snic/snic_disc.c
++++ b/drivers/scsi/snic/snic_disc.c
+@@ -317,6 +317,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
+ "Snic Tgt: device_add, with err = %d\n",
+ ret);
+
++ put_device(&tgt->dev);
+ put_device(&snic->shost->shost_gendev);
+ spin_lock_irqsave(snic->shost->host_lock, flags);
+ list_del(&tgt->list);
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 8d1b19b2322f5..823088c7b199e 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1526,10 +1526,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ */
+ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+ {
+-#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+- if (scmnd->device->host->transportt == fc_transport_template)
+- return fc_eh_timed_out(scmnd);
+-#endif
+ return BLK_EH_RESET_TIMER;
+ }
+
+diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
+index ed204cbb63ea1..67d00d0ef621c 100644
+--- a/drivers/usb/common/usb-conn-gpio.c
++++ b/drivers/usb/common/usb-conn-gpio.c
+@@ -38,6 +38,7 @@ struct usb_conn_info {
+ struct gpio_desc *vbus_gpiod;
+ int id_irq;
+ int vbus_irq;
++ bool initial_detection;
+ };
+
+ /**
+@@ -82,11 +83,13 @@ static void usb_conn_detect_cable(struct work_struct *work)
+ dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n",
+ info->last_role, role, id, vbus);
+
+- if (info->last_role == role) {
++ if (!info->initial_detection && info->last_role == role) {
+ dev_warn(info->dev, "repeated role: %d\n", role);
+ return;
+ }
+
++ info->initial_detection = false;
++
+ if (info->last_role == USB_ROLE_HOST)
+ regulator_disable(info->vbus);
+
+@@ -206,6 +209,7 @@ static int usb_conn_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, info);
+
+ /* Perform initial detection */
++ info->initial_detection = true;
+ usb_conn_queue_dwork(info, 0);
+
+ return 0;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index bda9fd7157e47..73d3408c6a65e 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3589,9 +3589,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
+ u32 reg;
+
+ if (pm_runtime_suspended(dwc->dev)) {
++ dwc->pending_events = true;
++ /*
++ * Trigger runtime resume. The get() function will be balanced
++ * after processing the pending events in dwc3_process_pending
++ * events().
++ */
+ pm_runtime_get(dwc->dev);
+ disable_irq_nosync(dwc->irq_gadget);
+- dwc->pending_events = true;
+ return IRQ_HANDLED;
+ }
+
+@@ -3827,6 +3832,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+ {
+ if (dwc->pending_events) {
+ dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
++ dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf);
++ pm_runtime_put(dwc->dev);
+ dwc->pending_events = false;
+ enable_irq(dwc->irq_gadget);
+ }
+diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
+index de62421d96709..dcc4778d1ae99 100644
+--- a/drivers/usb/storage/alauda.c
++++ b/drivers/usb/storage/alauda.c
+@@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data)
+ rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe,
+ command, 0xc0, 0, 1, data, 2);
+
+- usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
++ if (rc == USB_STOR_XFER_GOOD)
++ usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]);
+
+ return rc;
+ }
+@@ -454,10 +455,14 @@ static int alauda_init_media(struct us_data *us)
+ static int alauda_check_media(struct us_data *us)
+ {
+ struct alauda_info *info = (struct alauda_info *) us->extra;
+- unsigned char status[2];
++ unsigned char *status = us->iobuf;
+ int rc;
+
+ rc = alauda_get_media_status(us, status);
++ if (rc != USB_STOR_XFER_GOOD) {
++ status[0] = 0xF0; /* Pretend there's no media */
++ status[1] = 0;
++ }
+
+ /* Check for no media or door open */
+ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10)
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 19d2104c04629..e47f53e780890 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3989,8 +3989,11 @@ have_block_group:
+ ret = 0;
+ }
+
+- if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
++ if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
++ if (!cache_block_group_error)
++ cache_block_group_error = -EIO;
+ goto loop;
++ }
+
+ /*
+ * Ok we want to try and use the cluster allocator, so
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 95ddeb4777970..04788940afafc 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4024,11 +4024,12 @@ retry:
+ free_extent_buffer(eb);
+
+ /*
+- * the filesystem may choose to bump up nr_to_write.
++ * The filesystem may choose to bump up nr_to_write.
+ * We have to make sure to honor the new nr_to_write
+- * at any time
++ * at any time.
+ */
+- nr_to_write_done = wbc->nr_to_write <= 0;
++ nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
++ wbc->nr_to_write <= 0);
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 53ec342eb787c..4eed9500f33a7 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -1112,9 +1112,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
+
+ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
+ {
++ struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
+ struct buffer_head *ibh;
+ int err;
+
++ /*
++ * Do not dirty inodes after the log writer has been detached
++ * and its nilfs_root struct has been freed.
++ */
++ if (unlikely(nilfs_purging(nilfs)))
++ return 0;
++
+ err = nilfs_load_inode_block(inode, &ibh);
+ if (unlikely(err)) {
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index d9e0b2b2b5552..04e1e671b6134 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2845,6 +2845,7 @@ void nilfs_detach_log_writer(struct super_block *sb)
+ nilfs_segctor_destroy(nilfs->ns_writer);
+ nilfs->ns_writer = NULL;
+ }
++ set_nilfs_purging(nilfs);
+
+ /* Force to free the list of dirty files */
+ spin_lock(&nilfs->ns_inode_lock);
+@@ -2857,4 +2858,5 @@ void nilfs_detach_log_writer(struct super_block *sb)
+ up_write(&nilfs->ns_segctor_sem);
+
+ nilfs_dispose_list(nilfs, &garbage_list, 1);
++ clear_nilfs_purging(nilfs);
+ }
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index 380a543c5b19b..de6e24d80eb65 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -29,6 +29,7 @@ enum {
+ THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
+ THE_NILFS_GC_RUNNING, /* gc process is running */
+ THE_NILFS_SB_DIRTY, /* super block is dirty */
++ THE_NILFS_PURGING, /* disposing dirty files for cleanup */
+ };
+
+ /**
+@@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init)
+ THE_NILFS_FNS(DISCONTINUED, discontinued)
+ THE_NILFS_FNS(GC_RUNNING, gc_running)
+ THE_NILFS_FNS(SB_DIRTY, sb_dirty)
++THE_NILFS_FNS(PURGING, purging)
+
+ /*
+ * Mount option operations
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 69b9ccbe1ad0f..2dfa3331604e4 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -436,6 +436,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ return NULL;
+
++ if (iftype == NL80211_IFTYPE_AP_VLAN)
++ iftype = NL80211_IFTYPE_AP;
++
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *data =
+ &sband->iftype_data[i];
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 7ab13f515749d..a2d1ec4aba1a0 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1013,6 +1013,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
+
+ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
+
++static inline bool nft_use_inc(u32 *use)
++{
++ if (*use == UINT_MAX)
++ return false;
++
++ (*use)++;
++
++ return true;
++}
++
++static inline void nft_use_dec(u32 *use)
++{
++ WARN_ON_ONCE((*use)-- == 0);
++}
++
++/* For error and abort path: restore use counter to previous state. */
++static inline void nft_use_inc_restore(u32 *use)
++{
++ WARN_ON_ONCE(!nft_use_inc(use));
++}
++
++#define nft_use_dec_restore nft_use_dec
++
+ /**
+ * struct nft_table - nf_tables table
+ *
+@@ -1082,8 +1105,8 @@ struct nft_object {
+ struct list_head list;
+ struct rhlist_head rhlhead;
+ struct nft_object_hash_key key;
+- u32 genmask:2,
+- use:30;
++ u32 genmask:2;
++ u32 use;
+ u64 handle;
+ /* runtime data below here */
+ const struct nft_object_ops *ops ____cacheline_aligned;
+@@ -1185,8 +1208,8 @@ struct nft_flowtable {
+ int hooknum;
+ int priority;
+ int ops_len;
+- u32 genmask:2,
+- use:30;
++ u32 genmask:2;
++ u32 use;
+ u64 handle;
+ /* runtime data below here */
+ struct nf_hook_ops *ops ____cacheline_aligned;
+diff --git a/net/dccp/output.c b/net/dccp/output.c
+index 6433187a5cc44..7490b4f09bbba 100644
+--- a/net/dccp/output.c
++++ b/net/dccp/output.c
+@@ -185,7 +185,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
+
+ /* And store cached results */
+ icsk->icsk_pmtu_cookie = pmtu;
+- dp->dccps_mss_cache = cur_mps;
++ WRITE_ONCE(dp->dccps_mss_cache, cur_mps);
+
+ return cur_mps;
+ }
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 3a3a2e1800be7..cd868556452ec 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -644,7 +644,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
+ return dccp_getsockopt_service(sk, len,
+ (__be32 __user *)optval, optlen);
+ case DCCP_SOCKOPT_GET_CUR_MPS:
+- val = dp->dccps_mss_cache;
++ val = READ_ONCE(dp->dccps_mss_cache);
+ break;
+ case DCCP_SOCKOPT_AVAILABLE_CCIDS:
+ return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
+@@ -766,7 +766,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ trace_dccp_probe(sk, len);
+
+- if (len > dp->dccps_mss_cache)
++ if (len > READ_ONCE(dp->dccps_mss_cache))
+ return -EMSGSIZE;
+
+ lock_sock(sk);
+@@ -799,6 +799,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ goto out_discard;
+ }
+
++ /* We need to check dccps_mss_cache after socket is locked. */
++ if (len > dp->dccps_mss_cache) {
++ rc = -EMSGSIZE;
++ goto out_discard;
++ }
++
+ skb_reserve(skb, sk->sk_prot->max_header);
+ rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ if (rc != 0)
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 118e19cabb72b..74977ec77c576 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -196,7 +196,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
+ static inline int ndisc_is_useropt(const struct net_device *dev,
+ struct nd_opt_hdr *opt)
+ {
+- return opt->nd_opt_type == ND_OPT_RDNSS ||
++ return opt->nd_opt_type == ND_OPT_PREFIX_INFO ||
++ opt->nd_opt_type == ND_OPT_RDNSS ||
+ opt->nd_opt_type == ND_OPT_DNSSL ||
+ opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
+ ndisc_ops_is_useropt(dev, opt->nd_opt_type);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7d22bc8aa2787..a1a1f715fb624 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -282,7 +282,7 @@ static int nft_delchain(struct nft_ctx *ctx)
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+- ctx->table->use--;
++ nft_use_dec(&ctx->table->use);
+ nft_deactivate_next(ctx->net, ctx->chain);
+
+ return 0;
+@@ -323,7 +323,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
+ /* You cannot delete the same rule twice */
+ if (nft_is_active_next(ctx->net, rule)) {
+ nft_deactivate_next(ctx->net, rule);
+- ctx->chain->use--;
++ nft_use_dec(&ctx->chain->use);
+ return 0;
+ }
+ return -ENOENT;
+@@ -412,7 +412,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
+ return err;
+
+ nft_deactivate_next(ctx->net, set);
+- ctx->table->use--;
++ nft_use_dec(&ctx->table->use);
+
+ return err;
+ }
+@@ -444,7 +444,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
+ return err;
+
+ nft_deactivate_next(ctx->net, obj);
+- ctx->table->use--;
++ nft_use_dec(&ctx->table->use);
+
+ return err;
+ }
+@@ -478,7 +478,7 @@ static int nft_delflowtable(struct nft_ctx *ctx,
+ return err;
+
+ nft_deactivate_next(ctx->net, flowtable);
+- ctx->table->use--;
++ nft_use_dec(&ctx->table->use);
+
+ return err;
+ }
+@@ -1715,9 +1715,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ struct nft_rule **rules;
+ int err;
+
+- if (table->use == UINT_MAX)
+- return -EOVERFLOW;
+-
+ if (nla[NFTA_CHAIN_HOOK]) {
+ struct nft_chain_hook hook;
+ struct nf_hook_ops *ops;
+@@ -1794,6 +1791,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ if (err < 0)
+ goto err1;
+
++ if (!nft_use_inc(&table->use)) {
++ err = -EMFILE;
++ goto err_use;
++ }
++
+ err = rhltable_insert_key(&table->chains_ht, chain->name,
+ &chain->rhlhead, nft_chain_ht_params);
+ if (err)
+@@ -1811,11 +1813,12 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ if (nft_is_base_chain(chain))
+ nft_trans_chain_policy(trans) = policy;
+
+- table->use++;
+ list_add_tail_rcu(&chain->list, &table->chains);
+
+ return 0;
+ err2:
++ nft_use_dec_restore(&table->use);
++err_use:
+ nf_tables_unregister_hook(net, table, chain);
+ err1:
+ nf_tables_chain_destroy(ctx);
+@@ -2831,9 +2834,6 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ return -EINVAL;
+ handle = nf_tables_alloc_handle(table);
+
+- if (chain->use == UINT_MAX)
+- return -EOVERFLOW;
+-
+ if (nla[NFTA_RULE_POSITION]) {
+ pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+ old_rule = __nft_rule_lookup(chain, pos_handle);
+@@ -2915,16 +2915,21 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ expr = nft_expr_next(expr);
+ }
+
++ if (!nft_use_inc(&chain->use)) {
++ err = -EMFILE;
++ goto err2;
++ }
++
+ if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+ trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
+ if (trans == NULL) {
+ err = -ENOMEM;
+- goto err2;
++ goto err_destroy_flow_rule;
+ }
+ err = nft_delrule(&ctx, old_rule);
+ if (err < 0) {
+ nft_trans_destroy(trans);
+- goto err2;
++ goto err_destroy_flow_rule;
+ }
+
+ list_add_tail_rcu(&rule->list, &old_rule->list);
+@@ -2932,7 +2937,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
+ if (!trans) {
+ err = -ENOMEM;
+- goto err2;
++ goto err_destroy_flow_rule;
+ }
+
+ if (nlh->nlmsg_flags & NLM_F_APPEND) {
+@@ -2948,7 +2953,6 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ }
+ }
+ kvfree(info);
+- chain->use++;
+
+ if (nft_net->validate_state == NFT_VALIDATE_DO)
+ return nft_table_validate(net, table);
+@@ -2962,6 +2966,9 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ }
+
+ return 0;
++
++err_destroy_flow_rule:
++ nft_use_dec_restore(&chain->use);
+ err2:
+ nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE_ERROR);
+ nf_tables_rule_destroy(&ctx, rule);
+@@ -3775,10 +3782,15 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ if (ops->privsize != NULL)
+ size = ops->privsize(nla, &desc);
+
++ if (!nft_use_inc(&table->use)) {
++ err = -EMFILE;
++ goto err1;
++ }
++
+ set = kvzalloc(sizeof(*set) + size + udlen, GFP_KERNEL);
+ if (!set) {
+ err = -ENOMEM;
+- goto err1;
++ goto err_alloc;
+ }
+
+ name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL);
+@@ -3825,7 +3837,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ goto err4;
+
+ list_add_tail_rcu(&set->list, &table->sets);
+- table->use++;
++
+ return 0;
+
+ err4:
+@@ -3834,6 +3846,8 @@ err3:
+ kfree(set->name);
+ err2:
+ kvfree(set);
++err_alloc:
++ nft_use_dec_restore(&table->use);
+ err1:
+ module_put(to_set_type(ops)->owner);
+ return err;
+@@ -3920,9 +3934,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *i;
+ struct nft_set_iter iter;
+
+- if (set->use == UINT_MAX)
+- return -EOVERFLOW;
+-
+ if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
+ return -EBUSY;
+
+@@ -3947,10 +3958,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ return iter.err;
+ }
+ bind:
++ if (!nft_use_inc(&set->use))
++ return -EMFILE;
++
+ binding->chain = ctx->chain;
+ list_add_tail_rcu(&binding->list, &set->bindings);
+ nft_set_trans_bind(ctx, set);
+- set->use++;
+
+ return 0;
+ }
+@@ -3974,7 +3987,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+ if (nft_set_is_anonymous(set))
+ nft_clear(ctx->net, set);
+
+- set->use++;
++ nft_use_inc_restore(&set->use);
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
+@@ -3990,17 +4003,17 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ else
+ list_del_rcu(&binding->list);
+
+- set->use--;
++ nft_use_dec(&set->use);
+ break;
+ case NFT_TRANS_PREPARE:
+ if (nft_set_is_anonymous(set))
+ nft_deactivate_next(ctx->net, set);
+
+- set->use--;
++ nft_use_dec(&set->use);
+ return;
+ case NFT_TRANS_ABORT:
+ case NFT_TRANS_RELEASE:
+- set->use--;
++ nft_use_dec(&set->use);
+ /* fall through */
+ default:
+ nf_tables_unbind_set(ctx, set, binding,
+@@ -4585,7 +4598,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+ }
+ }
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+- (*nft_set_ext_obj(ext))->use--;
++ nft_use_dec(&(*nft_set_ext_obj(ext))->use);
+ kfree(elem);
+ }
+ EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
+@@ -4706,8 +4719,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ set->objtype, genmask);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
++ obj = NULL;
++ goto err2;
++ }
++
++ if (!nft_use_inc(&obj->use)) {
++ err = -EMFILE;
++ obj = NULL;
+ goto err2;
+ }
++
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
+ }
+
+@@ -4772,10 +4793,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ udata->len = ulen - 1;
+ nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen);
+ }
+- if (obj) {
++ if (obj)
+ *nft_set_ext_obj(ext) = obj;
+- obj->use++;
+- }
+
+ trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
+ if (trans == NULL)
+@@ -4821,13 +4840,14 @@ err6:
+ err5:
+ kfree(trans);
+ err4:
+- if (obj)
+- obj->use--;
+ kfree(elem.priv);
+ err3:
+ if (nla[NFTA_SET_ELEM_DATA] != NULL)
+ nft_data_release(&elem.data.val, desc.type);
+ err2:
++ if (obj)
++ nft_use_dec_restore(&obj->use);
++
+ nft_data_release(&elem.key.val, NFT_DATA_VALUE);
+ err1:
+ return err;
+@@ -4887,11 +4907,14 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
+ */
+ void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+ {
++ struct nft_chain *chain;
++
+ if (type == NFT_DATA_VERDICT) {
+ switch (data->verdict.code) {
+ case NFT_JUMP:
+ case NFT_GOTO:
+- data->verdict.chain->use++;
++ chain = data->verdict.chain;
++ nft_use_inc_restore(&chain->use);
+ break;
+ }
+ }
+@@ -4906,7 +4929,7 @@ static void nft_set_elem_activate(const struct net *net,
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ nft_data_hold(nft_set_ext_data(ext), set->dtype);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+- (*nft_set_ext_obj(ext))->use++;
++ nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use);
+ }
+
+ static void nft_set_elem_deactivate(const struct net *net,
+@@ -4918,7 +4941,7 @@ static void nft_set_elem_deactivate(const struct net *net,
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ nft_data_release(nft_set_ext_data(ext), set->dtype);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
+- (*nft_set_ext_obj(ext))->use--;
++ nft_use_dec(&(*nft_set_ext_obj(ext))->use);
+ }
+
+ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
+@@ -5375,9 +5398,14 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
+
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+
++ if (!nft_use_inc(&table->use))
++ return -EMFILE;
++
+ type = nft_obj_type_get(net, objtype);
+- if (IS_ERR(type))
+- return PTR_ERR(type);
++ if (IS_ERR(type)) {
++ err = PTR_ERR(type);
++ goto err_type;
++ }
+
+ obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
+ if (IS_ERR(obj)) {
+@@ -5403,7 +5431,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
+ goto err4;
+
+ list_add_tail_rcu(&obj->list, &table->objects);
+- table->use++;
++
+ return 0;
+ err4:
+ /* queued in transaction log */
+@@ -5417,6 +5445,9 @@ err2:
+ kfree(obj);
+ err1:
+ module_put(type->owner);
++err_type:
++ nft_use_dec_restore(&table->use);
++
+ return err;
+ }
+
+@@ -5761,7 +5792,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
+ case NFT_TRANS_PREPARE:
+ case NFT_TRANS_ABORT:
+ case NFT_TRANS_RELEASE:
+- flowtable->use--;
++ nft_use_dec(&flowtable->use);
+ /* fall through */
+ default:
+ return;
+@@ -5967,9 +5998,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
+
+ nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
+
++ if (!nft_use_inc(&table->use))
++ return -EMFILE;
++
+ flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL);
+- if (!flowtable)
+- return -ENOMEM;
++ if (!flowtable) {
++ err = -ENOMEM;
++ goto flowtable_alloc;
++ }
+
+ flowtable->table = table;
+ flowtable->handle = nf_tables_alloc_handle(table);
+@@ -6023,7 +6059,6 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
+ goto err6;
+
+ list_add_tail_rcu(&flowtable->list, &table->flowtables);
+- table->use++;
+
+ return 0;
+ err6:
+@@ -6041,6 +6076,9 @@ err2:
+ kfree(flowtable->name);
+ err1:
+ kfree(flowtable);
++flowtable_alloc:
++ nft_use_dec_restore(&table->use);
++
+ return err;
+ }
+
+@@ -7035,7 +7073,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ */
+ if (nft_set_is_anonymous(nft_trans_set(trans)) &&
+ !list_empty(&nft_trans_set(trans)->bindings))
+- trans->ctx.table->use--;
++ nft_use_dec(&trans->ctx.table->use);
+
+ nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+ NFT_MSG_NEWSET, GFP_KERNEL);
+@@ -7188,7 +7226,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ kfree(nft_trans_chain_name(trans));
+ nft_trans_destroy(trans);
+ } else {
+- trans->ctx.table->use--;
++ nft_use_dec_restore(&trans->ctx.table->use);
+ nft_chain_del(trans->ctx.chain);
+ nf_tables_unregister_hook(trans->ctx.net,
+ trans->ctx.table,
+@@ -7196,25 +7234,25 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ }
+ break;
+ case NFT_MSG_DELCHAIN:
+- trans->ctx.table->use++;
++ nft_use_inc_restore(&trans->ctx.table->use);
+ nft_clear(trans->ctx.net, trans->ctx.chain);
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_NEWRULE:
+- trans->ctx.chain->use--;
++ nft_use_dec_restore(&trans->ctx.chain->use);
+ list_del_rcu(&nft_trans_rule(trans)->list);
+ nft_rule_expr_deactivate(&trans->ctx,
+ nft_trans_rule(trans),
+ NFT_TRANS_ABORT);
+ break;
+ case NFT_MSG_DELRULE:
+- trans->ctx.chain->use++;
++ nft_use_inc_restore(&trans->ctx.chain->use);
+ nft_clear(trans->ctx.net, nft_trans_rule(trans));
+ nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_NEWSET:
+- trans->ctx.table->use--;
++ nft_use_dec_restore(&trans->ctx.table->use);
+ if (nft_trans_set_bound(trans)) {
+ nft_trans_destroy(trans);
+ break;
+@@ -7222,7 +7260,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ list_del_rcu(&nft_trans_set(trans)->list);
+ break;
+ case NFT_MSG_DELSET:
+- trans->ctx.table->use++;
++ nft_use_inc_restore(&trans->ctx.table->use);
+ nft_clear(trans->ctx.net, nft_trans_set(trans));
+ nft_trans_destroy(trans);
+ break;
+@@ -7249,23 +7287,23 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
+ nft_trans_destroy(trans);
+ } else {
+- trans->ctx.table->use--;
++ nft_use_dec_restore(&trans->ctx.table->use);
+ nft_obj_del(nft_trans_obj(trans));
+ }
+ break;
+ case NFT_MSG_DELOBJ:
+- trans->ctx.table->use++;
++ nft_use_inc_restore(&trans->ctx.table->use);
+ nft_clear(trans->ctx.net, nft_trans_obj(trans));
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_NEWFLOWTABLE:
+- trans->ctx.table->use--;
++ nft_use_dec_restore(&trans->ctx.table->use);
+ list_del_rcu(&nft_trans_flowtable(trans)->list);
+ nft_unregister_flowtable_net_hooks(net,
+ nft_trans_flowtable(trans));
+ break;
+ case NFT_MSG_DELFLOWTABLE:
+- trans->ctx.table->use++;
++ nft_use_inc_restore(&trans->ctx.table->use);
+ nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+ nft_trans_destroy(trans);
+ break;
+@@ -7685,8 +7723,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+ return PTR_ERR(chain);
+ if (nft_is_base_chain(chain))
+ return -EOPNOTSUPP;
++ if (!nft_use_inc(&chain->use))
++ return -EMFILE;
+
+- chain->use++;
+ data->verdict.chain = chain;
+ break;
+ }
+@@ -7698,10 +7737,13 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+
+ static void nft_verdict_uninit(const struct nft_data *data)
+ {
++ struct nft_chain *chain;
++
+ switch (data->verdict.code) {
+ case NFT_JUMP:
+ case NFT_GOTO:
+- data->verdict.chain->use--;
++ chain = data->verdict.chain;
++ nft_use_dec(&chain->use);
+ break;
+ }
+ }
+@@ -7855,11 +7897,11 @@ int __nft_release_basechain(struct nft_ctx *ctx)
+ nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
+ list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
+ list_del(&rule->list);
+- ctx->chain->use--;
++ nft_use_dec(&ctx->chain->use);
+ nf_tables_rule_release(ctx, rule);
+ }
+ nft_chain_del(ctx->chain);
+- ctx->table->use--;
++ nft_use_dec(&ctx->table->use);
+ nf_tables_chain_destroy(ctx);
+
+ return 0;
+@@ -7896,29 +7938,29 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
+ ctx.chain = chain;
+ list_for_each_entry_safe(rule, nr, &chain->rules, list) {
+ list_del(&rule->list);
+- chain->use--;
++ nft_use_dec(&chain->use);
+ nf_tables_rule_release(&ctx, rule);
+ }
+ }
+ list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
+ list_del(&flowtable->list);
+- table->use--;
++ nft_use_dec(&table->use);
+ nf_tables_flowtable_destroy(flowtable);
+ }
+ list_for_each_entry_safe(set, ns, &table->sets, list) {
+ list_del(&set->list);
+- table->use--;
++ nft_use_dec(&table->use);
+ nft_set_destroy(set);
+ }
+ list_for_each_entry_safe(obj, ne, &table->objects, list) {
+ nft_obj_del(obj);
+- table->use--;
++ nft_use_dec(&table->use);
+ nft_obj_destroy(&ctx, obj);
+ }
+ list_for_each_entry_safe(chain, nc, &table->chains, list) {
+ ctx.chain = chain;
+ nft_chain_del(chain);
+- table->use--;
++ nft_use_dec(&table->use);
+ nf_tables_chain_destroy(&ctx);
+ }
+ list_del(&table->list);
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index ff5ac173e8979..ca5d55a1d7d9c 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -171,8 +171,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
+ if (IS_ERR(flowtable))
+ return PTR_ERR(flowtable);
+
++ if (!nft_use_inc(&flowtable->use))
++ return -EMFILE;
++
+ priv->flowtable = flowtable;
+- flowtable->use++;
+
+ return nf_ct_netns_get(ctx->net, ctx->family);
+ }
+@@ -191,7 +193,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
+ {
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+- priv->flowtable->use++;
++ nft_use_inc_restore(&priv->flowtable->use);
+ }
+
+ static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 7032b80592b20..f5028479c0287 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx,
+ if (IS_ERR(obj))
+ return -ENOENT;
+
++ if (!nft_use_inc(&obj->use))
++ return -EMFILE;
++
+ nft_objref_priv(expr) = obj;
+- obj->use++;
+
+ return 0;
+ }
+@@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx,
+ if (phase == NFT_TRANS_COMMIT)
+ return;
+
+- obj->use--;
++ nft_use_dec(&obj->use);
+ }
+
+ static void nft_objref_activate(const struct nft_ctx *ctx,
+@@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
+ {
+ struct nft_object *obj = nft_objref_priv(expr);
+
+- obj->use++;
++ nft_use_inc_restore(&obj->use);
+ }
+
+ static struct nft_expr_type nft_objref_type;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index fbe3434dcdc1e..0582abf5ab710 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -363,18 +363,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
+ {
+ union tpacket_uhdr h;
+
++ /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
++
+ h.raw = frame;
+ switch (po->tp_version) {
+ case TPACKET_V1:
+- h.h1->tp_status = status;
++ WRITE_ONCE(h.h1->tp_status, status);
+ flush_dcache_page(pgv_to_page(&h.h1->tp_status));
+ break;
+ case TPACKET_V2:
+- h.h2->tp_status = status;
++ WRITE_ONCE(h.h2->tp_status, status);
+ flush_dcache_page(pgv_to_page(&h.h2->tp_status));
+ break;
+ case TPACKET_V3:
+- h.h3->tp_status = status;
++ WRITE_ONCE(h.h3->tp_status, status);
+ flush_dcache_page(pgv_to_page(&h.h3->tp_status));
+ break;
+ default:
+@@ -391,17 +393,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame)
+
+ smp_rmb();
+
++ /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
++
+ h.raw = frame;
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ flush_dcache_page(pgv_to_page(&h.h1->tp_status));
+- return h.h1->tp_status;
++ return READ_ONCE(h.h1->tp_status);
+ case TPACKET_V2:
+ flush_dcache_page(pgv_to_page(&h.h2->tp_status));
+- return h.h2->tp_status;
++ return READ_ONCE(h.h2->tp_status);
+ case TPACKET_V3:
+ flush_dcache_page(pgv_to_page(&h.h3->tp_status));
+- return h.h3->tp_status;
++ return READ_ONCE(h.h3->tp_status);
+ default:
+ WARN(1, "TPACKET version not supported.\n");
+ BUG();
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 69034c8cc3b86..265a02b6ad099 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -773,12 +773,10 @@ static void dist_free(struct disttable *d)
+ * signed 16 bit values.
+ */
+
+-static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+- const struct nlattr *attr)
++static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
+ {
+ size_t n = nla_len(attr)/sizeof(__s16);
+ const __s16 *data = nla_data(attr);
+- spinlock_t *root_lock;
+ struct disttable *d;
+ int i;
+
+@@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+ for (i = 0; i < n; i++)
+ d->table[i] = data[i];
+
+- root_lock = qdisc_root_sleeping_lock(sch);
+-
+- spin_lock_bh(root_lock);
+- swap(*tbl, d);
+- spin_unlock_bh(root_lock);
+-
+- dist_free(d);
++ *tbl = d;
+ return 0;
+ }
+
+@@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ {
+ struct netem_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_NETEM_MAX + 1];
++ struct disttable *delay_dist = NULL;
++ struct disttable *slot_dist = NULL;
+ struct tc_netem_qopt *qopt;
+ struct clgstate old_clg;
+ int old_loss_model = CLG_RANDOM;
+@@ -969,6 +963,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ if (ret < 0)
+ return ret;
+
++ if (tb[TCA_NETEM_DELAY_DIST]) {
++ ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
++ if (ret)
++ goto table_free;
++ }
++
++ if (tb[TCA_NETEM_SLOT_DIST]) {
++ ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
++ if (ret)
++ goto table_free;
++ }
++
+ sch_tree_lock(sch);
+ /* backup q->clg and q->loss_model */
+ old_clg = q->clg;
+@@ -978,26 +984,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+ ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
+ if (ret) {
+ q->loss_model = old_loss_model;
++ q->clg = old_clg;
+ goto unlock;
+ }
+ } else {
+ q->loss_model = CLG_RANDOM;
+ }
+
+- if (tb[TCA_NETEM_DELAY_DIST]) {
+- ret = get_dist_table(sch, &q->delay_dist,
+- tb[TCA_NETEM_DELAY_DIST]);
+- if (ret)
+- goto get_table_failure;
+- }
+-
+- if (tb[TCA_NETEM_SLOT_DIST]) {
+- ret = get_dist_table(sch, &q->slot_dist,
+- tb[TCA_NETEM_SLOT_DIST]);
+- if (ret)
+- goto get_table_failure;
+- }
+-
++ if (delay_dist)
++ swap(q->delay_dist, delay_dist);
++ if (slot_dist)
++ swap(q->slot_dist, slot_dist);
+ sch->limit = qopt->limit;
+
+ q->latency = PSCHED_TICKS2NS(qopt->latency);
+@@ -1047,17 +1044,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+
+ unlock:
+ sch_tree_unlock(sch);
+- return ret;
+
+-get_table_failure:
+- /* recover clg and loss_model, in case of
+- * q->clg and q->loss_model were modified
+- * in get_loss_clg()
+- */
+- q->clg = old_clg;
+- q->loss_model = old_loss_model;
+-
+- goto unlock;
++table_free:
++ dist_free(delay_dist);
++ dist_free(slot_dist);
++ return ret;
+ }
+
+ static int netem_init(struct Qdisc *sch, struct nlattr *opt,
+diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
+index a61c7bcbc72da..63f468bf8245c 100644
+--- a/tools/testing/radix-tree/regression1.c
++++ b/tools/testing/radix-tree/regression1.c
+@@ -177,7 +177,7 @@ void regression1_test(void)
+ nr_threads = 2;
+ pthread_barrier_init(&worker_barrier, NULL, nr_threads);
+
+- threads = malloc(nr_threads * sizeof(pthread_t *));
++ threads = malloc(nr_threads * sizeof(*threads));
+
+ for (i = 0; i < nr_threads; i++) {
+ arg = i;
+diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
+index 215e1067f0376..82ceca6aab965 100644
+--- a/tools/testing/selftests/rseq/Makefile
++++ b/tools/testing/selftests/rseq/Makefile
+@@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ CLANG_FLAGS += -no-integrated-as
+ endif
+
++top_srcdir = ../../../..
++
+ CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \
+- $(CLANG_FLAGS)
++ $(CLANG_FLAGS) -I$(top_srcdir)/tools/include
+ LDLIBS += -lpthread -ldl
+
+ # Own dependencies because we only want to build against 1st prerequisite, but
+diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
+index b736a5169aad0..e20191fb40d49 100644
+--- a/tools/testing/selftests/rseq/rseq.c
++++ b/tools/testing/selftests/rseq/rseq.c
+@@ -29,6 +29,8 @@
+ #include <dlfcn.h>
+ #include <stddef.h>
+
++#include <linux/compiler.h>
++
+ #include "../kselftest.h"
+ #include "rseq.h"
+