diff options
author | Alice Ferrazzi <alicef@gentoo.org> | 2023-04-20 20:17:13 +0900 |
---|---|---|
committer | Alice Ferrazzi <alicef@gentoo.org> | 2023-04-20 20:17:13 +0900 |
commit | 3eb5aeeab04f3ee6f5a28624465d859027f74799 (patch) | |
tree | 96f36ebdeced06748d7687e4a37bbecf8a6967f8 | |
parent | Linux patch 5.4.240 (diff) | |
download | linux-patches-3eb5aeeab04f3ee6f5a28624465d859027f74799.tar.gz linux-patches-3eb5aeeab04f3ee6f5a28624465d859027f74799.tar.bz2 linux-patches-3eb5aeeab04f3ee6f5a28624465d859027f74799.zip |
Linux patch 5.4.241
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1240_linux-5.4.241.patch | 3476 |
2 files changed, 3480 insertions, 0 deletions
diff --git a/0000_README b/0000_README index fa0e5af9..c3c10ecc 100644 --- a/0000_README +++ b/0000_README @@ -1003,6 +1003,10 @@ Patch: 1239_linux-5.4.240.patch From: https://www.kernel.org Desc: Linux 5.4.240 +Patch: 1240_linux-5.4.241.patch +From: https://www.kernel.org +Desc: Linux 5.4.241 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1240_linux-5.4.241.patch b/1240_linux-5.4.241.patch new file mode 100644 index 00000000..c390717a --- /dev/null +++ b/1240_linux-5.4.241.patch @@ -0,0 +1,3476 @@ +diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst +index 4c91abad7b35c..71ac38739d274 100644 +--- a/Documentation/sound/hd-audio/models.rst ++++ b/Documentation/sound/hd-audio/models.rst +@@ -702,7 +702,7 @@ ref + no-jd + BIOS setup but without jack-detection + intel +- Intel DG45* mobos ++ Intel D*45* mobos + dell-m6-amic + Dell desktops/laptops with analog mics + dell-m6-dmic +diff --git a/Makefile b/Makefile +index ff3e24e55f565..d9bbe9524e082 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 4 +-SUBLEVEL = 240 ++SUBLEVEL = 241 + EXTRAVERSION = + NAME = Kleptomaniac Octopus + +diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c +index 8126f15b8e09a..6b019915b0c8f 100644 +--- a/arch/mips/lasat/picvue_proc.c ++++ b/arch/mips/lasat/picvue_proc.c +@@ -39,7 +39,7 @@ static void pvc_display(unsigned long data) + pvc_write_string(pvc_lines[i], 0, i); + } + +-static DECLARE_TASKLET(pvc_display_tasklet, &pvc_display, 0); ++static DECLARE_TASKLET_OLD(pvc_display_tasklet, &pvc_display); + + static int pvc_line_proc_show(struct seq_file *m, void *v) + { +diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c +index 9ea65611fba0b..fff04d2859765 100644 +--- a/arch/x86/kernel/sysfb_efi.c ++++ b/arch/x86/kernel/sysfb_efi.c 
+@@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { + "IdeaPad Duet 3 10IGL5"), + }, + }, ++ { ++ /* Lenovo Yoga Book X91F / X91L */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ /* Non exact match to match F + L versions */ ++ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"), ++ }, ++ }, + {}, + }; + +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index 76959a7d88c82..94291e0ddcb7a 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -7,6 +7,7 @@ + #include <linux/dmi.h> + #include <linux/pci.h> + #include <linux/vgaarb.h> ++#include <asm/amd_nb.h> + #include <asm/hpet.h> + #include <asm/pci_x86.h> + +@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev) + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma); + + #endif ++ ++#ifdef CONFIG_AMD_NB ++ ++#define AMD_15B8_RCC_DEV2_EPF0_STRAP2 0x10136008 ++#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK 0x00000080L ++ ++static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev) ++{ ++ u32 data; ++ ++ if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) { ++ data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK; ++ if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data)) ++ pci_err(dev, "Failed to write data 0x%x\n", data); ++ } else { ++ pci_err(dev, "Failed to read data\n"); ++ } ++} ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0); ++#endif +diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c +index ce49820caa97f..01e54450c846f 100644 +--- a/crypto/asymmetric_keys/pkcs7_verify.c ++++ b/crypto/asymmetric_keys/pkcs7_verify.c +@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7, + } + + if (sinfo->msgdigest_len != sig->digest_size) { +- pr_debug("Sig %u: Invalid digest size (%u)\n", +- sinfo->index, sinfo->msgdigest_len); ++ pr_warn("Sig 
%u: Invalid digest size (%u)\n", ++ sinfo->index, sinfo->msgdigest_len); + ret = -EBADMSG; + goto error; + } + + if (memcmp(sig->digest, sinfo->msgdigest, + sinfo->msgdigest_len) != 0) { +- pr_debug("Sig %u: Message digest doesn't match\n", +- sinfo->index); ++ pr_warn("Sig %u: Message digest doesn't match\n", ++ sinfo->index); + ret = -EKEYREJECTED; + goto error; + } +@@ -488,7 +488,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, + const void *data, size_t datalen) + { + if (pkcs7->data) { +- pr_debug("Data already supplied\n"); ++ pr_warn("Data already supplied\n"); + return -EINVAL; + } + pkcs7->data = data; +diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c +index cc9dbcecaacaa..0701bb161b63f 100644 +--- a/crypto/asymmetric_keys/verify_pefile.c ++++ b/crypto/asymmetric_keys/verify_pefile.c +@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, + break; + + default: +- pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic); ++ pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic); + return -ELIBBAD; + } + +@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen, + ctx->certs_size = ddir->certs.size; + + if (!ddir->certs.virtual_address || !ddir->certs.size) { +- pr_debug("Unsigned PE binary\n"); ++ pr_warn("Unsigned PE binary\n"); + return -ENODATA; + } + +@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, + unsigned len; + + if (ctx->sig_len < sizeof(wrapper)) { +- pr_debug("Signature wrapper too short\n"); ++ pr_warn("Signature wrapper too short\n"); + return -ELIBBAD; + } + +@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf, + pr_debug("sig wrapper = { %x, %x, %x }\n", + wrapper.length, wrapper.revision, wrapper.cert_type); + +- /* Both pesign and sbsign round up the length of certificate table +- * (in optional header data directories) to 8 byte alignment. 
++ /* sbsign rounds up the length of certificate table (in optional ++ * header data directories) to 8 byte alignment. However, the PE ++ * specification states that while entries are 8-byte aligned, this is ++ * not included in their length, and as a result, pesign has not ++ * rounded up since 0.110. + */ +- if (round_up(wrapper.length, 8) != ctx->sig_len) { +- pr_debug("Signature wrapper len wrong\n"); ++ if (wrapper.length > ctx->sig_len) { ++ pr_warn("Signature wrapper bigger than sig len (%x > %x)\n", ++ ctx->sig_len, wrapper.length); + return -ELIBBAD; + } + if (wrapper.revision != WIN_CERT_REVISION_2_0) { +- pr_debug("Signature is not revision 2.0\n"); ++ pr_warn("Signature is not revision 2.0\n"); + return -ENOTSUPP; + } + if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) { +- pr_debug("Signature certificate type is not PKCS\n"); ++ pr_warn("Signature certificate type is not PKCS\n"); + return -ENOTSUPP; + } + +@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, + ctx->sig_offset += sizeof(wrapper); + ctx->sig_len -= sizeof(wrapper); + if (ctx->sig_len < 4) { +- pr_debug("Signature data missing\n"); ++ pr_warn("Signature data missing\n"); + return -EKEYREJECTED; + } + +@@ -194,7 +198,7 @@ check_len: + return 0; + } + not_pkcs7: +- pr_debug("Signature data not PKCS#7\n"); ++ pr_warn("Signature data not PKCS#7\n"); + return -ELIBBAD; + } + +@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, + digest_size = crypto_shash_digestsize(tfm); + + if (digest_size != ctx->digest_len) { +- pr_debug("Digest size mismatch (%zx != %x)\n", +- digest_size, ctx->digest_len); ++ pr_warn("Digest size mismatch (%zx != %x)\n", ++ digest_size, ctx->digest_len); + ret = -EBADMSG; + goto error_no_desc; + } +@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen, + * PKCS#7 certificate. 
+ */ + if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) { +- pr_debug("Digest mismatch\n"); ++ pr_warn("Digest mismatch\n"); + ret = -EKEYREJECTED; + } else { + pr_debug("The digests match!\n"); +diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c +index e0b0256896250..576cb2d0708f6 100644 +--- a/drivers/gpio/gpio-davinci.c ++++ b/drivers/gpio/gpio-davinci.c +@@ -333,7 +333,7 @@ static struct irq_chip gpio_irqchip = { + .irq_enable = gpio_irq_enable, + .irq_disable = gpio_irq_disable, + .irq_set_type = gpio_irq_type, +- .flags = IRQCHIP_SET_TYPE_MASKED, ++ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE, + }; + + static void gpio_irq_handler(struct irq_desc *desc) +diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c +index 8768073794fbf..6106fa7c43028 100644 +--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c ++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c +@@ -284,10 +284,17 @@ static const struct dmi_system_id orientation_data[] = { + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, +- }, { /* Lenovo Yoga Book X90F / X91F / X91L */ ++ }, { /* Lenovo Yoga Book X90F / X90L */ + .matches = { +- /* Non exact match to match all versions */ +- DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), ++ }, ++ .driver_data = (void *)&lcd1200x1920_rightside_up, ++ }, { /* Lenovo Yoga Book X91F / X91L */ ++ .matches = { ++ /* Non exact match to match F + L versions */ ++ DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* OneGX1 Pro */ +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c +index f1007c50565b6..b17f3022db5a4 100644 +--- 
a/drivers/gpu/drm/panfrost/panfrost_mmu.c ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c +@@ -502,6 +502,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, + if (IS_ERR(pages[i])) { + mutex_unlock(&bo->base.pages_lock); + ret = PTR_ERR(pages[i]); ++ pages[i] = NULL; + goto err_pages; + } + } +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c +index 83dccdeef9069..da63e09fb0d59 100644 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c +@@ -156,7 +156,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) + writel_relaxed(config->ss_pe_cmp[i], + drvdata->base + TRCSSPCICRn(i)); + } +- for (i = 0; i < drvdata->nr_addr_cmp; i++) { ++ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { + writeq_relaxed(config->addr_val[i], + drvdata->base + TRCACVRn(i)); + writeq_relaxed(config->addr_acc[i], +diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c +index 13c17afe7102e..4fac2591b6618 100644 +--- a/drivers/i2c/busses/i2c-imx-lpi2c.c ++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c +@@ -468,6 +468,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter, + if (num == 1 && msgs[0].len == 0) + goto stop; + ++ lpi2c_imx->rx_buf = NULL; ++ lpi2c_imx->tx_buf = NULL; + lpi2c_imx->delivered = 0; + lpi2c_imx->msglen = msgs[i].len; + init_completion(&lpi2c_imx->complete); +diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c +index ca8b3ecfa93d1..1c3595c8a761a 100644 +--- a/drivers/i2c/busses/i2c-ocores.c ++++ b/drivers/i2c/busses/i2c-ocores.c +@@ -343,18 +343,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c) + * ocores_isr(), we just add our polling code around it. 
+ * + * It can run in atomic context ++ * ++ * Return: 0 on success, -ETIMEDOUT on timeout + */ +-static void ocores_process_polling(struct ocores_i2c *i2c) ++static int ocores_process_polling(struct ocores_i2c *i2c) + { +- while (1) { +- irqreturn_t ret; +- int err; ++ irqreturn_t ret; ++ int err = 0; + ++ while (1) { + err = ocores_poll_wait(i2c); +- if (err) { +- i2c->state = STATE_ERROR; ++ if (err) + break; /* timeout */ +- } + + ret = ocores_isr(-1, i2c); + if (ret == IRQ_NONE) +@@ -365,13 +365,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c) + break; + } + } ++ ++ return err; + } + + static int ocores_xfer_core(struct ocores_i2c *i2c, + struct i2c_msg *msgs, int num, + bool polling) + { +- int ret; ++ int ret = 0; + u8 ctrl; + + ctrl = oc_getreg(i2c, OCI2C_CONTROL); +@@ -389,15 +391,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c, + oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START); + + if (polling) { +- ocores_process_polling(i2c); ++ ret = ocores_process_polling(i2c); + } else { +- ret = wait_event_timeout(i2c->wait, +- (i2c->state == STATE_ERROR) || +- (i2c->state == STATE_DONE), HZ); +- if (ret == 0) { +- ocores_process_timeout(i2c); +- return -ETIMEDOUT; +- } ++ if (wait_event_timeout(i2c->wait, ++ (i2c->state == STATE_ERROR) || ++ (i2c->state == STATE_DONE), HZ) == 0) ++ ret = -ETIMEDOUT; ++ } ++ if (ret) { ++ ocores_process_timeout(i2c); ++ return ret; + } + + return (i2c->state == STATE_DONE) ? 
num : -EIO; +diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c +index 7a1a9fe470728..1dee372e4199f 100644 +--- a/drivers/iio/adc/ti-ads7950.c ++++ b/drivers/iio/adc/ti-ads7950.c +@@ -635,6 +635,7 @@ static int ti_ads7950_probe(struct spi_device *spi) + st->chip.label = dev_name(&st->spi->dev); + st->chip.parent = &st->spi->dev; + st->chip.owner = THIS_MODULE; ++ st->chip.can_sleep = true; + st->chip.base = -1; + st->chip.ngpio = TI_ADS7950_NUM_GPIOS; + st->chip.get_direction = ti_ads7950_get_direction; +diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c +index 81677795e57a5..080a3721874d0 100644 +--- a/drivers/iio/dac/cio-dac.c ++++ b/drivers/iio/dac/cio-dac.c +@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev, + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + +- /* DAC can only accept up to a 16-bit value */ +- if ((unsigned int)val > 65535) ++ /* DAC can only accept up to a 12-bit value */ ++ if ((unsigned int)val > 4095) + return -EINVAL; + + priv->chan_out_states[chan->channel] = val; +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index 93a7ff1bd02c7..f61933dacbfd8 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -521,6 +521,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, + + ret = device->ops.create_ah(ah, ah_attr, flags, udata); + if (ret) { ++ if (ah->sgid_attr) ++ rdma_put_gid_attr(ah->sgid_attr); + kfree(ah); + return ERR_PTR(ret); + } +diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c +index c06b5322d470e..c103b44d3d666 100644 +--- a/drivers/mtd/mtdblock.c ++++ b/drivers/mtd/mtdblock.c +@@ -150,7 +150,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, + mtdblk->cache_state = STATE_EMPTY; + ret = mtd_read(mtd, sect_start, sect_size, + &retlen, mtdblk->cache_data); +- if (ret) ++ if (ret && !mtd_is_bitflip(ret)) + return ret; + if (retlen != sect_size) + return -EIO; +@@ 
-185,8 +185,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, + pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", + mtd->name, pos, len); + +- if (!sect_size) +- return mtd_read(mtd, pos, len, &retlen, buf); ++ if (!sect_size) { ++ ret = mtd_read(mtd, pos, len, &retlen, buf); ++ if (ret && !mtd_is_bitflip(ret)) ++ return ret; ++ return 0; ++ } + + while (len > 0) { + unsigned long sect_start = (pos/sect_size)*sect_size; +@@ -206,7 +210,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, + memcpy (buf, mtdblk->cache_data + offset, size); + } else { + ret = mtd_read(mtd, pos, size, &retlen, buf); +- if (ret) ++ if (ret && !mtd_is_bitflip(ret)) + return ret; + if (retlen != size) + return -EIO; +diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c +index 240b493abb86f..312738124ea10 100644 +--- a/drivers/mtd/nand/raw/meson_nand.c ++++ b/drivers/mtd/nand/raw/meson_nand.c +@@ -276,7 +276,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir, + + if (raw) { + len = mtd->writesize + mtd->oobsize; +- cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir); ++ cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + return; + } +@@ -540,7 +540,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len) + if (ret) + goto out; + +- cmd = NFC_CMD_N2M | (len & GENMASK(5, 0)); ++ cmd = NFC_CMD_N2M | (len & GENMASK(13, 0)); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + meson_nfc_drain_cmd(nfc); +@@ -564,7 +564,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) + if (ret) + return ret; + +- cmd = NFC_CMD_M2N | (len & GENMASK(5, 0)); ++ cmd = NFC_CMD_M2N | (len & GENMASK(13, 0)); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + meson_nfc_drain_cmd(nfc); +diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c +index 
5c06e0b4d4ef3..ad4d944ada0c1 100644 +--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c ++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c +@@ -1592,6 +1592,9 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, + if (IS_ERR(sdrt)) + return PTR_ERR(sdrt); + ++ if (sdrt->tRC_min < 30000) ++ return -EOPNOTSUPP; ++ + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c +index 13f8292ceea52..f29ed9102ce91 100644 +--- a/drivers/mtd/ubi/build.c ++++ b/drivers/mtd/ubi/build.c +@@ -644,12 +644,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) + ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); + ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); + +- if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) > +- ubi->vid_hdr_alsize)) { +- ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset); +- return -EINVAL; +- } +- + dbg_gen("min_io_size %d", ubi->min_io_size); + dbg_gen("max_write_size %d", ubi->max_write_size); + dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); +@@ -667,6 +661,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) + ubi->vid_hdr_aloffset; + } + ++ /* ++ * Memory allocation for VID header is ubi->vid_hdr_alsize ++ * which is described in comments in io.c. ++ * Make sure VID header shift + UBI_VID_HDR_SIZE not exceeds ++ * ubi->vid_hdr_alsize, so that all vid header operations ++ * won't access memory out of bounds. 
++ */ ++ if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) { ++ ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)" ++ " + VID header size(%zu) > VID header aligned size(%d).", ++ ubi->vid_hdr_offset, ubi->vid_hdr_shift, ++ UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize); ++ return -EINVAL; ++ } ++ + /* Similar for the data offset */ + ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; + ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c +index 4f88433d4adc7..fd0e8f948c3da 100644 +--- a/drivers/mtd/ubi/wl.c ++++ b/drivers/mtd/ubi/wl.c +@@ -576,6 +576,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, + * @vol_id: the volume ID that last used this PEB + * @lnum: the last used logical eraseblock number for the PEB + * @torture: if the physical eraseblock has to be tortured ++ * @nested: denotes whether the work_sem is already held + * + * This function returns zero in case of success and a %-ENOMEM in case of + * failure. +@@ -1060,8 +1061,6 @@ out_unlock: + * __erase_worker - physical eraseblock erase worker function. + * @ubi: UBI device description object + * @wl_wrk: the work object +- * @shutdown: non-zero if the worker has to free memory and exit +- * because the WL sub-system is shutting down + * + * This function erases a physical eraseblock and perform torture testing if + * needed. 
It also takes care about marking the physical eraseblock bad if +@@ -1111,7 +1110,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) + int err1; + + /* Re-schedule the LEB for erasure */ +- err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); ++ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true); + if (err1) { + spin_lock(&ubi->wl_lock); + wl_entry_destroy(ubi, e); +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c +index d948b582f4c97..12dd18cbdba34 100644 +--- a/drivers/net/ethernet/cadence/macb_main.c ++++ b/drivers/net/ethernet/cadence/macb_main.c +@@ -719,6 +719,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) + } + #endif + addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); ++#ifdef CONFIG_MACB_USE_HWSTAMP ++ if (bp->hw_dma_cap & HW_DMA_CAP_PTP) ++ addr &= ~GEM_BIT(DMA_RXVALID); ++#endif + return addr; + } + +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +index af38d3d73291c..4814046cfc78e 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +@@ -629,7 +629,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) + int i, err, ring; + + if (dev->flags & QLCNIC_NEED_FLR) { +- pci_reset_function(dev->pdev); ++ err = pci_reset_function(dev->pdev); ++ if (err) { ++ dev_err(&dev->pdev->dev, ++ "Adapter reset failed (%d). 
Please reboot\n", ++ err); ++ return err; ++ } + dev->flags &= ~QLCNIC_NEED_FLR; + } + +diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c +index 70b9a7bfe4ec6..201470d540d87 100644 +--- a/drivers/net/ethernet/sun/niu.c ++++ b/drivers/net/ethernet/sun/niu.c +@@ -4503,7 +4503,7 @@ static int niu_alloc_channels(struct niu *np) + + err = niu_rbr_fill(np, rp, GFP_KERNEL); + if (err) +- return err; ++ goto out_err; + } + + tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c +index ca3f18aa16acb..887dc57704402 100644 +--- a/drivers/pinctrl/pinctrl-amd.c ++++ b/drivers/pinctrl/pinctrl-amd.c +@@ -770,6 +770,34 @@ static const struct pinconf_ops amd_pinconf_ops = { + .pin_config_group_set = amd_pinconf_group_set, + }; + ++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev) ++{ ++ struct pinctrl_desc *desc = gpio_dev->pctrl->desc; ++ unsigned long flags; ++ u32 pin_reg, mask; ++ int i; ++ ++ mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | ++ BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) | ++ BIT(WAKE_CNTRL_OFF_S4); ++ ++ for (i = 0; i < desc->npins; i++) { ++ int pin = desc->pins[i].number; ++ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); ++ ++ if (!pd) ++ continue; ++ ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags); ++ ++ pin_reg = readl(gpio_dev->base + i * 4); ++ pin_reg &= ~mask; ++ writel(pin_reg, gpio_dev->base + i * 4); ++ ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); ++ } ++} ++ + #ifdef CONFIG_PM_SLEEP + static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) + { +@@ -852,6 +880,7 @@ static int amd_gpio_probe(struct platform_device *pdev) + int irq_base; + struct resource *res; + struct amd_gpio *gpio_dev; ++ struct gpio_irq_chip *girq; + + gpio_dev = devm_kzalloc(&pdev->dev, + sizeof(struct amd_gpio), GFP_KERNEL); +@@ -913,6 +942,18 @@ static int amd_gpio_probe(struct platform_device 
*pdev) + return PTR_ERR(gpio_dev->pctrl); + } + ++ /* Disable and mask interrupts */ ++ amd_gpio_irq_init(gpio_dev); ++ ++ girq = &gpio_dev->gc.irq; ++ girq->chip = &amd_gpio_irqchip; ++ /* This will let us handle the parent IRQ in the driver */ ++ girq->parent_handler = NULL; ++ girq->num_parents = 0; ++ girq->parents = NULL; ++ girq->default_type = IRQ_TYPE_NONE; ++ girq->handler = handle_simple_irq; ++ + ret = gpiochip_add_data(&gpio_dev->gc, gpio_dev); + if (ret) + return ret; +@@ -924,17 +965,6 @@ static int amd_gpio_probe(struct platform_device *pdev) + goto out2; + } + +- ret = gpiochip_irqchip_add(&gpio_dev->gc, +- &amd_gpio_irqchip, +- 0, +- handle_simple_irq, +- IRQ_TYPE_NONE); +- if (ret) { +- dev_err(&pdev->dev, "could not add irqchip\n"); +- ret = -ENODEV; +- goto out2; +- } +- + ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, gpio_dev); + if (ret) +diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c +index 6cc7c3910e098..0f80fdf88253c 100644 +--- a/drivers/power/supply/cros_usbpd-charger.c ++++ b/drivers/power/supply/cros_usbpd-charger.c +@@ -282,7 +282,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port) + port->psy_current_max = 0; + break; + default: +- dev_err(dev, "Port %d: default case!\n", port->port_number); ++ dev_dbg(dev, "Port %d: default case!\n", port->port_number); + port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP; + } + +diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c +index 89497448d2177..ad4321f2b6f87 100644 +--- a/drivers/pwm/pwm-cros-ec.c ++++ b/drivers/pwm/pwm-cros-ec.c +@@ -125,6 +125,7 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + + state->enabled = (ret > 0); + state->period = EC_PWM_MAX_DUTY; ++ state->polarity = PWM_POLARITY_NORMAL; + + /* Note that "disabled" and "duty cycle == 0" are treated the same */ + state->duty_cycle = ret; +diff --git 
a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c +index 892d853d48a1a..b30d664bf7d57 100644 +--- a/drivers/pwm/pwm-sprd.c ++++ b/drivers/pwm/pwm-sprd.c +@@ -109,6 +109,7 @@ static void sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + duty = val & SPRD_PWM_DUTY_MSK; + tmp = (prescale + 1) * NSEC_PER_SEC * duty; + state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate); ++ state->polarity = PWM_POLARITY_NORMAL; + + /* Disable PWM clocks if the PWM channel is not in enable state. */ + if (!state->enabled) +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c +index 1707d6d144d21..6a1428d453f3e 100644 +--- a/drivers/scsi/ses.c ++++ b/drivers/scsi/ses.c +@@ -503,9 +503,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev, + int i; + struct ses_component *scomp; + +- if (!edev->component[0].scratch) +- return 0; +- + for (i = 0; i < edev->components; i++) { + scomp = edev->component[i].scratch; + if (scomp->addr != efd->addr) +@@ -596,8 +593,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, + components++, + type_ptr[0], + name); +- else ++ else if (components < edev->components) + ecomp = &edev->component[components++]; ++ else ++ ecomp = ERR_PTR(-EINVAL); + + if (!IS_ERR(ecomp)) { + if (addl_desc_ptr) { +@@ -728,11 +727,6 @@ static int ses_intf_add(struct device *cdev, + components += type_ptr[1]; + } + +- if (components == 0) { +- sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n"); +- goto err_free; +- } +- + ses_dev->page1 = buf; + ses_dev->page1_len = len; + buf = NULL; +@@ -774,9 +768,11 @@ static int ses_intf_add(struct device *cdev, + buf = NULL; + } + page2_not_supported: +- scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); +- if (!scomp) +- goto err_free; ++ if (components > 0) { ++ scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); ++ if (!scomp) ++ goto err_free; ++ } + + edev = enclosure_register(cdev->parent, 
dev_name(&sdev->sdev_gendev), + components, &ses_enclosure_callbacks); +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c +index cac136e9d5e0c..b722ab9415285 100644 +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -807,11 +807,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port) + struct lpuart_port, port); + unsigned long stat = lpuart32_read(port, UARTSTAT); + unsigned long sfifo = lpuart32_read(port, UARTFIFO); ++ unsigned long ctrl = lpuart32_read(port, UARTCTRL); + + if (sport->dma_tx_in_progress) + return 0; + +- if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) ++ /* ++ * LPUART Transmission Complete Flag may never be set while queuing a break ++ * character, so avoid checking for transmission complete when UARTCTRL_SBK ++ * is asserted. ++ */ ++ if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK) + return TIOCSER_TEMT; + + return 0; +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index c066bb7f07b01..95db67c07c340 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -2925,6 +2925,13 @@ static int sci_init_single(struct platform_device *dev, + sci_port->irqs[i] = platform_get_irq(dev, i); + } + ++ /* ++ * The fourth interrupt on SCI port is transmit end interrupt, so ++ * shuffle the interrupts. ++ */ ++ if (p->type == PORT_SCI) ++ swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]); ++ + /* The SCI generates several interrupts. They can be muxed together or + * connected to different interrupt lines. In the muxed case only one + * interrupt resource is specified as there is only one interrupt ID. 
+@@ -2990,7 +2997,7 @@ static int sci_init_single(struct platform_device *dev, + port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags; + port->fifosize = sci_port->params->fifosize; + +- if (port->type == PORT_SCI) { ++ if (port->type == PORT_SCI && !dev->dev.of_node) { + if (sci_port->reg_size >= 0x20) + port->regshift = 2; + else +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index b8915790a20af..eb78091d1b35e 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -9,6 +9,7 @@ + */ + + #include <linux/pci.h> ++#include <linux/iommu.h> + #include <linux/iopoll.h> + #include <linux/irq.h> + #include <linux/log2.h> +@@ -226,6 +227,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) + static void xhci_zero_64b_regs(struct xhci_hcd *xhci) + { + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; ++ struct iommu_domain *domain; + int err, i; + u64 val; + u32 intrs; +@@ -244,7 +246,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) + * an iommu. Doing anything when there is no iommu is definitely + * unsafe... 
+ */ +- if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev)) ++ domain = iommu_get_domain_for_dev(dev); ++ if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain || ++ domain->type == IOMMU_DOMAIN_IDENTITY) + return; + + xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n"); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 4cd46fc9fcad4..05d93eeffccc6 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -121,6 +121,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ ++ { USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */ + { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */ + { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */ + { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index c1839091edf5c..e51ed6b45a474 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */ ++ .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, +@@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */ + .driver_info = RSVD(0) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */ ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */ ++ .driver_info = NCTRL(0) | RSVD(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */ ++ .driver_info = NCTRL(2) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */ ++ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), +diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c +index c840f03996fb6..ca22a05179d1e 100644 +--- a/drivers/usb/typec/altmodes/displayport.c ++++ b/drivers/usb/typec/altmodes/displayport.c +@@ -100,8 +100,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con) + if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC && + pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK) + pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK; +- else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) ++ else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) { + pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK; ++ /* Default to pin assign C if available */ ++ if (pin_assign & BIT(DP_PIN_ASSIGN_C)) ++ pin_assign = BIT(DP_PIN_ASSIGN_C); ++ } + + if (!pin_assign) + return -EINVAL; +diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c +index f0f1e3b2e4639..4cbe6ba527541 100644 +--- a/drivers/watchdog/sbsa_gwdt.c ++++ b/drivers/watchdog/sbsa_gwdt.c +@@ 
-121,6 +121,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd, + struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); + + wdd->timeout = timeout; ++ timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000); + + if (action) + writel(gwdt->clk * timeout, +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index b94d68035c5d9..4536d8aea1967 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2239,6 +2239,23 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) + + fs_info->csum_shash = csum_shash; + ++ /* ++ * Check if the checksum implementation is a fast accelerated one. ++ * As-is this is a bit of a hack and should be replaced once the csum ++ * implementations provide that information themselves. ++ */ ++ switch (csum_type) { ++ case BTRFS_CSUM_TYPE_CRC32: ++ if (!strstr(crypto_shash_driver_name(csum_shash), "generic")) ++ set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); ++ break; ++ default: ++ break; ++ } ++ ++ btrfs_info(fs_info, "using %s (%s) checksum algorithm", ++ btrfs_super_csum_name(csum_type), ++ crypto_shash_driver_name(csum_shash)); + return 0; + } + +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 1a69bdb96fb2a..8d21019bbbab5 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -1567,8 +1567,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type, + } else { + snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev); + btrfs_sb(s)->bdev_holder = fs_type; +- if (!strstr(crc32c_impl(), "generic")) +- set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); + error = btrfs_fill_super(s, fs_devices, data); + } + if (!error) +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h +index a5fab9afd699f..2dde83a969680 100644 +--- a/fs/cifs/cifsproto.h ++++ b/fs/cifs/cifsproto.h +@@ -602,7 +602,7 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, + + static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options) + { 
+- if (backup_cred(cifs_sb)) ++ if (cifs_sb && (backup_cred(cifs_sb))) + return options | CREATE_OPEN_BACKUP_INTENT; + else + return options; +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 68e783272c629..a82d757a23419 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -2343,7 +2343,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon, + FS_FULL_SIZE_INFORMATION, + SMB2_O_INFO_FILESYSTEM, + sizeof(struct smb2_fs_full_size_info), +- &rsp_iov, &buftype, NULL); ++ &rsp_iov, &buftype, cifs_sb); + if (rc) + goto qfs_exit; + +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c +index ffc2b838b123c..771733396eab2 100644 +--- a/fs/nfsd/nfs4callback.c ++++ b/fs/nfsd/nfs4callback.c +@@ -840,8 +840,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r + if (!kcred) + return NULL; + +- kcred->uid = ses->se_cb_sec.uid; +- kcred->gid = ses->se_cb_sec.gid; ++ kcred->fsuid = ses->se_cb_sec.uid; ++ kcred->fsgid = ses->se_cb_sec.gid; + return kcred; + } + } +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 11914b3585b34..7765a7f9963ce 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -2609,11 +2609,10 @@ static int nilfs_segctor_thread(void *arg) + goto loop; + + end_thread: +- spin_unlock(&sci->sc_state_lock); +- + /* end sync. 
*/ + sci->sc_task = NULL; + wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */ ++ spin_unlock(&sci->sc_state_lock); + return 0; + } + +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c +index b1015e97f37b7..94cf515ec602a 100644 +--- a/fs/nilfs2/super.c ++++ b/fs/nilfs2/super.c +@@ -477,6 +477,7 @@ static void nilfs_put_super(struct super_block *sb) + up_write(&nilfs->ns_sem); + } + ++ nilfs_sysfs_delete_device_group(nilfs); + iput(nilfs->ns_sufile); + iput(nilfs->ns_cpfile); + iput(nilfs->ns_dat); +@@ -1103,6 +1104,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) + nilfs_put_root(fsroot); + + failed_unload: ++ nilfs_sysfs_delete_device_group(nilfs); + iput(nilfs->ns_sufile); + iput(nilfs->ns_cpfile); + iput(nilfs->ns_dat); +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c +index 6541e29a8b200..24f626e7d012a 100644 +--- a/fs/nilfs2/the_nilfs.c ++++ b/fs/nilfs2/the_nilfs.c +@@ -87,7 +87,6 @@ void destroy_nilfs(struct the_nilfs *nilfs) + { + might_sleep(); + if (nilfs_init(nilfs)) { +- nilfs_sysfs_delete_device_group(nilfs); + brelse(nilfs->ns_sbh[0]); + brelse(nilfs->ns_sbh[1]); + } +@@ -275,6 +274,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) + goto failed; + } + ++ err = nilfs_sysfs_create_device_group(sb); ++ if (unlikely(err)) ++ goto sysfs_error; ++ + if (valid_fs) + goto skip_recovery; + +@@ -336,6 +339,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) + goto failed; + + failed_unload: ++ nilfs_sysfs_delete_device_group(nilfs); ++ ++ sysfs_error: + iput(nilfs->ns_cpfile); + iput(nilfs->ns_sufile); + iput(nilfs->ns_dat); +@@ -668,10 +674,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) + if (err) + goto failed_sbh; + +- err = nilfs_sysfs_create_device_group(sb); +- if (err) +- goto failed_sbh; +- + set_nilfs_init(nilfs); + err = 0; + out: +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c +index 
3d5e09f7e3a7c..f5b16120c64db 100644 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c +@@ -456,7 +456,7 @@ xfs_attr_shortform_bytesfit( + int offset; + + /* rounded down */ +- offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3; ++ offset = (XFS_LITINO(mp) - bytes) >> 3; + + if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) { + minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; +@@ -523,8 +523,7 @@ xfs_attr_shortform_bytesfit( + minforkoff = roundup(minforkoff, 8) >> 3; + + /* attr fork btree root can have at least this many key/ptr pairs */ +- maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) - +- XFS_BMDR_SPACE_CALC(MINABTPTRS); ++ maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); + maxforkoff = maxforkoff >> 3; /* rounded down */ + + if (offset >= maxforkoff) +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c +index d900e3e6c9337..1e0fab62cd7dc 100644 +--- a/fs/xfs/libxfs/xfs_bmap.c ++++ b/fs/xfs/libxfs/xfs_bmap.c +@@ -192,14 +192,12 @@ xfs_default_attroffset( + struct xfs_mount *mp = ip->i_mount; + uint offset; + +- if (mp->m_sb.sb_inodesize == 256) { +- offset = XFS_LITINO(mp, ip->i_d.di_version) - +- XFS_BMDR_SPACE_CALC(MINABTPTRS); +- } else { ++ if (mp->m_sb.sb_inodesize == 256) ++ offset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); ++ else + offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS); +- } + +- ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version)); ++ ASSERT(offset < XFS_LITINO(mp)); + return offset; + } + +diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c +index 8c43cac15832b..121251651fead 100644 +--- a/fs/xfs/libxfs/xfs_btree.c ++++ b/fs/xfs/libxfs/xfs_btree.c +@@ -354,20 +354,17 @@ xfs_btree_free_block( + */ + void + xfs_btree_del_cursor( +- xfs_btree_cur_t *cur, /* btree cursor */ +- int error) /* del because of error */ ++ struct xfs_btree_cur *cur, /* btree cursor */ ++ int error) /* del because of error */ + { +- int i; /* btree level */ ++ int i; /* btree level */ + + /* +- 
* Clear the buffer pointers, and release the buffers. +- * If we're doing this in the face of an error, we +- * need to make sure to inspect all of the entries +- * in the bc_bufs array for buffers to be unlocked. +- * This is because some of the btree code works from +- * level n down to 0, and if we get an error along +- * the way we won't have initialized all the entries +- * down to 0. ++ * Clear the buffer pointers and release the buffers. If we're doing ++ * this because of an error, inspect all of the entries in the bc_bufs ++ * array for buffers to be unlocked. This is because some of the btree ++ * code works from level n down to 0, and if we get an error along the ++ * way we won't have initialized all the entries down to 0. + */ + for (i = 0; i < cur->bc_nlevels; i++) { + if (cur->bc_bufs[i]) +@@ -375,15 +372,10 @@ xfs_btree_del_cursor( + else if (!error) + break; + } +- /* +- * Can't free a bmap cursor without having dealt with the +- * allocated indirect blocks' accounting. +- */ ++ + ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || +- cur->bc_private.b.allocated == 0); +- /* +- * Free the cursor. +- */ ++ cur->bc_private.b.allocated == 0 || ++ XFS_FORCED_SHUTDOWN(cur->bc_mp)); + kmem_zone_free(xfs_btree_cur_zone, cur); + } + +diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h +index 1f24473121f0c..31fa9ab2ab612 100644 +--- a/fs/xfs/libxfs/xfs_format.h ++++ b/fs/xfs/libxfs/xfs_format.h +@@ -497,6 +497,23 @@ static inline bool xfs_sb_version_hascrc(struct xfs_sb *sbp) + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5; + } + ++/* ++ * v5 file systems support V3 inodes only, earlier file systems support ++ * v2 and v1 inodes. 
++ */ ++static inline bool xfs_sb_version_has_v3inode(struct xfs_sb *sbp) ++{ ++ return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5; ++} ++ ++static inline bool xfs_dinode_good_version(struct xfs_sb *sbp, ++ uint8_t version) ++{ ++ if (xfs_sb_version_has_v3inode(sbp)) ++ return version == 3; ++ return version == 1 || version == 2; ++} ++ + static inline bool xfs_sb_version_has_pquotino(struct xfs_sb *sbp) + { + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5; +@@ -946,8 +963,12 @@ typedef enum xfs_dinode_fmt { + /* + * Inode size for given fs. + */ +-#define XFS_LITINO(mp, version) \ +- ((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version))) ++#define XFS_DINODE_SIZE(sbp) \ ++ (xfs_sb_version_has_v3inode(sbp) ? \ ++ sizeof(struct xfs_dinode) : \ ++ offsetof(struct xfs_dinode, di_crc)) ++#define XFS_LITINO(mp) \ ++ ((mp)->m_sb.sb_inodesize - XFS_DINODE_SIZE(&(mp)->m_sb)) + + /* + * Inode data & attribute fork sizes, per inode. +@@ -956,13 +977,9 @@ typedef enum xfs_dinode_fmt { + #define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3)) + + #define XFS_DFORK_DSIZE(dip,mp) \ +- (XFS_DFORK_Q(dip) ? \ +- XFS_DFORK_BOFF(dip) : \ +- XFS_LITINO(mp, (dip)->di_version)) ++ (XFS_DFORK_Q(dip) ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp)) + #define XFS_DFORK_ASIZE(dip,mp) \ +- (XFS_DFORK_Q(dip) ? \ +- XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \ +- 0) ++ (XFS_DFORK_Q(dip) ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0) + #define XFS_DFORK_SIZE(dip,mp,w) \ + ((w) == XFS_DATA_FORK ? \ + XFS_DFORK_DSIZE(dip, mp) : \ +diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c +index c3e0c2f61be4d..391e441d43a03 100644 +--- a/fs/xfs/libxfs/xfs_ialloc.c ++++ b/fs/xfs/libxfs/xfs_ialloc.c +@@ -303,7 +303,7 @@ xfs_ialloc_inode_init( + * That means for v3 inode we log the entire buffer rather than just the + * inode cores. 
+ */ +- if (xfs_sb_version_hascrc(&mp->m_sb)) { ++ if (xfs_sb_version_has_v3inode(&mp->m_sb)) { + version = 3; + ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno)); + +@@ -339,7 +339,7 @@ xfs_ialloc_inode_init( + xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length)); + for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) { + int ioffset = i << mp->m_sb.sb_inodelog; +- uint isize = xfs_dinode_size(version); ++ uint isize = XFS_DINODE_SIZE(&mp->m_sb); + + free = xfs_make_iptr(mp, fbuf, i); + free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); +@@ -2818,7 +2818,7 @@ xfs_ialloc_setup_geometry( + * cannot change the behavior. + */ + igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE; +- if (xfs_sb_version_hascrc(&mp->m_sb)) { ++ if (xfs_sb_version_has_v3inode(&mp->m_sb)) { + int new_size = igeo->inode_cluster_size_raw; + + new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE; +diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c +index 28ab3c5255e18..962e95dcdbffa 100644 +--- a/fs/xfs/libxfs/xfs_inode_buf.c ++++ b/fs/xfs/libxfs/xfs_inode_buf.c +@@ -44,17 +44,6 @@ xfs_inobp_check( + } + #endif + +-bool +-xfs_dinode_good_version( +- struct xfs_mount *mp, +- __u8 version) +-{ +- if (xfs_sb_version_hascrc(&mp->m_sb)) +- return version == 3; +- +- return version == 1 || version == 2; +-} +- + /* + * If we are doing readahead on an inode buffer, we might be in log recovery + * reading an inode allocation buffer that hasn't yet been replayed, and hence +@@ -93,7 +82,7 @@ xfs_inode_buf_verify( + dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); + unlinked_ino = be32_to_cpu(dip->di_next_unlinked); + di_ok = xfs_verify_magic16(bp, dip->di_magic) && +- xfs_dinode_good_version(mp, dip->di_version) && ++ xfs_dinode_good_version(&mp->m_sb, dip->di_version) && + xfs_verify_agino_or_null(mp, agno, unlinked_ino); + if (unlikely(XFS_TEST_ERROR(!di_ok, mp, + XFS_ERRTAG_ITOBP_INOTOBP))) { +@@ -205,26 +194,23 @@ xfs_inode_from_disk( + struct xfs_icdinode 
*to = &ip->i_d; + struct inode *inode = VFS_I(ip); + +- + /* + * Convert v1 inodes immediately to v2 inode format as this is the + * minimum inode version format we support in the rest of the code. ++ * They will also be unconditionally written back to disk as v2 inodes. + */ +- to->di_version = from->di_version; +- if (to->di_version == 1) { ++ if (unlikely(from->di_version == 1)) { + set_nlink(inode, be16_to_cpu(from->di_onlink)); +- to->di_projid_lo = 0; +- to->di_projid_hi = 0; +- to->di_version = 2; ++ to->di_projid = 0; + } else { + set_nlink(inode, be32_to_cpu(from->di_nlink)); +- to->di_projid_lo = be16_to_cpu(from->di_projid_lo); +- to->di_projid_hi = be16_to_cpu(from->di_projid_hi); ++ to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 | ++ be16_to_cpu(from->di_projid_lo); + } + + to->di_format = from->di_format; +- to->di_uid = be32_to_cpu(from->di_uid); +- to->di_gid = be32_to_cpu(from->di_gid); ++ i_uid_write(inode, be32_to_cpu(from->di_uid)); ++ i_gid_write(inode, be32_to_cpu(from->di_gid)); + to->di_flushiter = be16_to_cpu(from->di_flushiter); + + /* +@@ -253,7 +239,7 @@ xfs_inode_from_disk( + to->di_dmstate = be16_to_cpu(from->di_dmstate); + to->di_flags = be16_to_cpu(from->di_flags); + +- if (to->di_version == 3) { ++ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { + inode_set_iversion_queried(inode, + be64_to_cpu(from->di_changecount)); + to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec); +@@ -275,12 +261,11 @@ xfs_inode_to_disk( + to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); + to->di_onlink = 0; + +- to->di_version = from->di_version; + to->di_format = from->di_format; +- to->di_uid = cpu_to_be32(from->di_uid); +- to->di_gid = cpu_to_be32(from->di_gid); +- to->di_projid_lo = cpu_to_be16(from->di_projid_lo); +- to->di_projid_hi = cpu_to_be16(from->di_projid_hi); ++ to->di_uid = cpu_to_be32(i_uid_read(inode)); ++ to->di_gid = cpu_to_be32(i_gid_read(inode)); ++ to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff); ++ 
to->di_projid_hi = cpu_to_be16(from->di_projid >> 16); + + memset(to->di_pad, 0, sizeof(to->di_pad)); + to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec); +@@ -304,7 +289,8 @@ xfs_inode_to_disk( + to->di_dmstate = cpu_to_be16(from->di_dmstate); + to->di_flags = cpu_to_be16(from->di_flags); + +- if (from->di_version == 3) { ++ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { ++ to->di_version = 3; + to->di_changecount = cpu_to_be64(inode_peek_iversion(inode)); + to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec); + to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec); +@@ -316,6 +302,7 @@ xfs_inode_to_disk( + uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid); + to->di_flushiter = 0; + } else { ++ to->di_version = 2; + to->di_flushiter = cpu_to_be16(from->di_flushiter); + } + } +@@ -429,7 +416,7 @@ xfs_dinode_verify_forkoff( + case XFS_DINODE_FMT_LOCAL: /* fall through ... */ + case XFS_DINODE_FMT_EXTENTS: /* fall through ... */ + case XFS_DINODE_FMT_BTREE: +- if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3)) ++ if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3)) + return __this_address; + break; + default: +@@ -455,7 +442,7 @@ xfs_dinode_verify( + + /* Verify v3 integrity information first */ + if (dip->di_version >= 3) { +- if (!xfs_sb_version_hascrc(&mp->m_sb)) ++ if (!xfs_sb_version_has_v3inode(&mp->m_sb)) + return __this_address; + if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize, + XFS_DINODE_CRC_OFF)) +@@ -630,12 +617,11 @@ xfs_iread( + + /* shortcut IO on inode allocation if possible */ + if ((iget_flags & XFS_IGET_CREATE) && +- xfs_sb_version_hascrc(&mp->m_sb) && ++ xfs_sb_version_has_v3inode(&mp->m_sb) && + !(mp->m_flags & XFS_MOUNT_IKEEP)) { + /* initialise the on-disk inode core */ + memset(&ip->i_d, 0, sizeof(ip->i_d)); + VFS_I(ip)->i_generation = prandom_u32(); +- ip->i_d.di_version = 3; + return 0; + } + +@@ -677,7 +663,6 @@ xfs_iread( + * Partial initialisation of the in-core inode. 
Just the bits + * that xfs_ialloc won't overwrite or relies on being correct. + */ +- ip->i_d.di_version = dip->di_version; + VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen); + ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); + +@@ -691,7 +676,6 @@ xfs_iread( + VFS_I(ip)->i_mode = 0; + } + +- ASSERT(ip->i_d.di_version >= 2); + ip->i_delayed_blks = 0; + + /* +diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h +index ab0f841653174..80b574579a21b 100644 +--- a/fs/xfs/libxfs/xfs_inode_buf.h ++++ b/fs/xfs/libxfs/xfs_inode_buf.h +@@ -16,13 +16,9 @@ struct xfs_dinode; + * format specific structures at the appropriate time. + */ + struct xfs_icdinode { +- int8_t di_version; /* inode version */ + int8_t di_format; /* format of di_c data */ + uint16_t di_flushiter; /* incremented on flush */ +- uint32_t di_uid; /* owner's user id */ +- uint32_t di_gid; /* owner's group id */ +- uint16_t di_projid_lo; /* lower part of owner's project id */ +- uint16_t di_projid_hi; /* higher part of owner's project id */ ++ uint32_t di_projid; /* owner's project id */ + xfs_fsize_t di_size; /* number of bytes in file */ + xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */ + xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ +@@ -62,8 +58,6 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from); + void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, + struct xfs_dinode *to); + +-bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version); +- + #if defined(DEBUG) + void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); + #else +diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c +index 93357072b19da..e758d74b2b629 100644 +--- a/fs/xfs/libxfs/xfs_inode_fork.c ++++ b/fs/xfs/libxfs/xfs_inode_fork.c +@@ -183,7 +183,7 @@ xfs_iformat_local( + */ + if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { + xfs_warn(ip->i_mount, +- "corrupt inode %Lu (bad size %d for local 
fork, size = %d).", ++ "corrupt inode %Lu (bad size %d for local fork, size = %zd).", + (unsigned long long) ip->i_ino, size, + XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); + xfs_inode_verifier_error(ip, -EFSCORRUPTED, +diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h +index 7b845c052fb45..a84a1557d11ce 100644 +--- a/fs/xfs/libxfs/xfs_inode_fork.h ++++ b/fs/xfs/libxfs/xfs_inode_fork.h +@@ -46,14 +46,9 @@ struct xfs_ifork { + (ip)->i_afp : \ + (ip)->i_cowfp)) + #define XFS_IFORK_DSIZE(ip) \ +- (XFS_IFORK_Q(ip) ? \ +- XFS_IFORK_BOFF(ip) : \ +- XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version)) ++ (XFS_IFORK_Q(ip) ? XFS_IFORK_BOFF(ip) : XFS_LITINO((ip)->i_mount)) + #define XFS_IFORK_ASIZE(ip) \ +- (XFS_IFORK_Q(ip) ? \ +- XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \ +- XFS_IFORK_BOFF(ip) : \ +- 0) ++ (XFS_IFORK_Q(ip) ? XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : 0) + #define XFS_IFORK_SIZE(ip,w) \ + ((w) == XFS_DATA_FORK ? \ + XFS_IFORK_DSIZE(ip) : \ +diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h +index e5f97c69b3205..d3b255f427893 100644 +--- a/fs/xfs/libxfs/xfs_log_format.h ++++ b/fs/xfs/libxfs/xfs_log_format.h +@@ -424,12 +424,10 @@ struct xfs_log_dinode { + /* structure must be padded to 64 bit alignment */ + }; + +-static inline uint xfs_log_dinode_size(int version) +-{ +- if (version == 3) +- return sizeof(struct xfs_log_dinode); +- return offsetof(struct xfs_log_dinode, di_next_unlinked); +-} ++#define xfs_log_dinode_size(mp) \ ++ (xfs_sb_version_has_v3inode(&(mp)->m_sb) ? 
\ ++ sizeof(struct xfs_log_dinode) : \ ++ offsetof(struct xfs_log_dinode, di_next_unlinked)) + + /* + * Buffer Log Format defintions +diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c +index 824073a839acb..8ece346def977 100644 +--- a/fs/xfs/libxfs/xfs_trans_resv.c ++++ b/fs/xfs/libxfs/xfs_trans_resv.c +@@ -187,7 +187,7 @@ xfs_calc_inode_chunk_res( + XFS_FSB_TO_B(mp, 1)); + if (alloc) { + /* icreate tx uses ordered buffers */ +- if (xfs_sb_version_hascrc(&mp->m_sb)) ++ if (xfs_sb_version_has_v3inode(&mp->m_sb)) + return res; + size = XFS_FSB_TO_B(mp, 1); + } +diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c +index 3f2292c7835ca..6788b0ca85eb7 100644 +--- a/fs/xfs/xfs_acl.c ++++ b/fs/xfs/xfs_acl.c +@@ -66,10 +66,12 @@ xfs_acl_from_disk( + + switch (acl_e->e_tag) { + case ACL_USER: +- acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id)); ++ acl_e->e_uid = make_kuid(&init_user_ns, ++ be32_to_cpu(ace->ae_id)); + break; + case ACL_GROUP: +- acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id)); ++ acl_e->e_gid = make_kgid(&init_user_ns, ++ be32_to_cpu(ace->ae_id)); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: +@@ -102,10 +104,12 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl) + ace->ae_tag = cpu_to_be32(acl_e->e_tag); + switch (acl_e->e_tag) { + case ACL_USER: +- ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid)); ++ ace->ae_id = cpu_to_be32( ++ from_kuid(&init_user_ns, acl_e->e_uid)); + break; + case ACL_GROUP: +- ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid)); ++ ace->ae_id = cpu_to_be32( ++ from_kgid(&init_user_ns, acl_e->e_gid)); + break; + default: + ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID); +diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c +index 5eab15dde4e61..2462dabb5ab84 100644 +--- a/fs/xfs/xfs_bmap_util.c ++++ b/fs/xfs/xfs_bmap_util.c +@@ -1624,12 +1624,12 @@ xfs_swap_extent_forks( + * event of a crash. 
Set the owner change log flags now and leave the + * bmbt scan as the last step. + */ +- if (ip->i_d.di_version == 3 && +- ip->i_d.di_format == XFS_DINODE_FMT_BTREE) +- (*target_log_flags) |= XFS_ILOG_DOWNER; +- if (tip->i_d.di_version == 3 && +- tip->i_d.di_format == XFS_DINODE_FMT_BTREE) +- (*src_log_flags) |= XFS_ILOG_DOWNER; ++ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { ++ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) ++ (*target_log_flags) |= XFS_ILOG_DOWNER; ++ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) ++ (*src_log_flags) |= XFS_ILOG_DOWNER; ++ } + + /* + * Swap the data forks of the inodes +@@ -1664,7 +1664,7 @@ xfs_swap_extent_forks( + (*src_log_flags) |= XFS_ILOG_DEXT; + break; + case XFS_DINODE_FMT_BTREE: +- ASSERT(ip->i_d.di_version < 3 || ++ ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) || + (*src_log_flags & XFS_ILOG_DOWNER)); + (*src_log_flags) |= XFS_ILOG_DBROOT; + break; +@@ -1676,7 +1676,7 @@ xfs_swap_extent_forks( + break; + case XFS_DINODE_FMT_BTREE: + (*target_log_flags) |= XFS_ILOG_DBROOT; +- ASSERT(tip->i_d.di_version < 3 || ++ ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) || + (*target_log_flags & XFS_ILOG_DOWNER)); + break; + } +diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c +index b1452117e4429..f98260ed6d51e 100644 +--- a/fs/xfs/xfs_buf_item.c ++++ b/fs/xfs/xfs_buf_item.c +@@ -328,7 +328,7 @@ xfs_buf_item_format( + * occurs during recovery. 
+ */ + if (bip->bli_flags & XFS_BLI_INODE_BUF) { +- if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) || ++ if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) || + !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && + xfs_log_item_in_current_chkpt(lip))) + bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF; +diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c +index 6231b155e7f34..672286f1762f0 100644 +--- a/fs/xfs/xfs_dquot.c ++++ b/fs/xfs/xfs_dquot.c +@@ -859,11 +859,11 @@ xfs_qm_id_for_quotatype( + { + switch (type) { + case XFS_DQ_USER: +- return ip->i_d.di_uid; ++ return i_uid_read(VFS_I(ip)); + case XFS_DQ_GROUP: +- return ip->i_d.di_gid; ++ return i_gid_read(VFS_I(ip)); + case XFS_DQ_PROJ: +- return xfs_get_projid(ip); ++ return ip->i_d.di_projid; + } + ASSERT(0); + return 0; +diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c +index e9acd58248f90..182b70464b71a 100644 +--- a/fs/xfs/xfs_error.c ++++ b/fs/xfs/xfs_error.c +@@ -335,7 +335,7 @@ xfs_corruption_error( + int linenum, + xfs_failaddr_t failaddr) + { +- if (level <= xfs_error_level) ++ if (buf && level <= xfs_error_level) + xfs_hex_dump(buf, bufsize); + xfs_error_report(tag, level, mp, filename, linenum, failaddr); + xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair"); +diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c +index 2183d87be4cfb..ef17c1f6db322 100644 +--- a/fs/xfs/xfs_extent_busy.c ++++ b/fs/xfs/xfs_extent_busy.c +@@ -344,7 +344,6 @@ xfs_extent_busy_trim( + ASSERT(*len > 0); + + spin_lock(&args->pag->pagb_lock); +-restart: + fbno = *bno; + flen = *len; + rbp = args->pag->pagb_tree.rb_node; +@@ -363,19 +362,6 @@ restart: + continue; + } + +- /* +- * If this is a metadata allocation, try to reuse the busy +- * extent instead of trimming the allocation. 
+- */ +- if (!xfs_alloc_is_userdata(args->datatype) && +- !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) { +- if (!xfs_extent_busy_update_extent(args->mp, args->pag, +- busyp, fbno, flen, +- false)) +- goto restart; +- continue; +- } +- + if (bbno <= fbno) { + /* start overlap */ + +diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c +index a1135b86e79f9..f1451642ce383 100644 +--- a/fs/xfs/xfs_icache.c ++++ b/fs/xfs/xfs_icache.c +@@ -289,6 +289,8 @@ xfs_reinit_inode( + uint64_t version = inode_peek_iversion(inode); + umode_t mode = inode->i_mode; + dev_t dev = inode->i_rdev; ++ kuid_t uid = inode->i_uid; ++ kgid_t gid = inode->i_gid; + + error = inode_init_always(mp->m_super, inode); + +@@ -297,6 +299,8 @@ xfs_reinit_inode( + inode_set_iversion_queried(inode, version); + inode->i_mode = mode; + inode->i_rdev = dev; ++ inode->i_uid = uid; ++ inode->i_gid = gid; + return error; + } + +@@ -1430,7 +1434,7 @@ xfs_inode_match_id( + return 0; + + if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && +- xfs_get_projid(ip) != eofb->eof_prid) ++ ip->i_d.di_projid != eofb->eof_prid) + return 0; + + return 1; +@@ -1454,7 +1458,7 @@ xfs_inode_match_id_union( + return 1; + + if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && +- xfs_get_projid(ip) == eofb->eof_prid) ++ ip->i_d.di_projid == eofb->eof_prid) + return 1; + + return 0; +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c +index 02f77a359972e..568a9332efd29 100644 +--- a/fs/xfs/xfs_inode.c ++++ b/fs/xfs/xfs_inode.c +@@ -750,6 +750,7 @@ xfs_ialloc( + xfs_buf_t **ialloc_context, + xfs_inode_t **ipp) + { ++ struct inode *dir = pip ? VFS_I(pip) : NULL; + struct xfs_mount *mp = tp->t_mountp; + xfs_ino_t ino; + xfs_inode_t *ip; +@@ -795,26 +796,17 @@ xfs_ialloc( + return error; + ASSERT(ip != NULL); + inode = VFS_I(ip); +- +- /* +- * We always convert v1 inodes to v2 now - we only support filesystems +- * with >= v2 inode capability, so there is no reason for ever leaving +- * an inode in v1 format. 
+- */ +- if (ip->i_d.di_version == 1) +- ip->i_d.di_version = 2; +- +- inode->i_mode = mode; + set_nlink(inode, nlink); +- ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid()); +- ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid()); + inode->i_rdev = rdev; +- xfs_set_projid(ip, prid); ++ ip->i_d.di_projid = prid; + +- if (pip && XFS_INHERIT_GID(pip)) { +- ip->i_d.di_gid = pip->i_d.di_gid; +- if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode)) +- inode->i_mode |= S_ISGID; ++ if (dir && !(dir->i_mode & S_ISGID) && ++ (mp->m_flags & XFS_MOUNT_GRPID)) { ++ inode->i_uid = current_fsuid(); ++ inode->i_gid = dir->i_gid; ++ inode->i_mode = mode; ++ } else { ++ inode_init_owner(inode, dir, mode); + } + + /* +@@ -822,9 +814,8 @@ xfs_ialloc( + * ID or one of the supplementary group IDs, the S_ISGID bit is cleared + * (and only if the irix_sgid_inherit compatibility variable is set). + */ +- if ((irix_sgid_inherit) && +- (inode->i_mode & S_ISGID) && +- (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) ++ if (irix_sgid_inherit && ++ (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid)) + inode->i_mode &= ~S_ISGID; + + ip->i_d.di_size = 0; +@@ -841,7 +832,7 @@ xfs_ialloc( + ip->i_d.di_dmstate = 0; + ip->i_d.di_flags = 0; + +- if (ip->i_d.di_version == 3) { ++ if (xfs_sb_version_has_v3inode(&mp->m_sb)) { + inode_set_iversion(inode, 1); + ip->i_d.di_flags2 = 0; + ip->i_d.di_cowextsize = 0; +@@ -849,7 +840,6 @@ xfs_ialloc( + ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec; + } + +- + flags = XFS_ILOG_CORE; + switch (mode & S_IFMT) { + case S_IFIFO: +@@ -902,20 +892,13 @@ xfs_ialloc( + + ip->i_d.di_flags |= di_flags; + } +- if (pip && +- (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) && +- pip->i_d.di_version == 3 && +- ip->i_d.di_version == 3) { +- uint64_t di_flags2 = 0; +- ++ if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY)) { + if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) { +- di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; ++ ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; + ip->i_d.di_cowextsize = 
pip->i_d.di_cowextsize; + } + if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX) +- di_flags2 |= XFS_DIFLAG2_DAX; +- +- ip->i_d.di_flags2 |= di_flags2; ++ ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX; + } + /* FALLTHROUGH */ + case S_IFLNK: +@@ -1117,7 +1100,6 @@ xfs_bumplink( + { + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); + +- ASSERT(ip->i_d.di_version > 1); + inc_nlink(VFS_I(ip)); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + } +@@ -1153,8 +1135,7 @@ xfs_create( + /* + * Make sure that we have allocated dquot(s) on disk. + */ +- error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), +- xfs_kgid_to_gid(current_fsgid()), prid, ++ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, + &udqp, &gdqp, &pdqp); + if (error) +@@ -1304,8 +1285,7 @@ xfs_create_tmpfile( + /* + * Make sure that we have allocated dquot(s) on disk. + */ +- error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()), +- xfs_kgid_to_gid(current_fsgid()), prid, ++ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, + &udqp, &gdqp, &pdqp); + if (error) +@@ -1418,7 +1398,7 @@ xfs_link( + * the tree quota mechanism could be circumvented. + */ + if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && +- (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { ++ tdp->i_d.di_projid != sip->i_d.di_projid)) { + error = -EXDEV; + goto error_return; + } +@@ -3299,7 +3279,7 @@ xfs_rename( + * tree quota mechanism would be circumvented. 
+ */ + if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && +- (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { ++ target_dp->i_d.di_projid != src_ip->i_d.di_projid)) { + error = -EXDEV; + goto out_trans_cancel; + } +@@ -3831,7 +3811,6 @@ xfs_iflush_int( + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); + ASSERT(iip != NULL && iip->ili_fields != 0); +- ASSERT(ip->i_d.di_version > 1); + + /* set *dip = inode's place in the buffer */ + dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); +@@ -3892,7 +3871,7 @@ xfs_iflush_int( + * backwards compatibility with old kernels that predate logging all + * inode changes. + */ +- if (ip->i_d.di_version < 3) ++ if (!xfs_sb_version_has_v3inode(&mp->m_sb)) + ip->i_d.di_flushiter++; + + /* Check the inline fork data before we write out. */ +diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h +index e493d491b7cc7..62b963d3b23d9 100644 +--- a/fs/xfs/xfs_inode.h ++++ b/fs/xfs/xfs_inode.h +@@ -177,30 +177,11 @@ xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags) + return ret; + } + +-/* +- * Project quota id helpers (previously projid was 16bit only +- * and using two 16bit values to hold new 32bit projid was chosen +- * to retain compatibility with "old" filesystems). 
+- */ +-static inline prid_t +-xfs_get_projid(struct xfs_inode *ip) +-{ +- return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo; +-} +- +-static inline void +-xfs_set_projid(struct xfs_inode *ip, +- prid_t projid) +-{ +- ip->i_d.di_projid_hi = (uint16_t) (projid >> 16); +- ip->i_d.di_projid_lo = (uint16_t) (projid & 0xffff); +-} +- + static inline prid_t + xfs_get_initial_prid(struct xfs_inode *dp) + { + if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) +- return xfs_get_projid(dp); ++ return dp->i_d.di_projid; + + return XFS_PROJID_DEFAULT; + } +diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c +index 83b8f5655636e..83bf96b6cf5d6 100644 +--- a/fs/xfs/xfs_inode_item.c ++++ b/fs/xfs/xfs_inode_item.c +@@ -125,7 +125,7 @@ xfs_inode_item_size( + + *nvecs += 2; + *nbytes += sizeof(struct xfs_inode_log_format) + +- xfs_log_dinode_size(ip->i_d.di_version); ++ xfs_log_dinode_size(ip->i_mount); + + xfs_inode_item_data_fork_size(iip, nvecs, nbytes); + if (XFS_IFORK_Q(ip)) +@@ -305,13 +305,11 @@ xfs_inode_to_log_dinode( + struct inode *inode = VFS_I(ip); + + to->di_magic = XFS_DINODE_MAGIC; +- +- to->di_version = from->di_version; + to->di_format = from->di_format; +- to->di_uid = from->di_uid; +- to->di_gid = from->di_gid; +- to->di_projid_lo = from->di_projid_lo; +- to->di_projid_hi = from->di_projid_hi; ++ to->di_uid = i_uid_read(inode); ++ to->di_gid = i_gid_read(inode); ++ to->di_projid_lo = from->di_projid & 0xffff; ++ to->di_projid_hi = from->di_projid >> 16; + + memset(to->di_pad, 0, sizeof(to->di_pad)); + memset(to->di_pad3, 0, sizeof(to->di_pad3)); +@@ -339,7 +337,8 @@ xfs_inode_to_log_dinode( + /* log a dummy value to ensure log structure is fully initialised */ + to->di_next_unlinked = NULLAGINO; + +- if (from->di_version == 3) { ++ if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) { ++ to->di_version = 3; + to->di_changecount = inode_peek_iversion(inode); + to->di_crtime.t_sec = from->di_crtime.t_sec; + to->di_crtime.t_nsec = 
from->di_crtime.t_nsec; +@@ -351,6 +350,7 @@ xfs_inode_to_log_dinode( + uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid); + to->di_flushiter = 0; + } else { ++ to->di_version = 2; + to->di_flushiter = from->di_flushiter; + } + } +@@ -370,7 +370,7 @@ xfs_inode_item_format_core( + + dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE); + xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn); +- xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_d.di_version)); ++ xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount)); + } + + /* +@@ -395,8 +395,6 @@ xfs_inode_item_format( + struct xfs_log_iovec *vecp = NULL; + struct xfs_inode_log_format *ilf; + +- ASSERT(ip->i_d.di_version > 1); +- + ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT); + ilf->ilf_type = XFS_LI_INODE; + ilf->ilf_ino = ip->i_ino; +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c +index 7b7a009425e21..e7356e5272608 100644 +--- a/fs/xfs/xfs_ioctl.c ++++ b/fs/xfs/xfs_ioctl.c +@@ -1144,7 +1144,7 @@ xfs_fill_fsxattr( + fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; + fa->fsx_cowextsize = ip->i_d.di_cowextsize << + ip->i_mount->m_sb.sb_blocklog; +- fa->fsx_projid = xfs_get_projid(ip); ++ fa->fsx_projid = ip->i_d.di_projid; + + if (attr) { + if (ip->i_afp) { +@@ -1299,7 +1299,7 @@ xfs_ioctl_setattr_xflags( + + /* diflags2 only valid for v3 inodes. 
*/ + di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags); +- if (di_flags2 && ip->i_d.di_version < 3) ++ if (di_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb)) + return -EINVAL; + + ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags); +@@ -1510,8 +1510,7 @@ xfs_ioctl_setattr_check_cowextsize( + if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE)) + return 0; + +- if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb) || +- ip->i_d.di_version != 3) ++ if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb)) + return -EINVAL; + + if (fa->fsx_cowextsize == 0) +@@ -1572,9 +1571,9 @@ xfs_ioctl_setattr( + * because the i_*dquot fields will get updated anyway. + */ + if (XFS_IS_QUOTA_ON(mp)) { +- code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid, +- ip->i_d.di_gid, fa->fsx_projid, +- XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp); ++ code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid, ++ VFS_I(ip)->i_gid, fa->fsx_projid, ++ XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp); + if (code) + return code; + } +@@ -1597,7 +1596,7 @@ xfs_ioctl_setattr( + } + + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) && +- xfs_get_projid(ip) != fa->fsx_projid) { ++ ip->i_d.di_projid != fa->fsx_projid) { + code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp, + capable(CAP_FOWNER) ? 
XFS_QMOPT_FORCE_RES : 0); + if (code) /* out of quota */ +@@ -1634,13 +1633,12 @@ xfs_ioctl_setattr( + VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID); + + /* Change the ownerships and register project quota modifications */ +- if (xfs_get_projid(ip) != fa->fsx_projid) { ++ if (ip->i_d.di_projid != fa->fsx_projid) { + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { + olddquot = xfs_qm_vop_chown(tp, ip, + &ip->i_pdquot, pdqp); + } +- ASSERT(ip->i_d.di_version > 1); +- xfs_set_projid(ip, fa->fsx_projid); ++ ip->i_d.di_projid = fa->fsx_projid; + } + + /* +@@ -1652,7 +1650,7 @@ xfs_ioctl_setattr( + ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog; + else + ip->i_d.di_extsize = 0; +- if (ip->i_d.di_version == 3 && ++ if (xfs_sb_version_has_v3inode(&mp->m_sb) && + (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)) + ip->i_d.di_cowextsize = fa->fsx_cowextsize >> + mp->m_sb.sb_blocklog; +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c +index 80dd05f8f1afc..a7efc8896e5ee 100644 +--- a/fs/xfs/xfs_iops.c ++++ b/fs/xfs/xfs_iops.c +@@ -517,7 +517,7 @@ xfs_vn_getattr( + stat->blocks = + XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + +- if (ip->i_d.di_version == 3) { ++ if (xfs_sb_version_has_v3inode(&mp->m_sb)) { + if (request_mask & STATX_BTIME) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = ip->i_d.di_crtime.t_sec; +@@ -666,9 +666,7 @@ xfs_setattr_nonsize( + */ + ASSERT(udqp == NULL); + ASSERT(gdqp == NULL); +- error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid), +- xfs_kgid_to_gid(gid), +- xfs_get_projid(ip), ++ error = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid, + qflags, &udqp, &gdqp, NULL); + if (error) + return error; +@@ -737,7 +735,6 @@ xfs_setattr_nonsize( + olddquot1 = xfs_qm_vop_chown(tp, ip, + &ip->i_udquot, udqp); + } +- ip->i_d.di_uid = xfs_kuid_to_uid(uid); + inode->i_uid = uid; + } + if (!gid_eq(igid, gid)) { +@@ -749,7 +746,6 @@ xfs_setattr_nonsize( + olddquot2 = xfs_qm_vop_chown(tp, ip, + &ip->i_gdquot, gdqp); + } 
+- ip->i_d.di_gid = xfs_kgid_to_gid(gid); + inode->i_gid = gid; + } + } +@@ -1288,9 +1284,6 @@ xfs_setup_inode( + /* make the inode look hashed for the writeback code */ + inode_fake_hash(inode); + +- inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid); +- inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid); +- + i_size_write(inode, ip->i_d.di_size); + xfs_diflags_to_iflags(inode, ip); + +diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c +index 884950adbd164..42e93779374c6 100644 +--- a/fs/xfs/xfs_itable.c ++++ b/fs/xfs/xfs_itable.c +@@ -84,10 +84,10 @@ xfs_bulkstat_one_int( + /* xfs_iget returns the following without needing + * further change. + */ +- buf->bs_projectid = xfs_get_projid(ip); ++ buf->bs_projectid = ip->i_d.di_projid; + buf->bs_ino = ino; +- buf->bs_uid = dic->di_uid; +- buf->bs_gid = dic->di_gid; ++ buf->bs_uid = i_uid_read(inode); ++ buf->bs_gid = i_gid_read(inode); + buf->bs_size = dic->di_size; + + buf->bs_nlink = inode->i_nlink; +@@ -110,7 +110,7 @@ xfs_bulkstat_one_int( + buf->bs_forkoff = XFS_IFORK_BOFF(ip); + buf->bs_version = XFS_BULKSTAT_VERSION_V5; + +- if (dic->di_version == 3) { ++ if (xfs_sb_version_has_v3inode(&mp->m_sb)) { + if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE) + buf->bs_cowextsize_blks = dic->di_cowextsize; + } +diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h +index ca15105681cac..4f6f09157f0d9 100644 +--- a/fs/xfs/xfs_linux.h ++++ b/fs/xfs/xfs_linux.h +@@ -163,32 +163,6 @@ struct xstats { + + extern struct xstats xfsstats; + +-/* Kernel uid/gid conversion. These are used to convert to/from the on disk +- * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally. +- * The conversion here is type only, the value will remain the same since we +- * are converting to the init_user_ns. The uid is later mapped to a particular +- * user namespace value when crossing the kernel/user boundary. 
+- */ +-static inline uint32_t xfs_kuid_to_uid(kuid_t uid) +-{ +- return from_kuid(&init_user_ns, uid); +-} +- +-static inline kuid_t xfs_uid_to_kuid(uint32_t uid) +-{ +- return make_kuid(&init_user_ns, uid); +-} +- +-static inline uint32_t xfs_kgid_to_gid(kgid_t gid) +-{ +- return from_kgid(&init_user_ns, gid); +-} +- +-static inline kgid_t xfs_gid_to_kgid(uint32_t gid) +-{ +- return make_kgid(&init_user_ns, gid); +-} +- + static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev) + { + return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev)); +@@ -243,6 +217,12 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count, + #endif /* XFS_WARN */ + #endif /* DEBUG */ + ++#define XFS_IS_CORRUPT(mp, expr) \ ++ (unlikely(expr) ? xfs_corruption_error(#expr, XFS_ERRLEVEL_LOW, (mp), \ ++ NULL, 0, __FILE__, __LINE__, \ ++ __this_address), \ ++ true : false) ++ + #define STATIC static noinline + + #ifdef CONFIG_XFS_RT +diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c +index 6c60cdd10d330..84f6c8628db5d 100644 +--- a/fs/xfs/xfs_log_recover.c ++++ b/fs/xfs/xfs_log_recover.c +@@ -2879,8 +2879,8 @@ xfs_recover_inode_owner_change( + return -ENOMEM; + + /* instantiate the inode */ ++ ASSERT(dip->di_version >= 3); + xfs_inode_from_disk(ip, dip); +- ASSERT(ip->i_d.di_version >= 3); + + error = xfs_iformat_fork(ip, dip); + if (error) +@@ -3018,7 +3018,7 @@ xlog_recover_inode_pass2( + * superblock flag to determine whether we need to look at di_flushiter + * to skip replay when the on disk inode is newer than the log one + */ +- if (!xfs_sb_version_hascrc(&mp->m_sb) && ++ if (!xfs_sb_version_has_v3inode(&mp->m_sb) && + ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) { + /* + * Deal with the wrap case, DI_MAX_FLUSH is less +@@ -3089,7 +3089,7 @@ xlog_recover_inode_pass2( + error = -EFSCORRUPTED; + goto out_release; + } +- isize = xfs_log_dinode_size(ldip->di_version); ++ isize = xfs_log_dinode_size(mp); + if (unlikely(item->ri_buf[1].i_len > 
isize)) { + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", + XFS_ERRLEVEL_LOW, mp, ldip, +diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c +index 2860966af6c20..2277f21c4f14a 100644 +--- a/fs/xfs/xfs_mount.c ++++ b/fs/xfs/xfs_mount.c +@@ -674,6 +674,47 @@ xfs_check_summary_counts( + return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount); + } + ++/* ++ * Flush and reclaim dirty inodes in preparation for unmount. Inodes and ++ * internal inode structures can be sitting in the CIL and AIL at this point, ++ * so we need to unpin them, write them back and/or reclaim them before unmount ++ * can proceed. ++ * ++ * An inode cluster that has been freed can have its buffer still pinned in ++ * memory because the transaction is still sitting in a iclog. The stale inodes ++ * on that buffer will be pinned to the buffer until the transaction hits the ++ * disk and the callbacks run. Pushing the AIL will skip the stale inodes and ++ * may never see the pinned buffer, so nothing will push out the iclog and ++ * unpin the buffer. ++ * ++ * Hence we need to force the log to unpin everything first. However, log ++ * forces don't wait for the discards they issue to complete, so we have to ++ * explicitly wait for them to complete here as well. ++ * ++ * Then we can tell the world we are unmounting so that error handling knows ++ * that the filesystem is going away and we should error out anything that we ++ * have been retrying in the background. This will prevent never-ending ++ * retries in AIL pushing from hanging the unmount. ++ * ++ * Finally, we can push the AIL to clean all the remaining dirty objects, then ++ * reclaim the remaining inodes that are still in memory at this point in time. 
++ */ ++static void ++xfs_unmount_flush_inodes( ++ struct xfs_mount *mp) ++{ ++ xfs_log_force(mp, XFS_LOG_SYNC); ++ xfs_extent_busy_wait_all(mp); ++ flush_workqueue(xfs_discard_wq); ++ ++ mp->m_flags |= XFS_MOUNT_UNMOUNTING; ++ ++ xfs_ail_push_all_sync(mp->m_ail); ++ cancel_delayed_work_sync(&mp->m_reclaim_work); ++ xfs_reclaim_inodes(mp, SYNC_WAIT); ++ xfs_health_unmount(mp); ++} ++ + /* + * This function does the following on an initial mount of a file system: + * - reads the superblock from disk and init the mount struct +@@ -1047,7 +1088,7 @@ xfs_mountfs( + /* Clean out dquots that might be in memory after quotacheck. */ + xfs_qm_unmount(mp); + /* +- * Cancel all delayed reclaim work and reclaim the inodes directly. ++ * Flush all inode reclamation work and flush the log. + * We have to do this /after/ rtunmount and qm_unmount because those + * two will have scheduled delayed reclaim for the rt/quota inodes. + * +@@ -1057,11 +1098,8 @@ xfs_mountfs( + * qm_unmount_quotas and therefore rely on qm_unmount to release the + * quota inodes. + */ +- cancel_delayed_work_sync(&mp->m_reclaim_work); +- xfs_reclaim_inodes(mp, SYNC_WAIT); +- xfs_health_unmount(mp); ++ xfs_unmount_flush_inodes(mp); + out_log_dealloc: +- mp->m_flags |= XFS_MOUNT_UNMOUNTING; + xfs_log_mount_cancel(mp); + out_fail_wait: + if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) +@@ -1102,47 +1140,7 @@ xfs_unmountfs( + xfs_rtunmount_inodes(mp); + xfs_irele(mp->m_rootip); + +- /* +- * We can potentially deadlock here if we have an inode cluster +- * that has been freed has its buffer still pinned in memory because +- * the transaction is still sitting in a iclog. The stale inodes +- * on that buffer will have their flush locks held until the +- * transaction hits the disk and the callbacks run. the inode +- * flush takes the flush lock unconditionally and with nothing to +- * push out the iclog we will never get that unlocked. hence we +- * need to force the log first. 
+- */ +- xfs_log_force(mp, XFS_LOG_SYNC); +- +- /* +- * Wait for all busy extents to be freed, including completion of +- * any discard operation. +- */ +- xfs_extent_busy_wait_all(mp); +- flush_workqueue(xfs_discard_wq); +- +- /* +- * We now need to tell the world we are unmounting. This will allow +- * us to detect that the filesystem is going away and we should error +- * out anything that we have been retrying in the background. This will +- * prevent neverending retries in AIL pushing from hanging the unmount. +- */ +- mp->m_flags |= XFS_MOUNT_UNMOUNTING; +- +- /* +- * Flush all pending changes from the AIL. +- */ +- xfs_ail_push_all_sync(mp->m_ail); +- +- /* +- * And reclaim all inodes. At this point there should be no dirty +- * inodes and none should be pinned or locked, but use synchronous +- * reclaim just to be sure. We can stop background inode reclaim +- * here as well if it is still running. +- */ +- cancel_delayed_work_sync(&mp->m_reclaim_work); +- xfs_reclaim_inodes(mp, SYNC_WAIT); +- xfs_health_unmount(mp); ++ xfs_unmount_flush_inodes(mp); + + xfs_qm_unmount(mp); + +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 6b23ebd3f54f0..6b108a4de08ff 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -331,23 +331,23 @@ xfs_qm_dqattach_locked( + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + + if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) { +- error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, +- doalloc, &ip->i_udquot); ++ error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)), ++ XFS_DQ_USER, doalloc, &ip->i_udquot); + if (error) + goto done; + ASSERT(ip->i_udquot); + } + + if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) { +- error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, +- doalloc, &ip->i_gdquot); ++ error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)), ++ XFS_DQ_GROUP, doalloc, &ip->i_gdquot); + if (error) + goto done; + ASSERT(ip->i_gdquot); + } + + if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) { +- error = 
xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, ++ error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ, + doalloc, &ip->i_pdquot); + if (error) + goto done; +@@ -1630,8 +1630,8 @@ xfs_qm_dqfree_one( + int + xfs_qm_vop_dqalloc( + struct xfs_inode *ip, +- xfs_dqid_t uid, +- xfs_dqid_t gid, ++ kuid_t uid, ++ kgid_t gid, + prid_t prid, + uint flags, + struct xfs_dquot **O_udqpp, +@@ -1639,6 +1639,8 @@ xfs_qm_vop_dqalloc( + struct xfs_dquot **O_pdqpp) + { + struct xfs_mount *mp = ip->i_mount; ++ struct inode *inode = VFS_I(ip); ++ struct user_namespace *user_ns = inode->i_sb->s_user_ns; + struct xfs_dquot *uq = NULL; + struct xfs_dquot *gq = NULL; + struct xfs_dquot *pq = NULL; +@@ -1652,7 +1654,7 @@ xfs_qm_vop_dqalloc( + xfs_ilock(ip, lockflags); + + if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) +- gid = ip->i_d.di_gid; ++ gid = inode->i_gid; + + /* + * Attach the dquot(s) to this inode, doing a dquot allocation +@@ -1667,7 +1669,7 @@ xfs_qm_vop_dqalloc( + } + + if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { +- if (ip->i_d.di_uid != uid) { ++ if (!uid_eq(inode->i_uid, uid)) { + /* + * What we need is the dquot that has this uid, and + * if we send the inode to dqget, the uid of the inode +@@ -1678,7 +1680,8 @@ xfs_qm_vop_dqalloc( + * holding ilock. 
+ */ + xfs_iunlock(ip, lockflags); +- error = xfs_qm_dqget(mp, uid, XFS_DQ_USER, true, &uq); ++ error = xfs_qm_dqget(mp, from_kuid(user_ns, uid), ++ XFS_DQ_USER, true, &uq); + if (error) { + ASSERT(error != -ENOENT); + return error; +@@ -1699,9 +1702,10 @@ xfs_qm_vop_dqalloc( + } + } + if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { +- if (ip->i_d.di_gid != gid) { ++ if (!gid_eq(inode->i_gid, gid)) { + xfs_iunlock(ip, lockflags); +- error = xfs_qm_dqget(mp, gid, XFS_DQ_GROUP, true, &gq); ++ error = xfs_qm_dqget(mp, from_kgid(user_ns, gid), ++ XFS_DQ_GROUP, true, &gq); + if (error) { + ASSERT(error != -ENOENT); + goto error_rele; +@@ -1715,7 +1719,7 @@ xfs_qm_vop_dqalloc( + } + } + if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { +- if (xfs_get_projid(ip) != prid) { ++ if (ip->i_d.di_projid != prid) { + xfs_iunlock(ip, lockflags); + error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ, + true, &pq); +@@ -1827,7 +1831,7 @@ xfs_qm_vop_chown_reserve( + XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; + + if (XFS_IS_UQUOTA_ON(mp) && udqp && +- ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) { ++ i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) { + udq_delblks = udqp; + /* + * If there are delayed allocation blocks, then we have to +@@ -1840,7 +1844,7 @@ xfs_qm_vop_chown_reserve( + } + } + if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && +- ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) { ++ i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) { + gdq_delblks = gdqp; + if (delblks) { + ASSERT(ip->i_gdquot); +@@ -1849,7 +1853,7 @@ xfs_qm_vop_chown_reserve( + } + + if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && +- xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) { ++ ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) { + prjflags = XFS_QMOPT_ENOSPC; + pdq_delblks = pdqp; + if (delblks) { +@@ -1937,20 +1941,21 @@ xfs_qm_vop_create_dqattach( + + if (udqp && XFS_IS_UQUOTA_ON(mp)) { + ASSERT(ip->i_udquot == NULL); +- ASSERT(ip->i_d.di_uid == 
be32_to_cpu(udqp->q_core.d_id)); ++ ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id)); + + ip->i_udquot = xfs_qm_dqhold(udqp); + xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); + } + if (gdqp && XFS_IS_GQUOTA_ON(mp)) { + ASSERT(ip->i_gdquot == NULL); +- ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); ++ ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id)); ++ + ip->i_gdquot = xfs_qm_dqhold(gdqp); + xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); + } + if (pdqp && XFS_IS_PQUOTA_ON(mp)) { + ASSERT(ip->i_pdquot == NULL); +- ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); ++ ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id)); + + ip->i_pdquot = xfs_qm_dqhold(pdqp); + xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1); +diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c +index b784a3751fe25..fc2fa418919f7 100644 +--- a/fs/xfs/xfs_qm_bhv.c ++++ b/fs/xfs/xfs_qm_bhv.c +@@ -60,7 +60,7 @@ xfs_qm_statvfs( + struct xfs_mount *mp = ip->i_mount; + struct xfs_dquot *dqp; + +- if (!xfs_qm_dqget(mp, xfs_get_projid(ip), XFS_DQ_PROJ, false, &dqp)) { ++ if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQ_PROJ, false, &dqp)) { + xfs_fill_statvfs_from_dquot(statp, dqp); + xfs_qm_dqput(dqp); + } +diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h +index efe42ae7a2f38..aa8fc1f55fbd7 100644 +--- a/fs/xfs/xfs_quota.h ++++ b/fs/xfs/xfs_quota.h +@@ -86,7 +86,7 @@ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *, + struct xfs_mount *, struct xfs_dquot *, + struct xfs_dquot *, struct xfs_dquot *, int64_t, long, uint); + +-extern int xfs_qm_vop_dqalloc(struct xfs_inode *, xfs_dqid_t, xfs_dqid_t, ++extern int xfs_qm_vop_dqalloc(struct xfs_inode *, kuid_t, kgid_t, + prid_t, uint, struct xfs_dquot **, struct xfs_dquot **, + struct xfs_dquot **); + extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *, +@@ -109,7 +109,7 @@ extern void xfs_qm_unmount_quotas(struct xfs_mount *); + + #else 
+ static inline int +-xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid, ++xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid, + prid_t prid, uint flags, struct xfs_dquot **udqp, + struct xfs_dquot **gdqp, struct xfs_dquot **pdqp) + { +diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c +index 9e73d2b29911d..e802cbc9daadd 100644 +--- a/fs/xfs/xfs_super.c ++++ b/fs/xfs/xfs_super.c +@@ -490,10 +490,12 @@ xfs_showargs( + seq_printf(m, ",swidth=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); + +- if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) +- seq_puts(m, ",usrquota"); +- else if (mp->m_qflags & XFS_UQUOTA_ACCT) +- seq_puts(m, ",uqnoenforce"); ++ if (mp->m_qflags & XFS_UQUOTA_ACCT) { ++ if (mp->m_qflags & XFS_UQUOTA_ENFD) ++ seq_puts(m, ",usrquota"); ++ else ++ seq_puts(m, ",uqnoenforce"); ++ } + + if (mp->m_qflags & XFS_PQUOTA_ACCT) { + if (mp->m_qflags & XFS_PQUOTA_ENFD) +diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c +index ed66fd2de3273..a2037e22ebdab 100644 +--- a/fs/xfs/xfs_symlink.c ++++ b/fs/xfs/xfs_symlink.c +@@ -191,9 +191,7 @@ xfs_symlink( + /* + * Make sure that we have allocated dquot(s) on disk. + */ +- error = xfs_qm_vop_dqalloc(dp, +- xfs_kuid_to_uid(current_fsuid()), +- xfs_kgid_to_gid(current_fsgid()), prid, ++ error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, + XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, + &udqp, &gdqp, &pdqp); + if (error) +@@ -203,7 +201,7 @@ xfs_symlink( + * The symlink will fit into the inode data fork? + * There can't be any attributes so we get the whole variable part. + */ +- if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version)) ++ if (pathlen <= XFS_LITINO(mp)) + fs_blocks = 0; + else + fs_blocks = xfs_symlink_blocks(mp, pathlen); +@@ -316,6 +314,7 @@ xfs_symlink( + } + ASSERT(pathlen == 0); + } ++ i_size_write(VFS_I(ip), ip->i_d.di_size); + + /* + * Create the directory entry for the symlink. 
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c +index c1238a2dbd6a5..4e43d415161de 100644 +--- a/fs/xfs/xfs_trans_dquot.c ++++ b/fs/xfs/xfs_trans_dquot.c +@@ -15,6 +15,7 @@ + #include "xfs_trans_priv.h" + #include "xfs_quota.h" + #include "xfs_qm.h" ++#include "xfs_error.h" + + STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *); + +@@ -700,9 +701,14 @@ xfs_trans_dqresv( + XFS_TRANS_DQ_RES_INOS, + ninos); + } +- ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); +- ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); +- ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); ++ ++ if (XFS_IS_CORRUPT(mp, ++ dqp->q_res_bcount < be64_to_cpu(dqp->q_core.d_bcount)) || ++ XFS_IS_CORRUPT(mp, ++ dqp->q_res_rtbcount < be64_to_cpu(dqp->q_core.d_rtbcount)) || ++ XFS_IS_CORRUPT(mp, ++ dqp->q_res_icount < be64_to_cpu(dqp->q_core.d_icount))) ++ goto error_corrupt; + + xfs_dqunlock(dqp); + return 0; +@@ -712,6 +718,10 @@ error_return: + if (flags & XFS_QMOPT_ENOSPC) + return -ENOSPC; + return -EDQUOT; ++error_corrupt: ++ xfs_dqunlock(dqp); ++ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); ++ return -EFSCORRUPTED; + } + + +diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h +index 9141f22632864..52c1c6feef899 100644 +--- a/include/linux/ftrace.h ++++ b/include/linux/ftrace.h +@@ -712,7 +712,7 @@ static inline void __ftrace_enabled_restore(int enabled) + #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) + #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) + +-static inline unsigned long get_lock_parent_ip(void) ++static __always_inline unsigned long get_lock_parent_ip(void) + { + unsigned long addr = CALLER_ADDR0; + +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index c7f4526ca64e2..5d50ac2f26525 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -2180,11 +2180,15 @@ out_unlock: + static void cpuset_cancel_attach(struct cgroup_taskset *tset) + { + struct 
cgroup_subsys_state *css; ++ struct cpuset *cs; + + cgroup_taskset_first(tset, &css); ++ cs = css_cs(css); + + percpu_down_write(&cpuset_rwsem); +- css_cs(css)->attach_in_progress--; ++ cs->attach_in_progress--; ++ if (!cs->attach_in_progress) ++ wake_up(&cpuset_attach_wq); + percpu_up_write(&cpuset_rwsem); + } + +diff --git a/kernel/events/core.c b/kernel/events/core.c +index a1c89b675b0b9..1ef924d6a385e 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -10798,7 +10798,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) + /* + * If its not a per-cpu rb, it must be the same task. + */ +- if (output_event->cpu == -1 && output_event->ctx != event->ctx) ++ if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) + goto out; + + /* +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c +index d40ae18fe6617..a5f1dd7b6dc33 100644 +--- a/kernel/irq/irqdomain.c ++++ b/kernel/irq/irqdomain.c +@@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex); + + static struct irq_domain *irq_default_domain; + ++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, ++ unsigned int nr_irqs, int node, void *arg, ++ bool realloc, const struct irq_affinity_desc *affinity); + static void irq_domain_check_hierarchy(struct irq_domain *domain); + + struct irqchip_fwid { +@@ -672,6 +675,34 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) + } + EXPORT_SYMBOL_GPL(irq_create_direct_mapping); + ++static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain, ++ irq_hw_number_t hwirq, ++ const struct irq_affinity_desc *affinity) ++{ ++ struct device_node *of_node = irq_domain_get_of_node(domain); ++ int virq; ++ ++ pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); ++ ++ /* Allocate a virtual interrupt number */ ++ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), ++ affinity); ++ if (virq <= 0) { ++ pr_debug("-> virq allocation 
failed\n"); ++ return 0; ++ } ++ ++ if (irq_domain_associate_locked(domain, virq, hwirq)) { ++ irq_free_desc(virq); ++ return 0; ++ } ++ ++ pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", ++ hwirq, of_node_full_name(of_node), virq); ++ ++ return virq; ++} ++ + /** + * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space + * @domain: domain owning this hardware interrupt or NULL for default domain +@@ -684,47 +715,31 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); + * on the number returned from that call. + */ + unsigned int irq_create_mapping_affinity(struct irq_domain *domain, +- irq_hw_number_t hwirq, +- const struct irq_affinity_desc *affinity) ++ irq_hw_number_t hwirq, ++ const struct irq_affinity_desc *affinity) + { +- struct device_node *of_node; + int virq; + +- pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); +- +- /* Look for default domain if nececssary */ ++ /* Look for default domain if necessary */ + if (domain == NULL) + domain = irq_default_domain; + if (domain == NULL) { + WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq); + return 0; + } +- pr_debug("-> using domain @%p\n", domain); + +- of_node = irq_domain_get_of_node(domain); ++ mutex_lock(&irq_domain_mutex); + + /* Check if mapping already exists */ + virq = irq_find_mapping(domain, hwirq); + if (virq) { +- pr_debug("-> existing mapping on virq %d\n", virq); +- return virq; +- } +- +- /* Allocate a virtual interrupt number */ +- virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), +- affinity); +- if (virq <= 0) { +- pr_debug("-> virq allocation failed\n"); +- return 0; +- } +- +- if (irq_domain_associate(domain, virq, hwirq)) { +- irq_free_desc(virq); +- return 0; ++ pr_debug("existing mapping on virq %d\n", virq); ++ goto out; + } + +- pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", +- hwirq, of_node_full_name(of_node), virq); ++ virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity); 
++out: ++ mutex_unlock(&irq_domain_mutex); + + return virq; + } +@@ -828,6 +843,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) + if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK)) + type &= IRQ_TYPE_SENSE_MASK; + ++ mutex_lock(&irq_domain_mutex); ++ + /* + * If we've already configured this interrupt, + * don't do it again, or hell will break loose. +@@ -840,7 +857,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) + * interrupt number. + */ + if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq)) +- return virq; ++ goto out; + + /* + * If the trigger type has not been set yet, then set +@@ -848,35 +865,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) + */ + if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) { + irq_data = irq_get_irq_data(virq); +- if (!irq_data) +- return 0; ++ if (!irq_data) { ++ virq = 0; ++ goto out; ++ } + + irqd_set_trigger_type(irq_data, type); +- return virq; ++ goto out; + } + + pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n", + hwirq, of_node_full_name(to_of_node(fwspec->fwnode))); +- return 0; ++ virq = 0; ++ goto out; + } + + if (irq_domain_is_hierarchy(domain)) { +- virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec); +- if (virq <= 0) +- return 0; ++ virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE, ++ fwspec, false, NULL); ++ if (virq <= 0) { ++ virq = 0; ++ goto out; ++ } + } else { + /* Create mapping */ +- virq = irq_create_mapping(domain, hwirq); ++ virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL); + if (!virq) +- return virq; ++ goto out; + } + + irq_data = irq_get_irq_data(virq); +- if (WARN_ON(!irq_data)) +- return 0; ++ if (WARN_ON(!irq_data)) { ++ virq = 0; ++ goto out; ++ } + + /* Store trigger type */ + irqd_set_trigger_type(irq_data, type); ++out: ++ mutex_unlock(&irq_domain_mutex); + + return virq; + } +@@ -1344,6 +1371,45 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, + return 
domain->ops->alloc(domain, irq_base, nr_irqs, arg); + } + ++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, ++ unsigned int nr_irqs, int node, void *arg, ++ bool realloc, const struct irq_affinity_desc *affinity) ++{ ++ int i, ret, virq; ++ ++ if (realloc && irq_base >= 0) { ++ virq = irq_base; ++ } else { ++ virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node, ++ affinity); ++ if (virq < 0) { ++ pr_debug("cannot allocate IRQ(base %d, count %d)\n", ++ irq_base, nr_irqs); ++ return virq; ++ } ++ } ++ ++ if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { ++ pr_debug("cannot allocate memory for IRQ%d\n", virq); ++ ret = -ENOMEM; ++ goto out_free_desc; ++ } ++ ++ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg); ++ if (ret < 0) ++ goto out_free_irq_data; ++ for (i = 0; i < nr_irqs; i++) ++ irq_domain_insert_irq(virq + i); ++ ++ return virq; ++ ++out_free_irq_data: ++ irq_domain_free_irq_data(virq, nr_irqs); ++out_free_desc: ++ irq_free_descs(virq, nr_irqs); ++ return ret; ++} ++ + /** + * __irq_domain_alloc_irqs - Allocate IRQs from domain + * @domain: domain to allocate from +@@ -1370,7 +1436,7 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc, const struct irq_affinity_desc *affinity) + { +- int i, ret, virq; ++ int ret; + + if (domain == NULL) { + domain = irq_default_domain; +@@ -1378,40 +1444,11 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + return -EINVAL; + } + +- if (realloc && irq_base >= 0) { +- virq = irq_base; +- } else { +- virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node, +- affinity); +- if (virq < 0) { +- pr_debug("cannot allocate IRQ(base %d, count %d)\n", +- irq_base, nr_irqs); +- return virq; +- } +- } +- +- if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { +- pr_debug("cannot allocate memory for IRQ%d\n", virq); +- ret = -ENOMEM; +- goto out_free_desc; +- } +- + 
mutex_lock(&irq_domain_mutex); +- ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg); +- if (ret < 0) { +- mutex_unlock(&irq_domain_mutex); +- goto out_free_irq_data; +- } +- for (i = 0; i < nr_irqs; i++) +- irq_domain_insert_irq(virq + i); ++ ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg, ++ realloc, affinity); + mutex_unlock(&irq_domain_mutex); + +- return virq; +- +-out_free_irq_data: +- irq_domain_free_irq_data(virq, nr_irqs); +-out_free_desc: +- irq_free_descs(virq, nr_irqs); + return ret; + } + +@@ -1772,6 +1809,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, + irq_set_handler_data(virq, handler_data); + } + ++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, ++ unsigned int nr_irqs, int node, void *arg, ++ bool realloc, const struct irq_affinity_desc *affinity) ++{ ++ return -EINVAL; ++} ++ + static void irq_domain_check_hierarchy(struct irq_domain *domain) + { + } +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 58809fffc8171..869eeba2005af 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -2532,6 +2532,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) + if (RB_WARN_ON(cpu_buffer, + rb_is_reader_page(cpu_buffer->tail_page))) + return; ++ /* ++ * No need for a memory barrier here, as the update ++ * of the tail_page did it for this page. ++ */ + local_set(&cpu_buffer->commit_page->page->commit, + rb_page_write(cpu_buffer->commit_page)); + rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); +@@ -2545,6 +2549,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) + while (rb_commit_index(cpu_buffer) != + rb_page_write(cpu_buffer->commit_page)) { + ++ /* Make sure the readers see the content of what is committed. 
*/ ++ smp_wmb(); + local_set(&cpu_buffer->commit_page->page->commit, + rb_page_write(cpu_buffer->commit_page)); + RB_WARN_ON(cpu_buffer, +@@ -3920,7 +3926,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + + /* + * Make sure we see any padding after the write update +- * (see rb_reset_tail()) ++ * (see rb_reset_tail()). ++ * ++ * In addition, a writer may be writing on the reader page ++ * if the page has not been fully filled, so the read barrier ++ * is also needed to make sure we see the content of what is ++ * committed by the writer (see rb_set_commit_to_write()). + */ + smp_rmb(); + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 306fbe14747bf..d068124815bc3 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -8542,6 +8542,7 @@ static int __remove_instance(struct trace_array *tr) + ftrace_destroy_function_files(tr); + tracefs_remove_recursive(tr->dir); + free_trace_buffers(tr); ++ clear_tracing_err_log(tr); + + for (i = 0; i < tr->nr_topts; i++) { + kfree(tr->topts[i].topts); +diff --git a/mm/swapfile.c b/mm/swapfile.c +index d6bdacaedc95c..5e444be061a86 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -672,6 +672,7 @@ static void __del_from_avail_list(struct swap_info_struct *p) + { + int nid; + ++ assert_spin_locked(&p->lock); + for_each_node(nid) + plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); + } +@@ -2579,8 +2580,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) + spin_unlock(&swap_lock); + goto out_dput; + } +- del_from_avail_list(p); + spin_lock(&p->lock); ++ del_from_avail_list(p); + if (p->prio < 0) { + struct swap_info_struct *si = p; + int nid; +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c +index 67e9a7085e13b..4a16c5dd1576d 100644 +--- a/net/9p/trans_xen.c ++++ b/net/9p/trans_xen.c +@@ -299,6 +299,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv) + write_unlock(&xen_9pfs_lock); + + for (i = 0; i < priv->num_rings; i++) { ++ struct 
xen_9pfs_dataring *ring = &priv->rings[i]; ++ ++ cancel_work_sync(&ring->work); ++ + if (!priv->rings[i].intf) + break; + if (priv->rings[i].irq > 0) +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index ac98e3b37ab47..a94ec229ad2ea 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session) + static void hidp_del_timer(struct hidp_session *session) + { + if (session->idle_to > 0) +- del_timer(&session->timer); ++ del_timer_sync(&session->timer); + } + + static void hidp_process_report(struct hidp_session *session, int type, +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 4f286c76a50d4..3c559a177761b 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -4368,33 +4368,27 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, + + BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); + +- mutex_lock(&conn->chan_lock); +- +- chan = __l2cap_get_chan_by_scid(conn, dcid); ++ chan = l2cap_get_chan_by_scid(conn, dcid); + if (!chan) { +- mutex_unlock(&conn->chan_lock); + cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid); + return 0; + } + +- l2cap_chan_hold(chan); +- l2cap_chan_lock(chan); +- + rsp.dcid = cpu_to_le16(chan->scid); + rsp.scid = cpu_to_le16(chan->dcid); + l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); + + chan->ops->set_shutdown(chan); + ++ mutex_lock(&conn->chan_lock); + l2cap_chan_del(chan, ECONNRESET); ++ mutex_unlock(&conn->chan_lock); + + chan->ops->close(chan); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + +- mutex_unlock(&conn->chan_lock); +- + return 0; + } + +@@ -4414,33 +4408,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, + + BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); + +- mutex_lock(&conn->chan_lock); +- +- chan = __l2cap_get_chan_by_scid(conn, scid); ++ chan = l2cap_get_chan_by_scid(conn, scid); + if (!chan) { + 
mutex_unlock(&conn->chan_lock); + return 0; + } + +- l2cap_chan_hold(chan); +- l2cap_chan_lock(chan); +- + if (chan->state != BT_DISCONN) { + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); +- mutex_unlock(&conn->chan_lock); + return 0; + } + ++ mutex_lock(&conn->chan_lock); + l2cap_chan_del(chan, 0); ++ mutex_unlock(&conn->chan_lock); + + chan->ops->close(chan); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + +- mutex_unlock(&conn->chan_lock); +- + return 0; + } + +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c +index 9c8c7c5dc9c36..42fb83605d4c2 100644 +--- a/net/can/j1939/transport.c ++++ b/net/can/j1939/transport.c +@@ -600,7 +600,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv, + /* reserve CAN header */ + skb_reserve(skb, offsetof(struct can_frame, data)); + +- memcpy(skb->cb, re_skcb, sizeof(skb->cb)); ++ /* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */ ++ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb)); ++ ++ memcpy(skb->cb, re_skcb, sizeof(*re_skcb)); + skcb = j1939_skb_to_cb(skb); + if (swap_src_dst) + j1939_skbcb_swap(skcb); +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index 78bbb912e5025..9b263a5c0f36f 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -136,6 +136,20 @@ static void queue_process(struct work_struct *work) + } + } + ++static int netif_local_xmit_active(struct net_device *dev) ++{ ++ int i; ++ ++ for (i = 0; i < dev->num_tx_queues; i++) { ++ struct netdev_queue *txq = netdev_get_tx_queue(dev, i); ++ ++ if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id()) ++ return 1; ++ } ++ ++ return 0; ++} ++ + static void poll_one_napi(struct napi_struct *napi) + { + int work; +@@ -182,7 +196,10 @@ void netpoll_poll_dev(struct net_device *dev) + if (!ni || down_trylock(&ni->dev_lock)) + return; + +- if (!netif_running(dev)) { ++ /* Some drivers will take the same locks in poll and xmit, ++ * we can't poll if local CPU is already in xmit. 
++ */ ++ if (!netif_running(dev) || netif_local_xmit_active(dev)) { + up(&ni->dev_lock); + return; + } +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index b44f51e404aee..ac82a4158b86b 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -754,6 +754,11 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, + room = 576; + room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen; + room -= sizeof(struct icmphdr); ++ /* Guard against tiny mtu. We need to include at least one ++ * IP network header for this message to make any sense. ++ */ ++ if (room <= (int)sizeof(struct iphdr)) ++ goto ende; + + icmp_param.data_len = skb_in->len - icmp_param.offset; + if (icmp_param.data_len > room) +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 457eb07be4828..8231a7a3dd035 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1855,8 +1855,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, + IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); + if (proto == IPPROTO_ICMPV6) { + struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); ++ u8 icmp6_type; + +- ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type); ++ if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl) ++ icmp6_type = fl6->fl6_icmp_type; ++ else ++ icmp6_type = icmp6_hdr(skb)->icmp6_type; ++ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type); + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); + } + +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index fd1ce0405b7ea..3777ab1273f5a 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -1283,9 +1283,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + msg->msg_name = &sin; + msg->msg_namelen = sizeof(sin); + do_udp_sendmsg: +- if (__ipv6_only_sock(sk)) +- return -ENETUNREACH; +- return udp_sendmsg(sk, msg, len); ++ err = __ipv6_only_sock(sk) ? 
++ -ENETUNREACH : udp_sendmsg(sk, msg, len); ++ msg->msg_name = sin6; ++ msg->msg_namelen = addr_len; ++ return err; + } + } + +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index 4d6890250337d..0f97c6fcec174 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -1023,7 +1023,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta) + list_del_rcu(&sta->list); + sta->removed = true; + +- drv_sta_pre_rcu_remove(local, sta->sdata, sta); ++ if (sta->uploaded) ++ drv_sta_pre_rcu_remove(local, sta->sdata, sta); + + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && + rcu_access_pointer(sdata->u.vlan.sta) == sta) +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index 1f055c21be4cf..4250f3cf30e72 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -1116,6 +1116,7 @@ static void attach_default_qdiscs(struct net_device *dev) + qdisc->ops->attach(qdisc); + } + } ++ qdisc = rtnl_dereference(dev->qdisc); + + #ifdef CONFIG_NET_SCHED + if (qdisc != &noop_qdisc) +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index c76b40322ac7d..36db659a0f7f4 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -1850,6 +1850,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); + if (err) + goto err; ++ if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) { ++ err = -EINVAL; ++ goto err; ++ } + } + + if (sctp_state(asoc, CLOSED)) { +diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c +index 40c40be23fcb7..c982f99099dec 100644 +--- a/net/sctp/stream_interleave.c ++++ b/net/sctp/stream_interleave.c +@@ -1165,7 +1165,8 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn) + + #define _sctp_walk_ifwdtsn(pos, chunk, end) \ + for (pos = chunk->subh.ifwdtsn_hdr->skip; \ +- (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++) ++ (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip 
+ (end) - \ ++ sizeof(struct sctp_ifwdtsn_skip); pos++) + + #define sctp_walk_ifwdtsn(pos, ch) \ + _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \ +diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c +index 5c04ba7d456b2..5b47e33632399 100644 +--- a/net/sunrpc/svcauth_unix.c ++++ b/net/sunrpc/svcauth_unix.c +@@ -428,14 +428,23 @@ static int unix_gid_hash(kuid_t uid) + return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS); + } + +-static void unix_gid_put(struct kref *kref) ++static void unix_gid_free(struct rcu_head *rcu) + { +- struct cache_head *item = container_of(kref, struct cache_head, ref); +- struct unix_gid *ug = container_of(item, struct unix_gid, h); ++ struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu); ++ struct cache_head *item = &ug->h; ++ + if (test_bit(CACHE_VALID, &item->flags) && + !test_bit(CACHE_NEGATIVE, &item->flags)) + put_group_info(ug->gi); +- kfree_rcu(ug, rcu); ++ kfree(ug); ++} ++ ++static void unix_gid_put(struct kref *kref) ++{ ++ struct cache_head *item = container_of(kref, struct cache_head, ref); ++ struct unix_gid *ug = container_of(item, struct unix_gid, h); ++ ++ call_rcu(&ug->rcu, unix_gid_free); + } + + static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew) +diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c +index adf69a520b800..89918c6003e67 100644 +--- a/sound/firewire/tascam/tascam-stream.c ++++ b/sound/firewire/tascam/tascam-stream.c +@@ -465,7 +465,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate) + + err = amdtp_domain_start(&tscm->domain); + if (err < 0) +- return err; ++ goto error; + + if (!amdtp_stream_wait_callback(&tscm->rx_stream, + CALLBACK_TIMEOUT) || +diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c +index bac4f0036cd6f..e8947a07725ce 100644 +--- a/sound/i2c/cs8427.c ++++ b/sound/i2c/cs8427.c +@@ -553,10 +553,13 @@ int snd_cs8427_iec958_active(struct 
snd_i2c_device *cs8427, int active) + if (snd_BUG_ON(!cs8427)) + return -ENXIO; + chip = cs8427->private_data; +- if (active) ++ if (active) { + memcpy(chip->playback.pcm_status, + chip->playback.def_status, 24); +- chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; ++ chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; ++ } else { ++ chip->playback.pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; ++ } + snd_ctl_notify(cs8427->bus->card, + SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, + &chip->playback.pcm_ctl->id); +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c +index 2cea3d3ee54dc..81590de8d78bf 100644 +--- a/sound/pci/emu10k1/emupcm.c ++++ b/sound/pci/emu10k1/emupcm.c +@@ -1244,7 +1244,7 @@ static int snd_emu10k1_capture_mic_close(struct snd_pcm_substream *substream) + { + struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream); + +- emu->capture_interrupt = NULL; ++ emu->capture_mic_interrupt = NULL; + emu->pcm_capture_mic_substream = NULL; + return 0; + } +@@ -1352,7 +1352,7 @@ static int snd_emu10k1_capture_efx_close(struct snd_pcm_substream *substream) + { + struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream); + +- emu->capture_interrupt = NULL; ++ emu->capture_efx_interrupt = NULL; + emu->pcm_capture_efx_substream = NULL; + return 0; + } +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 13238cf7aa52f..b6b1440cc04a6 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2574,6 +2574,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), + SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), ++ SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", 
ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index e42a6c5c1ba3e..7fa238e9a7827 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -1701,6 +1701,7 @@ static const struct snd_pci_quirk stac925x_fixup_tbl[] = { + }; + + static const struct hda_pintbl ref92hd73xx_pin_configs[] = { ++ // Port A-H + { 0x0a, 0x02214030 }, + { 0x0b, 0x02a19040 }, + { 0x0c, 0x01a19020 }, +@@ -1709,9 +1710,12 @@ static const struct hda_pintbl ref92hd73xx_pin_configs[] = { + { 0x0f, 0x01014010 }, + { 0x10, 0x01014020 }, + { 0x11, 0x01014030 }, ++ // CD in + { 0x12, 0x02319040 }, ++ // Digial Mic ins + { 0x13, 0x90a000f0 }, + { 0x14, 0x90a000f0 }, ++ // Digital outs + { 0x22, 0x01452050 }, + { 0x23, 0x01452050 }, + {} +@@ -1752,6 +1756,7 @@ static const struct hda_pintbl alienware_m17x_pin_configs[] = { + }; + + static const struct hda_pintbl intel_dg45id_pin_configs[] = { ++ // Analog outputs + { 0x0a, 0x02214230 }, + { 0x0b, 0x02A19240 }, + { 0x0c, 0x01013214 }, +@@ -1759,6 +1764,9 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = { + { 0x0e, 0x01A19250 }, + { 0x0f, 0x01011212 }, + { 0x10, 0x01016211 }, ++ // Digital output ++ { 0x22, 0x01451380 }, ++ { 0x23, 0x40f000f0 }, + {} + }; + +@@ -1949,6 +1957,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { + "DFI LanParty", STAC_92HD73XX_REF), + SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101, + "DFI LanParty", STAC_92HD73XX_REF), ++ SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5001, ++ "Intel DP45SG", STAC_92HD73XX_INTEL), + SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002, + "Intel DG45ID", STAC_92HD73XX_INTEL), + SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003, |