author     Anthony G. Basile <blueness@gentoo.org>   2015-12-16 09:31:43 -0500
committer  Anthony G. Basile <blueness@gentoo.org>   2015-12-16 09:31:43 -0500
commit     16ead3ed0d7d1c942d849ed4ea082f442467fea6 (patch)
tree       c2c985d838bf870566ab249bc9d124c9655ab3d4
parent     grsecurity-3.1-4.2.7-201512092320 (diff)
download   hardened-patchset-16ead3ed0d7d1c942d849ed4ea082f442467fea6.tar.gz
           hardened-patchset-16ead3ed0d7d1c942d849ed4ea082f442467fea6.tar.bz2
           hardened-patchset-16ead3ed0d7d1c942d849ed4ea082f442467fea6.zip

grsecurity-3.1-4.3.3-201512151908 (tag: 20151215)
-rw-r--r--  4.2.7/1006_linux-4.2.7.patch  4131
-rw-r--r--  4.3.3/0000_README (renamed from 4.2.7/0000_README)  6
-rw-r--r--  4.3.3/1002_linux-4.3.3.patch  4424
-rw-r--r--  4.3.3/4420_grsecurity-3.1-4.3.3-201512151908.patch (renamed from 4.2.7/4420_grsecurity-3.1-4.2.7-201512092320.patch)  16428
-rw-r--r--  4.3.3/4425_grsec_remove_EI_PAX.patch (renamed from 4.2.7/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  4.3.3/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.2.7/4427_force_XATTR_PAX_tmpfs.patch)  4
-rw-r--r--  4.3.3/4430_grsec-remove-localversion-grsec.patch (renamed from 4.2.7/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  4.3.3/4435_grsec-mute-warnings.patch (renamed from 4.2.7/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  4.3.3/4440_grsec-remove-protected-paths.patch (renamed from 4.2.7/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  4.3.3/4450_grsec-kconfig-default-gids.patch (renamed from 4.2.7/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  4.3.3/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.2.7/4465_selinux-avc_audit-log-curr_ip.patch)  2
-rw-r--r--  4.3.3/4470_disable-compat_vdso.patch (renamed from 4.2.7/4470_disable-compat_vdso.patch)  2
-rw-r--r--  4.3.3/4475_emutramp_default_on.patch (renamed from 4.2.7/4475_emutramp_default_on.patch)  0
13 files changed, 12204 insertions, 12793 deletions
diff --git a/4.2.7/1006_linux-4.2.7.patch b/4.2.7/1006_linux-4.2.7.patch
deleted file mode 100644
index ba25fa7..0000000
--- a/4.2.7/1006_linux-4.2.7.patch
+++ /dev/null
@@ -1,4131 +0,0 @@
-diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
-index 0815eac..e12f344 100644
---- a/Documentation/devicetree/bindings/usb/dwc3.txt
-+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
-@@ -35,6 +35,8 @@ Optional properties:
- LTSSM during USB3 Compliance mode.
- - snps,dis_u3_susphy_quirk: when set core will disable USB3 suspend phy.
- - snps,dis_u2_susphy_quirk: when set core will disable USB2 suspend phy.
-+ - snps,dis_enblslpm_quirk: when set clears the enblslpm in GUSB2PHYCFG,
-+ disabling the suspend signal to the PHY.
- - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal
- utmi_l1_suspend_n, false when asserts utmi_sleep_n
- - snps,hird-threshold: HIRD threshold
-diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
-index 6f7fafd..3e2844e 100644
---- a/Documentation/filesystems/proc.txt
-+++ b/Documentation/filesystems/proc.txt
-@@ -140,7 +140,8 @@ Table 1-1: Process specific entries in /proc
- stat Process status
- statm Process memory status information
- status Process status in human readable form
-- wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan
-+ wchan Present with CONFIG_KALLSYMS=y: it shows the kernel function
-+ symbol the task is blocked in - or "0" if not blocked.
- pagemap Page table
- stack Report full stack trace, enable via CONFIG_STACKTRACE
- smaps a extension based on maps, showing the memory consumption of
-@@ -310,7 +311,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
- blocked bitmap of blocked signals
- sigign bitmap of ignored signals
- sigcatch bitmap of caught signals
-- wchan address where process went to sleep
-+ 0 (place holder, used to be the wchan address, use /proc/PID/wchan instead)
- 0 (place holder)
- 0 (place holder)
- exit_signal signal to send to parent thread on exit
-diff --git a/Makefile b/Makefile
-index 9ef3739..f5014ea 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 2
--SUBLEVEL = 6
-+SUBLEVEL = 7
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
-index b69be5c..8c603fd 100644
---- a/arch/arm/boot/dts/imx27.dtsi
-+++ b/arch/arm/boot/dts/imx27.dtsi
-@@ -477,7 +477,10 @@
- compatible = "fsl,imx27-usb";
- reg = <0x10024000 0x200>;
- interrupts = <56>;
-- clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
-+ clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
-+ <&clks IMX27_CLK_USB_AHB_GATE>,
-+ <&clks IMX27_CLK_USB_DIV>;
-+ clock-names = "ipg", "ahb", "per";
- fsl,usbmisc = <&usbmisc 0>;
- status = "disabled";
- };
-@@ -486,7 +489,10 @@
- compatible = "fsl,imx27-usb";
- reg = <0x10024200 0x200>;
- interrupts = <54>;
-- clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
-+ clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
-+ <&clks IMX27_CLK_USB_AHB_GATE>,
-+ <&clks IMX27_CLK_USB_DIV>;
-+ clock-names = "ipg", "ahb", "per";
- fsl,usbmisc = <&usbmisc 1>;
- dr_mode = "host";
- status = "disabled";
-@@ -496,7 +502,10 @@
- compatible = "fsl,imx27-usb";
- reg = <0x10024400 0x200>;
- interrupts = <55>;
-- clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
-+ clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
-+ <&clks IMX27_CLK_USB_AHB_GATE>,
-+ <&clks IMX27_CLK_USB_DIV>;
-+ clock-names = "ipg", "ahb", "per";
- fsl,usbmisc = <&usbmisc 2>;
- dr_mode = "host";
- status = "disabled";
-@@ -506,7 +515,6 @@
- #index-cells = <1>;
- compatible = "fsl,imx27-usbmisc";
- reg = <0x10024600 0x200>;
-- clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
- };
-
- sahara2: sahara@10025000 {
-diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
-index 5771a14..23d645d 100644
---- a/arch/arm/boot/dts/omap5-uevm.dts
-+++ b/arch/arm/boot/dts/omap5-uevm.dts
-@@ -31,6 +31,24 @@
- regulator-max-microvolt = <3000000>;
- };
-
-+ mmc3_pwrseq: sdhci0_pwrseq {
-+ compatible = "mmc-pwrseq-simple";
-+ clocks = <&clk32kgaudio>;
-+ clock-names = "ext_clock";
-+ };
-+
-+ vmmcsdio_fixed: fixedregulator-mmcsdio {
-+ compatible = "regulator-fixed";
-+ regulator-name = "vmmcsdio_fixed";
-+ regulator-min-microvolt = <1800000>;
-+ regulator-max-microvolt = <1800000>;
-+ gpio = <&gpio5 12 GPIO_ACTIVE_HIGH>; /* gpio140 WLAN_EN */
-+ enable-active-high;
-+ startup-delay-us = <70000>;
-+ pinctrl-names = "default";
-+ pinctrl-0 = <&wlan_pins>;
-+ };
-+
- /* HS USB Host PHY on PORT 2 */
- hsusb2_phy: hsusb2_phy {
- compatible = "usb-nop-xceiv";
-@@ -197,12 +215,20 @@
- >;
- };
-
-- mcspi4_pins: pinmux_mcspi4_pins {
-+ mmc3_pins: pinmux_mmc3_pins {
-+ pinctrl-single,pins = <
-+ OMAP5_IOPAD(0x01a4, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_clk */
-+ OMAP5_IOPAD(0x01a6, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_cmd */
-+ OMAP5_IOPAD(0x01a8, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data0 */
-+ OMAP5_IOPAD(0x01aa, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data1 */
-+ OMAP5_IOPAD(0x01ac, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data2 */
-+ OMAP5_IOPAD(0x01ae, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data3 */
-+ >;
-+ };
-+
-+ wlan_pins: pinmux_wlan_pins {
- pinctrl-single,pins = <
-- 0x164 (PIN_INPUT | MUX_MODE1) /* mcspi4_clk */
-- 0x168 (PIN_INPUT | MUX_MODE1) /* mcspi4_simo */
-- 0x16a (PIN_INPUT | MUX_MODE1) /* mcspi4_somi */
-- 0x16c (PIN_INPUT | MUX_MODE1) /* mcspi4_cs0 */
-+ OMAP5_IOPAD(0x1bc, PIN_OUTPUT | MUX_MODE6) /* mcspi1_clk.gpio5_140 */
- >;
- };
-
-@@ -276,6 +302,12 @@
- 0x1A (PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
- >;
- };
-+
-+ wlcore_irq_pin: pinmux_wlcore_irq_pin {
-+ pinctrl-single,pins = <
-+ OMAP5_IOPAD(0x040, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
-+ >;
-+ };
- };
-
- &mmc1 {
-@@ -290,8 +322,25 @@
- };
-
- &mmc3 {
-+ vmmc-supply = <&vmmcsdio_fixed>;
-+ mmc-pwrseq = <&mmc3_pwrseq>;
- bus-width = <4>;
-- ti,non-removable;
-+ non-removable;
-+ cap-power-off-card;
-+ pinctrl-names = "default";
-+ pinctrl-0 = <&mmc3_pins &wlcore_irq_pin>;
-+ interrupts-extended = <&gic GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
-+ &omap5_pmx_core 0x168>;
-+
-+ #address-cells = <1>;
-+ #size-cells = <0>;
-+ wlcore: wlcore@2 {
-+ compatible = "ti,wl1271";
-+ reg = <2>;
-+ interrupt-parent = <&gpio1>;
-+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>; /* gpio 14 */
-+ ref-clock-frequency = <26000000>;
-+ };
- };
-
- &mmc4 {
-@@ -591,11 +640,6 @@
- pinctrl-0 = <&mcspi3_pins>;
- };
-
--&mcspi4 {
-- pinctrl-names = "default";
-- pinctrl-0 = <&mcspi4_pins>;
--};
--
- &uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins>;
-diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
-index 3ee22ee..1ba10e4 100644
---- a/arch/arm/boot/dts/sama5d4.dtsi
-+++ b/arch/arm/boot/dts/sama5d4.dtsi
-@@ -939,11 +939,11 @@
- reg = <0xf8018000 0x4000>;
- interrupts = <33 IRQ_TYPE_LEVEL_HIGH 6>;
- dmas = <&dma1
-- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-- AT91_XDMAC_DT_PERID(4)>,
-+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
-+ | AT91_XDMAC_DT_PERID(4))>,
- <&dma1
-- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
-- AT91_XDMAC_DT_PERID(5)>;
-+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
-+ | AT91_XDMAC_DT_PERID(5))>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c1>;
-diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
-index d0cfada..18f26ca 100644
---- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
-+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
-@@ -184,18 +184,18 @@
- regulator-name = "vcc-3v0";
- };
-
-- vdd_cpu: dcdc2 {
-+ vdd_gpu: dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1320000>;
-- regulator-name = "vdd-cpu";
-+ regulator-name = "vdd-gpu";
- };
-
-- vdd_gpu: dcdc3 {
-+ vdd_cpu: dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <700000>;
- regulator-max-microvolt = <1320000>;
-- regulator-name = "vdd-gpu";
-+ regulator-name = "vdd-cpu";
- };
-
- vdd_sys_dll: dcdc4 {
-diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
-index 873dbfc..56fc339 100644
---- a/arch/arm/common/edma.c
-+++ b/arch/arm/common/edma.c
-@@ -406,7 +406,8 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
- BIT(slot));
- if (edma_cc[ctlr]->intr_data[channel].callback)
- edma_cc[ctlr]->intr_data[channel].callback(
-- channel, EDMA_DMA_COMPLETE,
-+ EDMA_CTLR_CHAN(ctlr, channel),
-+ EDMA_DMA_COMPLETE,
- edma_cc[ctlr]->intr_data[channel].data);
- }
- } while (sh_ipr);
-@@ -460,7 +461,8 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
- if (edma_cc[ctlr]->intr_data[k].
- callback) {
- edma_cc[ctlr]->intr_data[k].
-- callback(k,
-+ callback(
-+ EDMA_CTLR_CHAN(ctlr, k),
- EDMA_DMA_CC_ERROR,
- edma_cc[ctlr]->intr_data
- [k].data);
-diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
-index 53c15de..6a9851e 100644
---- a/arch/arm/include/asm/irq.h
-+++ b/arch/arm/include/asm/irq.h
-@@ -35,6 +35,11 @@ extern void (*handle_arch_irq)(struct pt_regs *);
- extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
- #endif
-
-+static inline int nr_legacy_irqs(void)
-+{
-+ return NR_IRQS_LEGACY;
-+}
-+
- #endif
-
- #endif
-diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
-index 0d95f48..a25defd 100644
---- a/arch/arm/mach-at91/pm_suspend.S
-+++ b/arch/arm/mach-at91/pm_suspend.S
-@@ -80,6 +80,8 @@ tmp2 .req r5
- * @r2: base address of second SDRAM Controller or 0 if not present
- * @r3: pm information
- */
-+/* at91_pm_suspend_in_sram must be 8-byte aligned per the requirements of fncpy() */
-+ .align 3
- ENTRY(at91_pm_suspend_in_sram)
- /* Save registers on stack */
- stmfd sp!, {r4 - r12, lr}
-diff --git a/arch/arm/mach-pxa/include/mach/pxa27x.h b/arch/arm/mach-pxa/include/mach/pxa27x.h
-index 599b925..1a42919 100644
---- a/arch/arm/mach-pxa/include/mach/pxa27x.h
-+++ b/arch/arm/mach-pxa/include/mach/pxa27x.h
-@@ -19,7 +19,7 @@
- #define ARB_CORE_PARK (1<<24) /* Be parked with core when idle */
- #define ARB_LOCK_FLAG (1<<23) /* Only Locking masters gain access to the bus */
-
--extern int __init pxa27x_set_pwrmode(unsigned int mode);
-+extern int pxa27x_set_pwrmode(unsigned int mode);
- extern void pxa27x_cpu_pm_enter(suspend_state_t state);
-
- #endif /* __MACH_PXA27x_H */
-diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
-index b5abdeb..aa97547 100644
---- a/arch/arm/mach-pxa/pxa27x.c
-+++ b/arch/arm/mach-pxa/pxa27x.c
-@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset);
- */
- static unsigned int pwrmode = PWRMODE_SLEEP;
-
--int __init pxa27x_set_pwrmode(unsigned int mode)
-+int pxa27x_set_pwrmode(unsigned int mode)
- {
- switch (mode) {
- case PWRMODE_SLEEP:
-diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
-index fbe74c6..49d1110 100644
---- a/arch/arm/mach-tegra/board-paz00.c
-+++ b/arch/arm/mach-tegra/board-paz00.c
-@@ -39,8 +39,8 @@ static struct platform_device wifi_rfkill_device = {
- static struct gpiod_lookup_table wifi_gpio_lookup = {
- .dev_id = "rfkill_gpio",
- .table = {
-- GPIO_LOOKUP_IDX("tegra-gpio", 25, NULL, 0, 0),
-- GPIO_LOOKUP_IDX("tegra-gpio", 85, NULL, 1, 0),
-+ GPIO_LOOKUP("tegra-gpio", 25, "reset", 0),
-+ GPIO_LOOKUP("tegra-gpio", 85, "shutdown", 0),
- { },
- },
- };
-diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
-index cba12f3..25ecc6a 100644
---- a/arch/arm/mm/dma-mapping.c
-+++ b/arch/arm/mm/dma-mapping.c
-@@ -1413,12 +1413,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- struct page **pages = __iommu_get_pages(cpu_addr, attrs);
-+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-+ unsigned long off = vma->vm_pgoff;
-
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
- if (!pages)
- return -ENXIO;
-
-+ if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
-+ return -ENXIO;
-+
-+ pages += off;
-+
- do {
- int ret = vm_insert_page(vma, uaddr, *pages++);
- if (ret) {
-diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
-index bbb251b..8b9bf54 100644
---- a/arch/arm64/include/asm/irq.h
-+++ b/arch/arm64/include/asm/irq.h
-@@ -21,4 +21,9 @@ static inline void acpi_irq_init(void)
- }
- #define acpi_irq_init acpi_irq_init
-
-+static inline int nr_legacy_irqs(void)
-+{
-+ return 0;
-+}
-+
- #endif
-diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
-index d6dd9fd..d4264bb 100644
---- a/arch/arm64/include/asm/ptrace.h
-+++ b/arch/arm64/include/asm/ptrace.h
-@@ -83,14 +83,14 @@
- #define compat_sp regs[13]
- #define compat_lr regs[14]
- #define compat_sp_hyp regs[15]
--#define compat_sp_irq regs[16]
--#define compat_lr_irq regs[17]
--#define compat_sp_svc regs[18]
--#define compat_lr_svc regs[19]
--#define compat_sp_abt regs[20]
--#define compat_lr_abt regs[21]
--#define compat_sp_und regs[22]
--#define compat_lr_und regs[23]
-+#define compat_lr_irq regs[16]
-+#define compat_sp_irq regs[17]
-+#define compat_lr_svc regs[18]
-+#define compat_sp_svc regs[19]
-+#define compat_lr_abt regs[20]
-+#define compat_sp_abt regs[21]
-+#define compat_lr_und regs[22]
-+#define compat_sp_und regs[23]
- #define compat_r8_fiq regs[24]
- #define compat_r9_fiq regs[25]
- #define compat_r10_fiq regs[26]
-diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
-index 9807333..4d77757 100644
---- a/arch/arm64/kernel/vmlinux.lds.S
-+++ b/arch/arm64/kernel/vmlinux.lds.S
-@@ -60,9 +60,12 @@ PECOFF_FILE_ALIGNMENT = 0x200;
- #define PECOFF_EDATA_PADDING
- #endif
-
--#ifdef CONFIG_DEBUG_ALIGN_RODATA
-+#if defined(CONFIG_DEBUG_ALIGN_RODATA)
- #define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT);
- #define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
-+#elif defined(CONFIG_DEBUG_RODATA)
-+#define ALIGN_DEBUG_RO . = ALIGN(1<<PAGE_SHIFT);
-+#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
- #else
- #define ALIGN_DEBUG_RO
- #define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
-diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
-index 1ba2120..9a00137 100644
---- a/arch/mips/ath79/setup.c
-+++ b/arch/mips/ath79/setup.c
-@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
- AR71XX_RESET_SIZE);
- ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
- AR71XX_PLL_SIZE);
-+ ath79_detect_sys_type();
- ath79_ddr_ctrl_init();
-
-- ath79_detect_sys_type();
- if (mips_machtype != ATH79_MACH_GENERIC_OF)
- detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
-
-diff --git a/arch/mips/include/asm/cdmm.h b/arch/mips/include/asm/cdmm.h
-index 16e22ce..85dc4ce 100644
---- a/arch/mips/include/asm/cdmm.h
-+++ b/arch/mips/include/asm/cdmm.h
-@@ -84,6 +84,17 @@ void mips_cdmm_driver_unregister(struct mips_cdmm_driver *);
- module_driver(__mips_cdmm_driver, mips_cdmm_driver_register, \
- mips_cdmm_driver_unregister)
-
-+/*
-+ * builtin_mips_cdmm_driver() - Helper macro for drivers that don't do anything
-+ * special in init and have no exit. This eliminates some boilerplate. Each
-+ * driver may only use this macro once, and calling it replaces device_initcall
-+ * (or in some cases, the legacy __initcall). This is meant to be a direct
-+ * parallel of module_mips_cdmm_driver() above but without the __exit stuff that
-+ * is not used for builtin cases.
-+ */
-+#define builtin_mips_cdmm_driver(__mips_cdmm_driver) \
-+ builtin_driver(__mips_cdmm_driver, mips_cdmm_driver_register)
-+
- /* drivers/tty/mips_ejtag_fdc.c */
-
- #ifdef CONFIG_MIPS_EJTAG_FDC_EARLYCON
-diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
-index d5fa3ea..41b1b09 100644
---- a/arch/mips/kvm/emulate.c
-+++ b/arch/mips/kvm/emulate.c
-@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
-
- base = (inst >> 21) & 0x1f;
- op_inst = (inst >> 16) & 0x1f;
-- offset = inst & 0xffff;
-+ offset = (int16_t)inst;
- cache = (inst >> 16) & 0x3;
- op = (inst >> 18) & 0x7;
-
-diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
-index c567240..d1ee95a 100644
---- a/arch/mips/kvm/locore.S
-+++ b/arch/mips/kvm/locore.S
-@@ -165,9 +165,11 @@ FEXPORT(__kvm_mips_vcpu_run)
-
- FEXPORT(__kvm_mips_load_asid)
- /* Set the ASID for the Guest Kernel */
-- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
-- /* addresses shift to 0x80000000 */
-- bltz t0, 1f /* If kernel */
-+ PTR_L t0, VCPU_COP0(k1)
-+ LONG_L t0, COP0_STATUS(t0)
-+ andi t0, KSU_USER | ST0_ERL | ST0_EXL
-+ xori t0, KSU_USER
-+ bnez t0, 1f /* If kernel */
- INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
- 1:
-@@ -482,9 +484,11 @@ __kvm_mips_return_to_guest:
- mtc0 t0, CP0_EPC
-
- /* Set the ASID for the Guest Kernel */
-- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
-- /* addresses shift to 0x80000000 */
-- bltz t0, 1f /* If kernel */
-+ PTR_L t0, VCPU_COP0(k1)
-+ LONG_L t0, COP0_STATUS(t0)
-+ andi t0, KSU_USER | ST0_ERL | ST0_EXL
-+ xori t0, KSU_USER
-+ bnez t0, 1f /* If kernel */
- INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
- 1:
-diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index cd4c129..bafb32b 100644
---- a/arch/mips/kvm/mips.c
-+++ b/arch/mips/kvm/mips.c
-@@ -278,7 +278,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
-
- if (!gebase) {
- err = -ENOMEM;
-- goto out_free_cpu;
-+ goto out_uninit_cpu;
- }
- kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
- ALIGN(size, PAGE_SIZE), gebase);
-@@ -342,6 +342,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
- out_free_gebase:
- kfree(gebase);
-
-+out_uninit_cpu:
-+ kvm_vcpu_uninit(vcpu);
-+
- out_free_cpu:
- kfree(vcpu);
-
-diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
-index 3fc2e6d..a0706fd 100644
---- a/arch/mips/lantiq/clk.c
-+++ b/arch/mips/lantiq/clk.c
-@@ -99,6 +99,23 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
- }
- EXPORT_SYMBOL(clk_set_rate);
-
-+long clk_round_rate(struct clk *clk, unsigned long rate)
-+{
-+ if (unlikely(!clk_good(clk)))
-+ return 0;
-+ if (clk->rates && *clk->rates) {
-+ unsigned long *r = clk->rates;
-+
-+ while (*r && (*r != rate))
-+ r++;
-+ if (!*r) {
-+ return clk->rate;
-+ }
-+ }
-+ return rate;
-+}
-+EXPORT_SYMBOL(clk_round_rate);
-+
- int clk_enable(struct clk *clk)
- {
- if (unlikely(!clk_good(clk)))
-diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
-index c98d897..cbee788 100644
---- a/arch/s390/kvm/interrupt.c
-+++ b/arch/s390/kvm/interrupt.c
-@@ -1051,8 +1051,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
- src_id, 0, 2);
-
- /* sending vcpu invalid */
-- if (src_id >= KVM_MAX_VCPUS ||
-- kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
-+ if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
- return -EINVAL;
-
- if (sclp.has_sigpif)
-@@ -1131,6 +1130,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
- irq->u.emerg.code, 0, 2);
-
-+ /* sending vcpu invalid */
-+ if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
-+ return -EINVAL;
-+
- set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
- set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
-index f32f843..4a001c1 100644
---- a/arch/s390/kvm/kvm-s390.c
-+++ b/arch/s390/kvm/kvm-s390.c
-@@ -289,12 +289,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
- r = 0;
- break;
- case KVM_CAP_S390_VECTOR_REGISTERS:
-- if (MACHINE_HAS_VX) {
-+ mutex_lock(&kvm->lock);
-+ if (atomic_read(&kvm->online_vcpus)) {
-+ r = -EBUSY;
-+ } else if (MACHINE_HAS_VX) {
- set_kvm_facility(kvm->arch.model.fac->mask, 129);
- set_kvm_facility(kvm->arch.model.fac->list, 129);
- r = 0;
- } else
- r = -EINVAL;
-+ mutex_unlock(&kvm->lock);
- break;
- case KVM_CAP_S390_USER_STSI:
- kvm->arch.user_stsi = 1;
-@@ -1037,7 +1041,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
- if (!kvm->arch.sca)
- goto out_err;
- spin_lock(&kvm_lock);
-- sca_offset = (sca_offset + 16) & 0x7f0;
-+ sca_offset += 16;
-+ if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
-+ sca_offset = 0;
- kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
- spin_unlock(&kvm_lock);
-
-diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
-index 72e58bd..7171056 100644
---- a/arch/s390/kvm/sigp.c
-+++ b/arch/s390/kvm/sigp.c
-@@ -294,12 +294,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
- u16 cpu_addr, u32 parameter, u64 *status_reg)
- {
- int rc;
-- struct kvm_vcpu *dst_vcpu;
-+ struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
-
-- if (cpu_addr >= KVM_MAX_VCPUS)
-- return SIGP_CC_NOT_OPERATIONAL;
--
-- dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
- if (!dst_vcpu)
- return SIGP_CC_NOT_OPERATIONAL;
-
-@@ -481,7 +477,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
- trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
-
- if (order_code == SIGP_EXTERNAL_CALL) {
-- dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
-+ dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
- BUG_ON(dest_vcpu == NULL);
-
- kvm_s390_vcpu_wakeup(dest_vcpu);
-diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
-index f0da5a2..9f1e05e 100644
---- a/arch/tile/kernel/usb.c
-+++ b/arch/tile/kernel/usb.c
-@@ -22,6 +22,7 @@
- #include <linux/platform_device.h>
- #include <linux/usb/tilegx.h>
- #include <linux/init.h>
-+#include <linux/module.h>
- #include <linux/types.h>
-
- static u64 ehci_dmamask = DMA_BIT_MASK(32);
-diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
-index ccffa53..39bcefc 100644
---- a/arch/x86/include/asm/i8259.h
-+++ b/arch/x86/include/asm/i8259.h
-@@ -60,6 +60,7 @@ struct legacy_pic {
- void (*mask_all)(void);
- void (*restore_mask)(void);
- void (*init)(int auto_eoi);
-+ int (*probe)(void);
- int (*irq_pending)(unsigned int irq);
- void (*make_irq)(unsigned int irq);
- };
-diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
-index e16466e..e9cd7be 100644
---- a/arch/x86/include/asm/kvm_emulate.h
-+++ b/arch/x86/include/asm/kvm_emulate.h
-@@ -112,6 +112,16 @@ struct x86_emulate_ops {
- struct x86_exception *fault);
-
- /*
-+ * read_phys: Read bytes of standard (non-emulated/special) memory.
-+ * Used for descriptor reading.
-+ * @addr: [IN ] Physical address from which to read.
-+ * @val: [OUT] Value read from memory.
-+ * @bytes: [IN ] Number of bytes to read from memory.
-+ */
-+ int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
-+ void *val, unsigned int bytes);
-+
-+ /*
- * write_std: Write bytes of standard (non-emulated/special) memory.
- * Used for descriptor writing.
- * @addr: [IN ] Linear address to which to write.
-diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
-index b5d7640..8a4add8 100644
---- a/arch/x86/include/uapi/asm/svm.h
-+++ b/arch/x86/include/uapi/asm/svm.h
-@@ -100,6 +100,7 @@
- { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
- { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
- { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
-+ { SVM_EXIT_EXCP_BASE + AC_VECTOR, "AC excp" }, \
- { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
- { SVM_EXIT_INTR, "interrupt" }, \
- { SVM_EXIT_NMI, "nmi" }, \
-diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
-index 2683f36..ea4ba83 100644
---- a/arch/x86/kernel/apic/vector.c
-+++ b/arch/x86/kernel/apic/vector.c
-@@ -360,7 +360,11 @@ int __init arch_probe_nr_irqs(void)
- if (nr < nr_irqs)
- nr_irqs = nr;
-
-- return nr_legacy_irqs();
-+ /*
-+ * We don't know if PIC is present at this point so we need to do
-+ * probe() to get the right number of legacy IRQs.
-+ */
-+ return legacy_pic->probe();
- }
-
- #ifdef CONFIG_X86_IO_APIC
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index cb9e5df..e4f929d 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -272,10 +272,9 @@ __setup("nosmap", setup_disable_smap);
-
- static __always_inline void setup_smap(struct cpuinfo_x86 *c)
- {
-- unsigned long eflags;
-+ unsigned long eflags = native_save_fl();
-
- /* This should have been cleared long ago */
-- raw_local_save_flags(eflags);
- BUG_ON(eflags & X86_EFLAGS_AC);
-
- if (cpu_has(c, X86_FEATURE_SMAP)) {
-diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
-index 50ec9af..6545e6d 100644
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
- */
- void fpu__init_prepare_fx_sw_frame(void)
- {
-- int fsave_header_size = sizeof(struct fregs_state);
- int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
-
-- if (config_enabled(CONFIG_X86_32))
-- size += fsave_header_size;
--
- fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
- fx_sw_reserved.extended_size = size;
- fx_sw_reserved.xfeatures = xfeatures_mask;
- fx_sw_reserved.xstate_size = xstate_size;
-
-- if (config_enabled(CONFIG_IA32_EMULATION)) {
-+ if (config_enabled(CONFIG_IA32_EMULATION) ||
-+ config_enabled(CONFIG_X86_32)) {
-+ int fsave_header_size = sizeof(struct fregs_state);
-+
- fx_sw_reserved_ia32 = fx_sw_reserved;
-- fx_sw_reserved_ia32.extended_size += fsave_header_size;
-+ fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
- }
- }
-
-diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
-index 62fc001..2c4ac07 100644
---- a/arch/x86/kernel/fpu/xstate.c
-+++ b/arch/x86/kernel/fpu/xstate.c
-@@ -402,7 +402,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
- if (!boot_cpu_has(X86_FEATURE_XSAVE))
- return NULL;
-
-- xsave = &current->thread.fpu.state.xsave;
- /*
- * We should not ever be requesting features that we
- * have not enabled. Remember that pcntxt_mask is
-diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index 1d40ca8..ffdc0e8 100644
---- a/arch/x86/kernel/head_64.S
-+++ b/arch/x86/kernel/head_64.S
-@@ -65,6 +65,9 @@ startup_64:
- * tables and then reload them.
- */
-
-+ /* Sanitize CPU configuration */
-+ call verify_cpu
-+
- /*
- * Compute the delta between the address I am compiled to run at and the
- * address I am actually running at.
-@@ -174,6 +177,9 @@ ENTRY(secondary_startup_64)
- * after the boot processor executes this code.
- */
-
-+ /* Sanitize CPU configuration */
-+ call verify_cpu
-+
- movq $(init_level4_pgt - __START_KERNEL_map), %rax
- 1:
-
-@@ -288,6 +294,8 @@ ENTRY(secondary_startup_64)
- pushq %rax # target address in negative space
- lretq
-
-+#include "verify_cpu.S"
-+
- #ifdef CONFIG_HOTPLUG_CPU
- /*
- * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
-diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
-index 16cb827..be22f5a 100644
---- a/arch/x86/kernel/i8259.c
-+++ b/arch/x86/kernel/i8259.c
-@@ -295,16 +295,11 @@ static void unmask_8259A(void)
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
- }
-
--static void init_8259A(int auto_eoi)
-+static int probe_8259A(void)
- {
- unsigned long flags;
- unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
- unsigned char new_val;
--
-- i8259A_auto_eoi = auto_eoi;
--
-- raw_spin_lock_irqsave(&i8259A_lock, flags);
--
- /*
- * Check to see if we have a PIC.
- * Mask all except the cascade and read
-@@ -312,16 +307,28 @@ static void init_8259A(int auto_eoi)
- * have a PIC, we will read 0xff as opposed to the
- * value we wrote.
- */
-+ raw_spin_lock_irqsave(&i8259A_lock, flags);
-+
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
- outb(probe_val, PIC_MASTER_IMR);
- new_val = inb(PIC_MASTER_IMR);
- if (new_val != probe_val) {
- printk(KERN_INFO "Using NULL legacy PIC\n");
- legacy_pic = &null_legacy_pic;
-- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-- return;
- }
-
-+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-+ return nr_legacy_irqs();
-+}
-+
-+static void init_8259A(int auto_eoi)
-+{
-+ unsigned long flags;
-+
-+ i8259A_auto_eoi = auto_eoi;
-+
-+ raw_spin_lock_irqsave(&i8259A_lock, flags);
-+
- outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
-
- /*
-@@ -379,6 +386,10 @@ static int legacy_pic_irq_pending_noop(unsigned int irq)
- {
- return 0;
- }
-+static int legacy_pic_probe(void)
-+{
-+ return 0;
-+}
-
- struct legacy_pic null_legacy_pic = {
- .nr_legacy_irqs = 0,
-@@ -388,6 +399,7 @@ struct legacy_pic null_legacy_pic = {
- .mask_all = legacy_pic_noop,
- .restore_mask = legacy_pic_noop,
- .init = legacy_pic_int_noop,
-+ .probe = legacy_pic_probe,
- .irq_pending = legacy_pic_irq_pending_noop,
- .make_irq = legacy_pic_uint_noop,
- };
-@@ -400,6 +412,7 @@ struct legacy_pic default_legacy_pic = {
- .mask_all = mask_8259A,
- .restore_mask = unmask_8259A,
- .init = init_8259A,
-+ .probe = probe_8259A,
- .irq_pending = i8259A_irq_pending,
- .make_irq = make_8259A_irq,
- };
-diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 80f874b..1e6f70f 100644
---- a/arch/x86/kernel/setup.c
-+++ b/arch/x86/kernel/setup.c
-@@ -1198,6 +1198,14 @@ void __init setup_arch(char **cmdline_p)
- clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
- swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
-+
-+ /*
-+ * sync back low identity map too. It is used for example
-+ * in the 32-bit EFI stub.
-+ */
-+ clone_pgd_range(initial_page_table,
-+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
- #endif
-
- tboot_probe();
-diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
-index b9242ba..4cf401f 100644
---- a/arch/x86/kernel/verify_cpu.S
-+++ b/arch/x86/kernel/verify_cpu.S
-@@ -34,10 +34,11 @@
- #include <asm/msr-index.h>
-
- verify_cpu:
-- pushfl # Save caller passed flags
-- pushl $0 # Kill any dangerous flags
-- popfl
-+ pushf # Save caller passed flags
-+ push $0 # Kill any dangerous flags
-+ popf
-
-+#ifndef __x86_64__
- pushfl # standard way to check for cpuid
- popl %eax
- movl %eax,%ebx
-@@ -48,6 +49,7 @@ verify_cpu:
- popl %eax
- cmpl %eax,%ebx
- jz verify_cpu_no_longmode # cpu has no cpuid
-+#endif
-
- movl $0x0,%eax # See if cpuid 1 is implemented
- cpuid
-@@ -130,10 +132,10 @@ verify_cpu_sse_test:
- jmp verify_cpu_sse_test # try again
-
- verify_cpu_no_longmode:
-- popfl # Restore caller passed flags
-+ popf # Restore caller passed flags
- movl $1,%eax
- ret
- verify_cpu_sse_ok:
-- popfl # Restore caller passed flags
-+ popf # Restore caller passed flags
- xorl %eax, %eax
- ret
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 2392541a..f17c342 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2272,8 +2272,8 @@ static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
- #define GET_SMSTATE(type, smbase, offset) \
- ({ \
- type __val; \
-- int r = ctxt->ops->read_std(ctxt, smbase + offset, &__val, \
-- sizeof(__val), NULL); \
-+ int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val, \
-+ sizeof(__val)); \
- if (r != X86EMUL_CONTINUE) \
- return X86EMUL_UNHANDLEABLE; \
- __val; \
-@@ -2484,17 +2484,36 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
-
- /*
- * Get back to real mode, to prepare a safe state in which to load
-- * CR0/CR3/CR4/EFER. Also this will ensure that addresses passed
-- * to read_std/write_std are not virtual.
-- *
-- * CR4.PCIDE must be zero, because it is a 64-bit mode only feature.
-+ * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
-+ * supports long mode.
- */
-+ cr4 = ctxt->ops->get_cr(ctxt, 4);
-+ if (emulator_has_longmode(ctxt)) {
-+ struct desc_struct cs_desc;
-+
-+ /* Zero CR4.PCIDE before CR0.PG. */
-+ if (cr4 & X86_CR4_PCIDE) {
-+ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-+ cr4 &= ~X86_CR4_PCIDE;
-+ }
-+
-+ /* A 32-bit code segment is required to clear EFER.LMA. */
-+ memset(&cs_desc, 0, sizeof(cs_desc));
-+ cs_desc.type = 0xb;
-+ cs_desc.s = cs_desc.g = cs_desc.p = 1;
-+ ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
-+ }
-+
-+ /* For the 64-bit case, this will clear EFER.LMA. */
- cr0 = ctxt->ops->get_cr(ctxt, 0);
- if (cr0 & X86_CR0_PE)
- ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
-- cr4 = ctxt->ops->get_cr(ctxt, 4);
-+
-+ /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
- if (cr4 & X86_CR4_PAE)
- ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
-+
-+ /* And finally go back to 32-bit mode. */
- efer = 0;
- ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
-
-@@ -4455,7 +4474,7 @@ static const struct opcode twobyte_table[256] = {
- F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
- /* 0xA8 - 0xAF */
- I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
-- II(No64 | EmulateOnUD | ImplicitOps, em_rsm, rsm),
-+ II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
- F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
- F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
- F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 2a5ca97..236e346 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -348,6 +348,8 @@ void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- __kvm_apic_update_irr(pir, apic->regs);
-+
-+ kvm_make_request(KVM_REQ_EVENT, vcpu);
- }
- EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
-
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 2d32b67..00da6e8 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -1085,7 +1085,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
- return target_tsc - tsc;
- }
-
--static void init_vmcb(struct vcpu_svm *svm, bool init_event)
-+static void init_vmcb(struct vcpu_svm *svm)
- {
- struct vmcb_control_area *control = &svm->vmcb->control;
- struct vmcb_save_area *save = &svm->vmcb->save;
-@@ -1106,6 +1106,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
- set_exception_intercept(svm, PF_VECTOR);
- set_exception_intercept(svm, UD_VECTOR);
- set_exception_intercept(svm, MC_VECTOR);
-+ set_exception_intercept(svm, AC_VECTOR);
-
- set_intercept(svm, INTERCEPT_INTR);
- set_intercept(svm, INTERCEPT_NMI);
-@@ -1156,8 +1157,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
- init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
- init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
-
-- if (!init_event)
-- svm_set_efer(&svm->vcpu, 0);
-+ svm_set_efer(&svm->vcpu, 0);
- save->dr6 = 0xffff0ff0;
- kvm_set_rflags(&svm->vcpu, 2);
- save->rip = 0x0000fff0;
-@@ -1211,7 +1211,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
- if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
- svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
- }
-- init_vmcb(svm, init_event);
-+ init_vmcb(svm);
-
- kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
- kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
-@@ -1267,7 +1267,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
- clear_page(svm->vmcb);
- svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
- svm->asid_generation = 0;
-- init_vmcb(svm, false);
-+ init_vmcb(svm);
-
- svm_init_osvw(&svm->vcpu);
-
-@@ -1795,6 +1795,12 @@ static int ud_interception(struct vcpu_svm *svm)
- return 1;
- }
-
-+static int ac_interception(struct vcpu_svm *svm)
-+{
-+ kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
-+ return 1;
-+}
-+
- static void svm_fpu_activate(struct kvm_vcpu *vcpu)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
-@@ -1889,7 +1895,7 @@ static int shutdown_interception(struct vcpu_svm *svm)
- * so reinitialize it.
- */
- clear_page(svm->vmcb);
-- init_vmcb(svm, false);
-+ init_vmcb(svm);
-
- kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
- return 0;
-@@ -3369,6 +3375,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
- [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
- [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
- [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
-+ [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
- [SVM_EXIT_INTR] = intr_interception,
- [SVM_EXIT_NMI] = nmi_interception,
- [SVM_EXIT_SMI] = nop_on_interception,
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index aa9e822..e77d75b 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -1567,7 +1567,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
- u32 eb;
-
- eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
-- (1u << NM_VECTOR) | (1u << DB_VECTOR);
-+ (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
- if ((vcpu->guest_debug &
- (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
- (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
-@@ -4780,8 +4780,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
- vmx_set_cr0(vcpu, cr0); /* enter rmode */
- vmx->vcpu.arch.cr0 = cr0;
- vmx_set_cr4(vcpu, 0);
-- if (!init_event)
-- vmx_set_efer(vcpu, 0);
-+ vmx_set_efer(vcpu, 0);
- vmx_fpu_activate(vcpu);
- update_exception_bitmap(vcpu);
-
-@@ -5118,6 +5117,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)
- return handle_rmode_exception(vcpu, ex_no, error_code);
-
- switch (ex_no) {
-+ case AC_VECTOR:
-+ kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
-+ return 1;
- case DB_VECTOR:
- dr6 = vmcs_readl(EXIT_QUALIFICATION);
- if (!(vcpu->guest_debug &
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 373328b..2781e2b 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -621,7 +621,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
- if ((cr0 ^ old_cr0) & update_bits)
- kvm_mmu_reset_context(vcpu);
-
-- if ((cr0 ^ old_cr0) & X86_CR0_CD)
-+ if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
-+ kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
-+ !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
- kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
-
- return 0;
-@@ -4260,6 +4262,15 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
- return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
- }
-
-+static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
-+ unsigned long addr, void *val, unsigned int bytes)
-+{
-+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-+ int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
-+
-+ return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
-+}
-+
- int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
- gva_t addr, void *val,
- unsigned int bytes,
-@@ -4995,6 +5006,7 @@ static const struct x86_emulate_ops emulate_ops = {
- .write_gpr = emulator_write_gpr,
- .read_std = kvm_read_guest_virt_system,
- .write_std = kvm_write_guest_virt_system,
-+ .read_phys = kvm_read_guest_phys_system,
- .fetch = kvm_fetch_guest_virt,
- .read_emulated = emulator_read_emulated,
- .write_emulated = emulator_write_emulated,
-diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
-index db1b0bc..c28f618 100644
---- a/arch/x86/mm/mpx.c
-+++ b/arch/x86/mm/mpx.c
-@@ -622,6 +622,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
- }
-
- /*
-+ * We only want to do a 4-byte get_user() on 32-bit. Otherwise,
-+ * we might run off the end of the bounds table if we are on
-+ * a 64-bit kernel and try to get 8 bytes.
-+ */
-+int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
-+ long __user *bd_entry_ptr)
-+{
-+ u32 bd_entry_32;
-+ int ret;
-+
-+ if (is_64bit_mm(mm))
-+ return get_user(*bd_entry_ret, bd_entry_ptr);
-+
-+ /*
-+ * Note that get_user() uses the type of the *pointer* to
-+ * establish the size of the get, not the destination.
-+ */
-+ ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
-+ *bd_entry_ret = bd_entry_32;
-+ return ret;
-+}
-+
-+/*
- * Get the base of bounds tables pointed by specific bounds
- * directory entry.
- */
-@@ -641,7 +664,7 @@ static int get_bt_addr(struct mm_struct *mm,
- int need_write = 0;
-
- pagefault_disable();
-- ret = get_user(bd_entry, bd_entry_ptr);
-+ ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
- pagefault_enable();
- if (!ret)
- break;
-@@ -736,11 +759,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
- */
- static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
- {
-- unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
-- if (is_64bit_mm(mm))
-- return virt_space / MPX_BD_NR_ENTRIES_64;
-- else
-- return virt_space / MPX_BD_NR_ENTRIES_32;
-+ unsigned long long virt_space;
-+ unsigned long long GB = (1ULL << 30);
-+
-+ /*
-+ * This covers 32-bit emulation as well as 32-bit kernels
-+ * running on 64-bit harware.
-+ */
-+ if (!is_64bit_mm(mm))
-+ return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
-+
-+ /*
-+ * 'x86_virt_bits' returns what the hardware is capable
-+ * of, and returns the full >32-bit adddress space when
-+ * running 32-bit kernels on 64-bit hardware.
-+ */
-+ virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
-+ return virt_space / MPX_BD_NR_ENTRIES_64;
- }
-
- /*
-diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
-index e527a3e..fa893c3 100644
---- a/drivers/bluetooth/ath3k.c
-+++ b/drivers/bluetooth/ath3k.c
-@@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
- { USB_DEVICE(0x04CA, 0x300f) },
- { USB_DEVICE(0x04CA, 0x3010) },
- { USB_DEVICE(0x0930, 0x0219) },
-+ { USB_DEVICE(0x0930, 0x021c) },
- { USB_DEVICE(0x0930, 0x0220) },
- { USB_DEVICE(0x0930, 0x0227) },
- { USB_DEVICE(0x0b05, 0x17d0) },
-@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
- { USB_DEVICE(0x0CF3, 0x311F) },
- { USB_DEVICE(0x0cf3, 0x3121) },
- { USB_DEVICE(0x0CF3, 0x817a) },
-+ { USB_DEVICE(0x0CF3, 0x817b) },
- { USB_DEVICE(0x0cf3, 0xe003) },
- { USB_DEVICE(0x0CF3, 0xE004) },
- { USB_DEVICE(0x0CF3, 0xE005) },
-@@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
-@@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
-diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
-index b4cf8d9..7d9b09f 100644
---- a/drivers/bluetooth/btusb.c
-+++ b/drivers/bluetooth/btusb.c
-@@ -192,6 +192,7 @@ static const struct usb_device_id blacklist_table[] = {
- { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
-@@ -203,6 +204,7 @@ static const struct usb_device_id blacklist_table[] = {
- { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
-diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
-index 2dda4e8..d679ab8 100644
---- a/drivers/clk/bcm/clk-iproc-pll.c
-+++ b/drivers/clk/bcm/clk-iproc-pll.c
-@@ -345,8 +345,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
- struct iproc_pll *pll = clk->pll;
- const struct iproc_pll_ctrl *ctrl = pll->ctrl;
- u32 val;
-- u64 ndiv;
-- unsigned int ndiv_int, ndiv_frac, pdiv;
-+ u64 ndiv, ndiv_int, ndiv_frac;
-+ unsigned int pdiv;
-
- if (parent_rate == 0)
- return 0;
-@@ -366,22 +366,19 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
- val = readl(pll->pll_base + ctrl->ndiv_int.offset);
- ndiv_int = (val >> ctrl->ndiv_int.shift) &
- bit_mask(ctrl->ndiv_int.width);
-- ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
-+ ndiv = ndiv_int << 20;
-
- if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
- val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
- ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
- bit_mask(ctrl->ndiv_frac.width);
--
-- if (ndiv_frac != 0)
-- ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
-- ndiv_frac;
-+ ndiv += ndiv_frac;
- }
-
- val = readl(pll->pll_base + ctrl->pdiv.offset);
- pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
-
-- clk->rate = (ndiv * parent_rate) >> ctrl->ndiv_int.shift;
-+ clk->rate = (ndiv * parent_rate) >> 20;
-
- if (pdiv == 0)
- clk->rate *= 2;
-diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
-index bc96f10..9064636 100644
---- a/drivers/clk/versatile/clk-icst.c
-+++ b/drivers/clk/versatile/clk-icst.c
-@@ -156,8 +156,10 @@ struct clk *icst_clk_register(struct device *dev,
- icst->lockreg = base + desc->lock_offset;
-
- clk = clk_register(dev, &icst->hw);
-- if (IS_ERR(clk))
-+ if (IS_ERR(clk)) {
-+ kfree(pclone);
- kfree(icst);
-+ }
-
- return clk;
- }
-diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
-index c5265c1..6aacd20 100644
---- a/drivers/mfd/twl6040.c
-+++ b/drivers/mfd/twl6040.c
-@@ -647,6 +647,8 @@ static int twl6040_probe(struct i2c_client *client,
-
- twl6040->clk32k = devm_clk_get(&client->dev, "clk32k");
- if (IS_ERR(twl6040->clk32k)) {
-+ if (PTR_ERR(twl6040->clk32k) == -EPROBE_DEFER)
-+ return -EPROBE_DEFER;
- dev_info(&client->dev, "clk32k is not handled\n");
- twl6040->clk32k = NULL;
- }
-diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index a98dd4f..cbbb1c9 100644
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -1751,6 +1751,7 @@ err_undo_flags:
- slave_dev->dev_addr))
- eth_hw_addr_random(bond_dev);
- if (bond_dev->type != ARPHRD_ETHER) {
-+ dev_close(bond_dev);
- ether_setup(bond_dev);
- bond_dev->flags |= IFF_MASTER;
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
-index aede704..141c2a4 100644
---- a/drivers/net/can/dev.c
-+++ b/drivers/net/can/dev.c
-@@ -915,7 +915,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
-
-- nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
-+ nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
- nla_put_u32(skb, IFLA_CAN_STATE, state) ||
- nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
- nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
-diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
-index 7b92e91..f10834b 100644
---- a/drivers/net/can/sja1000/sja1000.c
-+++ b/drivers/net/can/sja1000/sja1000.c
-@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
- priv->write_reg(priv, SJA1000_RXERR, 0x0);
- priv->read_reg(priv, SJA1000_ECC);
-
-+ /* clear interrupt flags */
-+ priv->read_reg(priv, SJA1000_IR);
-+
- /* leave reset mode */
- set_normal_mode(dev);
- }
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
-index a4473d8..f672dba 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
-@@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
- packet->rdesc_count, 1);
-
- /* Make sure ownership is written to the descriptor */
-- dma_wmb();
-+ smp_wmb();
-
- ring->cur = cur_index + 1;
- if (!packet->skb->xmit_more ||
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-index aae9d5e..dde0486 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-@@ -1807,6 +1807,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
- struct netdev_queue *txq;
- int processed = 0;
- unsigned int tx_packets = 0, tx_bytes = 0;
-+ unsigned int cur;
-
- DBGPR("-->xgbe_tx_poll\n");
-
-@@ -1814,10 +1815,15 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
- if (!ring)
- return 0;
-
-+ cur = ring->cur;
-+
-+ /* Be sure we get ring->cur before accessing descriptor data */
-+ smp_rmb();
-+
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
-
- while ((processed < XGBE_TX_DESC_MAX_PROC) &&
-- (ring->dirty != ring->cur)) {
-+ (ring->dirty != cur)) {
- rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
- rdesc = rdata->rdesc;
-
-diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
-index de63266d..5d1dde3 100644
---- a/drivers/net/ethernet/freescale/fec_main.c
-+++ b/drivers/net/ethernet/freescale/fec_main.c
-@@ -1775,7 +1775,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
- int ret = 0;
-
- ret = pm_runtime_get_sync(dev);
-- if (IS_ERR_VALUE(ret))
-+ if (ret < 0)
- return ret;
-
- fep->mii_timeout = 0;
-@@ -1811,11 +1811,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- struct fec_enet_private *fep = bus->priv;
- struct device *dev = &fep->pdev->dev;
- unsigned long time_left;
-- int ret = 0;
-+ int ret;
-
- ret = pm_runtime_get_sync(dev);
-- if (IS_ERR_VALUE(ret))
-+ if (ret < 0)
- return ret;
-+ else
-+ ret = 0;
-
- fep->mii_timeout = 0;
- reinit_completion(&fep->mdio_done);
-@@ -2866,7 +2868,7 @@ fec_enet_open(struct net_device *ndev)
- int ret;
-
- ret = pm_runtime_get_sync(&fep->pdev->dev);
-- if (IS_ERR_VALUE(ret))
-+ if (ret < 0)
- return ret;
-
- pinctrl_pm_select_default_state(&fep->pdev->dev);
-diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
-index 09ec32e..7e788073 100644
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -949,7 +949,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
- /* Set CPU queue access map - all CPUs have access to all RX
- * queues and to all TX queues
- */
-- for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
-+ for_each_present_cpu(cpu)
- mvreg_write(pp, MVNETA_CPU_MAP(cpu),
- (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
- MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
-@@ -1533,12 +1533,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
- }
-
- skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
-- if (!skb)
-- goto err_drop_frame;
-
-+ /* After refill old buffer has to be unmapped regardless
-+ * the skb is successfully built or not.
-+ */
- dma_unmap_single(dev->dev.parent, phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
-
-+ if (!skb)
-+ goto err_drop_frame;
-+
- rcvd_pkts++;
- rcvd_bytes += rx_bytes;
-
-diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
-index 0a32020..2177e56 100644
---- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
-@@ -2398,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
- }
- }
-
-- memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
-+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
- priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
- INIT_WORK(&priv->mfunc.master.comm_work,
- mlx4_master_comm_channel);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
-index 8e81e53..ad8f95d 100644
---- a/drivers/net/ethernet/mellanox/mlx4/eq.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
-@@ -196,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
- return;
- }
-
-- memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
-+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
- s_eqe->slave_id = slave;
- /* ensure all information is written before setting the ownersip bit */
- dma_wmb();
-diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
-index b1a4ea2..4dd18f4 100644
---- a/drivers/net/ethernet/sfc/ef10.c
-+++ b/drivers/net/ethernet/sfc/ef10.c
-@@ -1809,7 +1809,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
- unsigned int write_ptr;
- efx_qword_t *txd;
-
-- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-+ tx_queue->xmit_more_available = false;
-+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
-+ return;
-
- do {
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
-diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
-index f08266f..5a1c5a8 100644
---- a/drivers/net/ethernet/sfc/farch.c
-+++ b/drivers/net/ethernet/sfc/farch.c
-@@ -321,7 +321,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
- unsigned write_ptr;
- unsigned old_write_count = tx_queue->write_count;
-
-- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-+ tx_queue->xmit_more_available = false;
-+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
-+ return;
-
- do {
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
-diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
-index 47d1e3a..b8e8ce1 100644
---- a/drivers/net/ethernet/sfc/net_driver.h
-+++ b/drivers/net/ethernet/sfc/net_driver.h
-@@ -219,6 +219,7 @@ struct efx_tx_buffer {
- * @tso_packets: Number of packets via the TSO xmit path
- * @pushes: Number of times the TX push feature has been used
- * @pio_packets: Number of times the TX PIO feature has been used
-+ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
- * @empty_read_count: If the completion path has seen the queue as empty
- * and the transmission path has not yet checked this, the value of
- * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
-@@ -253,6 +254,7 @@ struct efx_tx_queue {
- unsigned int tso_packets;
- unsigned int pushes;
- unsigned int pio_packets;
-+ bool xmit_more_available;
- /* Statistics to supplement MAC stats */
- unsigned long tx_packets;
-
-diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
-index 1833a01..67f6afa 100644
---- a/drivers/net/ethernet/sfc/tx.c
-+++ b/drivers/net/ethernet/sfc/tx.c
-@@ -431,8 +431,20 @@ finish_packet:
- efx_tx_maybe_stop_queue(tx_queue);
-
- /* Pass off to hardware */
-- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
-+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
-+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-+
-+ /* There could be packets left on the partner queue if those
-+ * SKBs had skb->xmit_more set. If we do not push those they
-+ * could be left for a long time and cause a netdev watchdog.
-+ */
-+ if (txq2->xmit_more_available)
-+ efx_nic_push_buffers(txq2);
-+
- efx_nic_push_buffers(tx_queue);
-+ } else {
-+ tx_queue->xmit_more_available = skb->xmit_more;
-+ }
-
- tx_queue->tx_packets++;
-
-@@ -722,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
- tx_queue->read_count = 0;
- tx_queue->old_read_count = 0;
- tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
-+ tx_queue->xmit_more_available = false;
-
- /* Set up TX descriptor ring */
- efx_nic_init_tx(tx_queue);
-@@ -747,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-
- ++tx_queue->read_count;
- }
-+ tx_queue->xmit_more_available = false;
- netdev_tx_reset_queue(tx_queue->core_txq);
- }
-
-@@ -1302,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
- efx_tx_maybe_stop_queue(tx_queue);
-
- /* Pass off to hardware */
-- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
-+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
-+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-+
-+ /* There could be packets left on the partner queue if those
-+ * SKBs had skb->xmit_more set. If we do not push those they
-+ * could be left for a long time and cause a netdev watchdog.
-+ */
-+ if (txq2->xmit_more_available)
-+ efx_nic_push_buffers(txq2);
-+
- efx_nic_push_buffers(tx_queue);
-+ } else {
-+ tx_queue->xmit_more_available = skb->xmit_more;
-+ }
-
- tx_queue->tso_bursts++;
- return NETDEV_TX_OK;
-diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
-index 771cda2..2e51b81 100644
---- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
-+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
-@@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
- {
- struct stmmac_priv *priv = netdev_priv(dev);
-
-- if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
-+ if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
-
-- info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
-+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
-+ SOF_TIMESTAMPING_TX_HARDWARE |
-+ SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
-+ SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (priv->ptp_clock)
-diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 248478c..197c939 100644
---- a/drivers/net/macvtap.c
-+++ b/drivers/net/macvtap.c
-@@ -137,7 +137,7 @@ static const struct proto_ops macvtap_socket_ops;
- #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
- NETIF_F_TSO6 | NETIF_F_UFO)
- #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
--#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
-+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
-
- static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
- {
-diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
-index 2ed7506..5e0b432 100644
---- a/drivers/net/ppp/pppoe.c
-+++ b/drivers/net/ppp/pppoe.c
-@@ -589,7 +589,7 @@ static int pppoe_release(struct socket *sock)
-
- po = pppox_sk(sk);
-
-- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
-+ if (po->pppoe_dev) {
- dev_put(po->pppoe_dev);
- po->pppoe_dev = NULL;
- }
-diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
-index 64a60af..8f1738c 100644
---- a/drivers/net/usb/qmi_wwan.c
-+++ b/drivers/net/usb/qmi_wwan.c
-@@ -765,6 +765,10 @@ static const struct usb_device_id products[] = {
- {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
- {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
- {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
-+ {QMI_FIXED_INTF(0x1199, 0x9070, 8)}, /* Sierra Wireless MC74xx/EM74xx */
-+ {QMI_FIXED_INTF(0x1199, 0x9070, 10)}, /* Sierra Wireless MC74xx/EM74xx */
-+ {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */
-+ {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */
- {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
- {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
- {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
-diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
-index 0d3c474..a5ea8a9 100644
---- a/drivers/net/wireless/ath/ath10k/mac.c
-+++ b/drivers/net/wireless/ath/ath10k/mac.c
-@@ -2070,7 +2070,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
- enum ieee80211_band band;
- const u8 *ht_mcs_mask;
- const u16 *vht_mcs_mask;
-- int i, n, max_nss;
-+ int i, n;
-+ u8 max_nss;
- u32 stbc;
-
- lockdep_assert_held(&ar->conf_mutex);
-@@ -2155,7 +2156,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
- arg->peer_ht_rates.rates[i] = i;
- } else {
- arg->peer_ht_rates.num_rates = n;
-- arg->peer_num_spatial_streams = max_nss;
-+ arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
- }
-
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
-@@ -4021,7 +4022,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
-
- static u32 get_nss_from_chainmask(u16 chain_mask)
- {
-- if ((chain_mask & 0x15) == 0x15)
-+ if ((chain_mask & 0xf) == 0xf)
- return 4;
- else if ((chain_mask & 0x7) == 0x7)
- return 3;
-diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
-index 865d578d..fd6aef7 100644
---- a/drivers/net/wireless/iwlwifi/pcie/drv.c
-+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
-@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
- /* 8000 Series */
- {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
-- {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
-@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
- {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
- {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
-+ {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
- #endif /* CONFIG_IWLMVM */
-
- {0}
-diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
-index 9e144e7..dab9b91 100644
---- a/drivers/net/wireless/iwlwifi/pcie/trans.c
-+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
-@@ -592,10 +592,8 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
-
- do {
- ret = iwl_pcie_set_hw_ready(trans);
-- if (ret >= 0) {
-- ret = 0;
-- goto out;
-- }
-+ if (ret >= 0)
-+ return 0;
-
- usleep_range(200, 1000);
- t += 200;
-@@ -605,10 +603,6 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
-
- IWL_ERR(trans, "Couldn't prepare the card\n");
-
--out:
-- iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
-- CSR_RESET_LINK_PWR_MGMT_DISABLED);
--
- return ret;
- }
-
-diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
-index 5a0636d4..5583856 100644
---- a/drivers/net/wireless/mwifiex/debugfs.c
-+++ b/drivers/net/wireless/mwifiex/debugfs.c
-@@ -731,7 +731,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
- (struct mwifiex_private *) file->private_data;
- unsigned long addr = get_zeroed_page(GFP_KERNEL);
- char *buf = (char *) addr;
-- int pos = 0, ret = 0, i;
-+ int pos, ret, i;
- u8 value[MAX_EEPROM_DATA];
-
- if (!buf)
-@@ -739,7 +739,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
-
- if (saved_offset == -1) {
- /* No command has been given */
-- pos += snprintf(buf, PAGE_SIZE, "0");
-+ pos = snprintf(buf, PAGE_SIZE, "0");
- goto done;
- }
-
-@@ -748,17 +748,17 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
- (u16) saved_bytes, value);
- if (ret) {
- ret = -EINVAL;
-- goto done;
-+ goto out_free;
- }
-
-- pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
-+ pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
-
- for (i = 0; i < saved_bytes; i++)
-- pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
--
-- ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
-+ pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
-
- done:
-+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
-+out_free:
- free_page(addr);
- return ret;
- }
-diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
-index a9c9a07..bc3d907 100644
---- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
-+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
-@@ -680,7 +680,7 @@ void lnet_debug_peer(lnet_nid_t nid);
- static inline void
- lnet_peer_set_alive(lnet_peer_t *lp)
- {
-- lp->lp_last_alive = lp->lp_last_query = get_seconds();
-+ lp->lp_last_alive = lp->lp_last_query = jiffies;
- if (!lp->lp_alive)
- lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
- }
-diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
-index f8b5b33..943a0e2 100644
---- a/drivers/staging/rtl8712/usb_intf.c
-+++ b/drivers/staging/rtl8712/usb_intf.c
-@@ -144,6 +144,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
- {USB_DEVICE(0x0DF6, 0x0058)},
- {USB_DEVICE(0x0DF6, 0x0049)},
- {USB_DEVICE(0x0DF6, 0x004C)},
-+ {USB_DEVICE(0x0DF6, 0x006C)},
- {USB_DEVICE(0x0DF6, 0x0064)},
- /* Skyworth */
- {USB_DEVICE(0x14b2, 0x3300)},
-diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
-index 358323c..43a2ba0 100644
---- a/drivers/tty/mips_ejtag_fdc.c
-+++ b/drivers/tty/mips_ejtag_fdc.c
-@@ -1045,38 +1045,6 @@ err_destroy_ports:
- return ret;
- }
-
--static int mips_ejtag_fdc_tty_remove(struct mips_cdmm_device *dev)
--{
-- struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
-- struct mips_ejtag_fdc_tty_port *dport;
-- int nport;
-- unsigned int cfg;
--
-- if (priv->irq >= 0) {
-- raw_spin_lock_irq(&priv->lock);
-- cfg = mips_ejtag_fdc_read(priv, REG_FDCFG);
-- /* Disable interrupts */
-- cfg &= ~(REG_FDCFG_TXINTTHRES | REG_FDCFG_RXINTTHRES);
-- cfg |= REG_FDCFG_TXINTTHRES_DISABLED;
-- cfg |= REG_FDCFG_RXINTTHRES_DISABLED;
-- mips_ejtag_fdc_write(priv, REG_FDCFG, cfg);
-- raw_spin_unlock_irq(&priv->lock);
-- } else {
-- priv->removing = true;
-- del_timer_sync(&priv->poll_timer);
-- }
-- kthread_stop(priv->thread);
-- if (dev->cpu == 0)
-- mips_ejtag_fdc_con.tty_drv = NULL;
-- tty_unregister_driver(priv->driver);
-- for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
-- dport = &priv->ports[nport];
-- tty_port_destroy(&dport->port);
-- }
-- put_tty_driver(priv->driver);
-- return 0;
--}
--
- static int mips_ejtag_fdc_tty_cpu_down(struct mips_cdmm_device *dev)
- {
- struct mips_ejtag_fdc_tty *priv = mips_cdmm_get_drvdata(dev);
-@@ -1149,12 +1117,11 @@ static struct mips_cdmm_driver mips_ejtag_fdc_tty_driver = {
- .name = "mips_ejtag_fdc",
- },
- .probe = mips_ejtag_fdc_tty_probe,
-- .remove = mips_ejtag_fdc_tty_remove,
- .cpu_down = mips_ejtag_fdc_tty_cpu_down,
- .cpu_up = mips_ejtag_fdc_tty_cpu_up,
- .id_table = mips_ejtag_fdc_tty_ids,
- };
--module_mips_cdmm_driver(mips_ejtag_fdc_tty_driver);
-+builtin_mips_cdmm_driver(mips_ejtag_fdc_tty_driver);
-
- static int __init mips_ejtag_fdc_init_console(void)
- {
-diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index afc1879..dedac8a 100644
---- a/drivers/tty/n_tty.c
-+++ b/drivers/tty/n_tty.c
-@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
- {
- struct n_tty_data *ldata = tty->disc_data;
-
-- tty_audit_add_data(tty, to, n, ldata->icanon);
-+ tty_audit_add_data(tty, from, n, ldata->icanon);
- return copy_to_user(to, from, n);
- }
-
-diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
-index 90ca082..3d245cd 100644
---- a/drivers/tty/tty_audit.c
-+++ b/drivers/tty/tty_audit.c
-@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
- *
- * Audit @data of @size from @tty, if necessary.
- */
--void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
-+void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size, unsigned icanon)
- {
- struct tty_audit_buf *buf;
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index 774df35..1aa0286 100644
---- a/drivers/tty/tty_io.c
-+++ b/drivers/tty/tty_io.c
-@@ -1279,18 +1279,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
- int was_stopped = tty->stopped;
-
- if (tty->ops->send_xchar) {
-+ down_read(&tty->termios_rwsem);
- tty->ops->send_xchar(tty, ch);
-+ up_read(&tty->termios_rwsem);
- return 0;
- }
-
- if (tty_write_lock(tty, 0) < 0)
- return -ERESTARTSYS;
-
-+ down_read(&tty->termios_rwsem);
- if (was_stopped)
- start_tty(tty);
- tty->ops->write(tty, &ch, 1);
- if (was_stopped)
- stop_tty(tty);
-+ up_read(&tty->termios_rwsem);
- tty_write_unlock(tty);
- return 0;
- }
-diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
-index 5232fb6..043e332 100644
---- a/drivers/tty/tty_ioctl.c
-+++ b/drivers/tty/tty_ioctl.c
-@@ -1142,16 +1142,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
- spin_unlock_irq(&tty->flow_lock);
- break;
- case TCIOFF:
-- down_read(&tty->termios_rwsem);
- if (STOP_CHAR(tty) != __DISABLED_CHAR)
- retval = tty_send_xchar(tty, STOP_CHAR(tty));
-- up_read(&tty->termios_rwsem);
- break;
- case TCION:
-- down_read(&tty->termios_rwsem);
- if (START_CHAR(tty) != __DISABLED_CHAR)
- retval = tty_send_xchar(tty, START_CHAR(tty));
-- up_read(&tty->termios_rwsem);
- break;
- default:
- return -EINVAL;
-diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
-index fa77432..846ceb9 100644
---- a/drivers/usb/chipidea/ci_hdrc_imx.c
-+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
-@@ -68,6 +68,12 @@ struct ci_hdrc_imx_data {
- struct imx_usbmisc_data *usbmisc_data;
- bool supports_runtime_pm;
- bool in_lpm;
-+ /* SoCs before i.mx6 (except imx23/imx28) need three clks */
-+ bool need_three_clks;
-+ struct clk *clk_ipg;
-+ struct clk *clk_ahb;
-+ struct clk *clk_per;
-+ /* --------------------------------- */
- };
-
- /* Common functions shared by usbmisc drivers */
-@@ -119,6 +125,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
- }
-
- /* End of common functions shared by usbmisc drivers*/
-+static int imx_get_clks(struct device *dev)
-+{
-+ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
-+ int ret = 0;
-+
-+ data->clk_ipg = devm_clk_get(dev, "ipg");
-+ if (IS_ERR(data->clk_ipg)) {
-+ /* If the platform only needs one clock */
-+ data->clk = devm_clk_get(dev, NULL);
-+ if (IS_ERR(data->clk)) {
-+ ret = PTR_ERR(data->clk);
-+ dev_err(dev,
-+ "Failed to get clks, err=%ld,%ld\n",
-+ PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
-+ return ret;
-+ }
-+ return ret;
-+ }
-+
-+ data->clk_ahb = devm_clk_get(dev, "ahb");
-+ if (IS_ERR(data->clk_ahb)) {
-+ ret = PTR_ERR(data->clk_ahb);
-+ dev_err(dev,
-+ "Failed to get ahb clock, err=%d\n", ret);
-+ return ret;
-+ }
-+
-+ data->clk_per = devm_clk_get(dev, "per");
-+ if (IS_ERR(data->clk_per)) {
-+ ret = PTR_ERR(data->clk_per);
-+ dev_err(dev,
-+ "Failed to get per clock, err=%d\n", ret);
-+ return ret;
-+ }
-+
-+ data->need_three_clks = true;
-+ return ret;
-+}
-+
-+static int imx_prepare_enable_clks(struct device *dev)
-+{
-+ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
-+ int ret = 0;
-+
-+ if (data->need_three_clks) {
-+ ret = clk_prepare_enable(data->clk_ipg);
-+ if (ret) {
-+ dev_err(dev,
-+ "Failed to prepare/enable ipg clk, err=%d\n",
-+ ret);
-+ return ret;
-+ }
-+
-+ ret = clk_prepare_enable(data->clk_ahb);
-+ if (ret) {
-+ dev_err(dev,
-+ "Failed to prepare/enable ahb clk, err=%d\n",
-+ ret);
-+ clk_disable_unprepare(data->clk_ipg);
-+ return ret;
-+ }
-+
-+ ret = clk_prepare_enable(data->clk_per);
-+ if (ret) {
-+ dev_err(dev,
-+ "Failed to prepare/enable per clk, err=%d\n",
-+ ret);
-+ clk_disable_unprepare(data->clk_ahb);
-+ clk_disable_unprepare(data->clk_ipg);
-+ return ret;
-+ }
-+ } else {
-+ ret = clk_prepare_enable(data->clk);
-+ if (ret) {
-+ dev_err(dev,
-+ "Failed to prepare/enable clk, err=%d\n",
-+ ret);
-+ return ret;
-+ }
-+ }
-+
-+ return ret;
-+}
-+
-+static void imx_disable_unprepare_clks(struct device *dev)
-+{
-+ struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
-+
-+ if (data->need_three_clks) {
-+ clk_disable_unprepare(data->clk_per);
-+ clk_disable_unprepare(data->clk_ahb);
-+ clk_disable_unprepare(data->clk_ipg);
-+ } else {
-+ clk_disable_unprepare(data->clk);
-+ }
-+}
-
- static int ci_hdrc_imx_probe(struct platform_device *pdev)
- {
-@@ -137,23 +239,18 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
- if (!data)
- return -ENOMEM;
-
-+ platform_set_drvdata(pdev, data);
- data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
- if (IS_ERR(data->usbmisc_data))
- return PTR_ERR(data->usbmisc_data);
-
-- data->clk = devm_clk_get(&pdev->dev, NULL);
-- if (IS_ERR(data->clk)) {
-- dev_err(&pdev->dev,
-- "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
-- return PTR_ERR(data->clk);
-- }
-+ ret = imx_get_clks(&pdev->dev);
-+ if (ret)
-+ return ret;
-
-- ret = clk_prepare_enable(data->clk);
-- if (ret) {
-- dev_err(&pdev->dev,
-- "Failed to prepare or enable clock, err=%d\n", ret);
-+ ret = imx_prepare_enable_clks(&pdev->dev);
-+ if (ret)
- return ret;
-- }
-
- data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
- if (IS_ERR(data->phy)) {
-@@ -196,8 +293,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
- goto disable_device;
- }
-
-- platform_set_drvdata(pdev, data);
--
- if (data->supports_runtime_pm) {
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-@@ -210,7 +305,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
- disable_device:
- ci_hdrc_remove_device(data->ci_pdev);
- err_clk:
-- clk_disable_unprepare(data->clk);
-+ imx_disable_unprepare_clks(&pdev->dev);
- return ret;
- }
-
-@@ -224,7 +319,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
- pm_runtime_put_noidle(&pdev->dev);
- }
- ci_hdrc_remove_device(data->ci_pdev);
-- clk_disable_unprepare(data->clk);
-+ imx_disable_unprepare_clks(&pdev->dev);
-
- return 0;
- }
-@@ -236,7 +331,7 @@ static int imx_controller_suspend(struct device *dev)
-
- dev_dbg(dev, "at %s\n", __func__);
-
-- clk_disable_unprepare(data->clk);
-+ imx_disable_unprepare_clks(dev);
- data->in_lpm = true;
-
- return 0;
-@@ -254,7 +349,7 @@ static int imx_controller_resume(struct device *dev)
- return 0;
- }
-
-- ret = clk_prepare_enable(data->clk);
-+ ret = imx_prepare_enable_clks(dev);
- if (ret)
- return ret;
-
-@@ -269,7 +364,7 @@ static int imx_controller_resume(struct device *dev)
- return 0;
-
- clk_disable:
-- clk_disable_unprepare(data->clk);
-+ imx_disable_unprepare_clks(dev);
- return ret;
- }
-
-diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
-index 6e53c24..92937c1 100644
---- a/drivers/usb/chipidea/udc.c
-+++ b/drivers/usb/chipidea/udc.c
-@@ -1730,6 +1730,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
- return retval;
- }
-
-+static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
-+{
-+ if (!ci_otg_is_fsm_mode(ci))
-+ return;
-+
-+ mutex_lock(&ci->fsm.lock);
-+ if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
-+ ci->fsm.a_bidl_adis_tmout = 1;
-+ ci_hdrc_otg_fsm_start(ci);
-+ } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
-+ ci->fsm.protocol = PROTO_UNDEF;
-+ ci->fsm.otg->state = OTG_STATE_UNDEFINED;
-+ }
-+ mutex_unlock(&ci->fsm.lock);
-+}
-+
- /**
- * ci_udc_stop: unregister a gadget driver
- */
-@@ -1754,6 +1770,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
- ci->driver = NULL;
- spin_unlock_irqrestore(&ci->lock, flags);
-
-+ ci_udc_stop_for_otg_fsm(ci);
- return 0;
- }
-
-diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
-index f38e875..8218ba7 100644
---- a/drivers/usb/class/usblp.c
-+++ b/drivers/usb/class/usblp.c
-@@ -873,11 +873,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
-
- add_wait_queue(&usblp->wwait, &waita);
- for (;;) {
-- set_current_state(TASK_INTERRUPTIBLE);
- if (mutex_lock_interruptible(&usblp->mut)) {
- rc = -EINTR;
- break;
- }
-+ set_current_state(TASK_INTERRUPTIBLE);
- rc = usblp_wtest(usblp, nonblock);
- mutex_unlock(&usblp->mut);
- if (rc <= 0)
-diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
-index ff5773c..c0566ec 100644
---- a/drivers/usb/dwc3/core.c
-+++ b/drivers/usb/dwc3/core.c
-@@ -490,6 +490,9 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
- if (dwc->dis_u2_susphy_quirk)
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-
-+ if (dwc->dis_enblslpm_quirk)
-+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
-+
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-
- return 0;
-@@ -509,12 +512,18 @@ static int dwc3_core_init(struct dwc3 *dwc)
-
- reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
- /* This should read as U3 followed by revision number */
-- if ((reg & DWC3_GSNPSID_MASK) != 0x55330000) {
-+ if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
-+ /* Detected DWC_usb3 IP */
-+ dwc->revision = reg;
-+ } else if ((reg & DWC3_GSNPSID_MASK) == 0x33310000) {
-+ /* Detected DWC_usb31 IP */
-+ dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
-+ dwc->revision |= DWC3_REVISION_IS_DWC31;
-+ } else {
- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
- ret = -ENODEV;
- goto err0;
- }
-- dwc->revision = reg;
-
- /*
- * Write Linux Version Code to our GUID register so it's easy to figure
-@@ -881,6 +890,8 @@ static int dwc3_probe(struct platform_device *pdev)
- "snps,dis_u3_susphy_quirk");
- dwc->dis_u2_susphy_quirk = of_property_read_bool(node,
- "snps,dis_u2_susphy_quirk");
-+ dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
-+ "snps,dis_enblslpm_quirk");
-
- dwc->tx_de_emphasis_quirk = of_property_read_bool(node,
- "snps,tx_de_emphasis_quirk");
-@@ -911,6 +922,7 @@ static int dwc3_probe(struct platform_device *pdev)
- dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
- dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
- dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
-+ dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;
-
- dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
- if (pdata->tx_de_emphasis)
-diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
-index 0447788..6e53ce9 100644
---- a/drivers/usb/dwc3/core.h
-+++ b/drivers/usb/dwc3/core.h
-@@ -108,6 +108,9 @@
- #define DWC3_GPRTBIMAP_FS0 0xc188
- #define DWC3_GPRTBIMAP_FS1 0xc18c
-
-+#define DWC3_VER_NUMBER 0xc1a0
-+#define DWC3_VER_TYPE 0xc1a4
-+
- #define DWC3_GUSB2PHYCFG(n) (0xc200 + (n * 0x04))
- #define DWC3_GUSB2I2CCTL(n) (0xc240 + (n * 0x04))
-
-@@ -175,6 +178,7 @@
- #define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
- #define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
- #define DWC3_GUSB2PHYCFG_ULPI_UTMI (1 << 4)
-+#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
-
- /* Global USB2 PHY Vendor Control Register */
- #define DWC3_GUSB2PHYACC_NEWREGREQ (1 << 25)
-@@ -712,6 +716,8 @@ struct dwc3_scratchpad_array {
- * @rx_detect_poll_quirk: set if we enable rx_detect to polling lfps quirk
- * @dis_u3_susphy_quirk: set if we disable usb3 suspend phy
- * @dis_u2_susphy_quirk: set if we disable usb2 suspend phy
-+ * @dis_enblslpm_quirk: set if we clear enblslpm in GUSB2PHYCFG,
-+ * disabling the suspend signal to the PHY.
- * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
- * @tx_de_emphasis: Tx de-emphasis value
- * 0 - -6dB de-emphasis
-@@ -766,6 +772,14 @@ struct dwc3 {
- u32 num_event_buffers;
- u32 u1u2;
- u32 maximum_speed;
-+
-+ /*
-+ * All 3.1 IP version constants are greater than the 3.0 IP
-+ * version constants. This works for most version checks in
-+ * dwc3. However, in the future, this may not apply as
-+ * features may be developed on newer versions of the 3.0 IP
-+ * that are not in the 3.1 IP.
-+ */
- u32 revision;
-
- #define DWC3_REVISION_173A 0x5533173a
-@@ -788,6 +802,13 @@ struct dwc3 {
- #define DWC3_REVISION_270A 0x5533270a
- #define DWC3_REVISION_280A 0x5533280a
-
-+/*
-+ * NOTICE: we're using bit 31 as an "is usb 3.1" flag. This is really
-+ * just so dwc31 revisions are always larger than dwc3.
-+ */
-+#define DWC3_REVISION_IS_DWC31 0x80000000
-+#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
-+
- enum dwc3_ep0_next ep0_next_event;
- enum dwc3_ep0_state ep0state;
- enum dwc3_link_state link_state;
-@@ -841,6 +862,7 @@ struct dwc3 {
- unsigned rx_detect_poll_quirk:1;
- unsigned dis_u3_susphy_quirk:1;
- unsigned dis_u2_susphy_quirk:1;
-+ unsigned dis_enblslpm_quirk:1;
-
- unsigned tx_de_emphasis_quirk:1;
- unsigned tx_de_emphasis:2;
-diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
-index 27e4fc8..04b87eb 100644
---- a/drivers/usb/dwc3/dwc3-pci.c
-+++ b/drivers/usb/dwc3/dwc3-pci.c
-@@ -27,6 +27,8 @@
- #include "platform_data.h"
-
- #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
-+#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
- #define PCI_DEVICE_ID_INTEL_BYT 0x0f37
- #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
- #define PCI_DEVICE_ID_INTEL_BSW 0x22B7
-@@ -100,6 +102,22 @@ static int dwc3_pci_quirks(struct pci_dev *pdev)
- }
- }
-
-+ if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
-+ (pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 ||
-+ pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI ||
-+ pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31)) {
-+
-+ struct dwc3_platform_data pdata;
-+
-+ memset(&pdata, 0, sizeof(pdata));
-+ pdata.usb3_lpm_capable = true;
-+ pdata.has_lpm_erratum = true;
-+ pdata.dis_enblslpm_quirk = true;
-+
-+ return platform_device_add_data(pci_get_drvdata(pdev), &pdata,
-+ sizeof(pdata));
-+ }
-+
- return 0;
- }
-
-@@ -172,6 +190,14 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
- PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
- PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
- },
-+ {
-+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
-+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI),
-+ },
-+ {
-+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
-+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31),
-+ },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
-diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
-index 333a7c0..6fbf461 100644
---- a/drivers/usb/dwc3/gadget.c
-+++ b/drivers/usb/dwc3/gadget.c
-@@ -1859,27 +1859,32 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
- unsigned int i;
- int ret;
-
-- req = next_request(&dep->req_queued);
-- if (!req) {
-- WARN_ON_ONCE(1);
-- return 1;
-- }
-- i = 0;
- do {
-- slot = req->start_slot + i;
-- if ((slot == DWC3_TRB_NUM - 1) &&
-+ req = next_request(&dep->req_queued);
-+ if (!req) {
-+ WARN_ON_ONCE(1);
-+ return 1;
-+ }
-+ i = 0;
-+ do {
-+ slot = req->start_slot + i;
-+ if ((slot == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->endpoint.desc))
-- slot++;
-- slot %= DWC3_TRB_NUM;
-- trb = &dep->trb_pool[slot];
-+ slot++;
-+ slot %= DWC3_TRB_NUM;
-+ trb = &dep->trb_pool[slot];
-+
-+ ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
-+ event, status);
-+ if (ret)
-+ break;
-+ } while (++i < req->request.num_mapped_sgs);
-+
-+ dwc3_gadget_giveback(dep, req, status);
-
-- ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
-- event, status);
- if (ret)
- break;
-- } while (++i < req->request.num_mapped_sgs);
--
-- dwc3_gadget_giveback(dep, req, status);
-+ } while (1);
-
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- list_empty(&dep->req_queued)) {
-@@ -2709,12 +2714,34 @@ int dwc3_gadget_init(struct dwc3 *dwc)
- }
-
- dwc->gadget.ops = &dwc3_gadget_ops;
-- dwc->gadget.max_speed = USB_SPEED_SUPER;
- dwc->gadget.speed = USB_SPEED_UNKNOWN;
- dwc->gadget.sg_supported = true;
- dwc->gadget.name = "dwc3-gadget";
-
- /*
-+ * FIXME We might be setting max_speed to <SUPER, however versions
-+ * <2.20a of dwc3 have an issue with metastability (documented
-+ * elsewhere in this driver) which tells us we can't set max speed to
-+ * anything lower than SUPER.
-+ *
-+ * Because gadget.max_speed is only used by composite.c and function
-+ * drivers (i.e. it won't go into dwc3's registers) we are allowing this
-+ * to happen so we avoid sending SuperSpeed Capability descriptor
-+ * together with our BOS descriptor as that could confuse host into
-+ * thinking we can handle super speed.
-+ *
-+ * Note that, in fact, we won't even support GetBOS requests when speed
-+ * is less than super speed because we don't have means, yet, to tell
-+ * composite.c that we are USB 2.0 + LPM ECN.
-+ */
-+ if (dwc->revision < DWC3_REVISION_220A)
-+ dwc3_trace(trace_dwc3_gadget,
-+ "Changing max_speed on rev %08x\n",
-+ dwc->revision);
-+
-+ dwc->gadget.max_speed = dwc->maximum_speed;
-+
-+ /*
- * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
- * on ep out.
- */
-diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
-index d3614ec..db29380 100644
---- a/drivers/usb/dwc3/platform_data.h
-+++ b/drivers/usb/dwc3/platform_data.h
-@@ -42,6 +42,7 @@ struct dwc3_platform_data {
- unsigned rx_detect_poll_quirk:1;
- unsigned dis_u3_susphy_quirk:1;
- unsigned dis_u2_susphy_quirk:1;
-+ unsigned dis_enblslpm_quirk:1;
-
- unsigned tx_de_emphasis_quirk:1;
- unsigned tx_de_emphasis:2;
-diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
-index 4095cce0..35fff45 100644
---- a/drivers/usb/gadget/udc/atmel_usba_udc.c
-+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
-@@ -1634,7 +1634,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
- spin_lock(&udc->lock);
-
- int_enb = usba_int_enb_get(udc);
-- status = usba_readl(udc, INT_STA) & int_enb;
-+ status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
- DBG(DBG_INT, "irq, status=%#08x\n", status);
-
- if (status & USBA_DET_SUSPEND) {
-diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
-index 2bee912..baa0191 100644
---- a/drivers/usb/gadget/udc/net2280.c
-+++ b/drivers/usb/gadget/udc/net2280.c
-@@ -1846,7 +1846,7 @@ static void defect7374_disable_data_eps(struct net2280 *dev)
-
- for (i = 1; i < 5; i++) {
- ep = &dev->ep[i];
-- writel(0, &ep->cfg->ep_cfg);
-+ writel(i, &ep->cfg->ep_cfg);
- }
-
- /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
-diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
-index bfcbb9a..ee8d5fa 100644
---- a/drivers/usb/host/ehci-orion.c
-+++ b/drivers/usb/host/ehci-orion.c
-@@ -224,7 +224,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
- priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
- if (IS_ERR(priv->phy)) {
- err = PTR_ERR(priv->phy);
-- goto err_phy_get;
-+ if (err != -ENOSYS)
-+ goto err_phy_get;
- } else {
- err = phy_init(priv->phy);
- if (err)
-diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
-index d7b9f484..6062996 100644
---- a/drivers/usb/host/xhci.c
-+++ b/drivers/usb/host/xhci.c
-@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
- command |= CMD_RESET;
- writel(command, &xhci->op_regs->command);
-
-+ /* Existing Intel xHCI controllers require a delay of 1 ms
-+ * after setting the CMD_RESET bit, and before accessing any
-+ * HC registers. This allows the HC to complete the
-+ * reset operation and be ready for HC register access.
-+ * Without this delay, the subsequent HC register access
-+ * may, in rare cases, result in a system hang.
-+ */
-+ if (xhci->quirks & XHCI_INTEL_HOST)
-+ udelay(1000);
-+
- ret = xhci_handshake(&xhci->op_regs->command,
- CMD_RESET, 0, 10 * 1000 * 1000);
- if (ret)
-diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
-index 514a6cd..2fe6d26 100644
---- a/drivers/usb/musb/musb_core.c
-+++ b/drivers/usb/musb/musb_core.c
-@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
- /*-------------------------------------------------------------------------*/
-
- #ifndef CONFIG_BLACKFIN
--static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
-+static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
- {
- void __iomem *addr = phy->io_priv;
- int i = 0;
-@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
- * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
- */
-
-- musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
-+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
- musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
- MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
-
-@@ -176,7 +176,7 @@ out:
- return ret;
- }
-
--static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
-+static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
- {
- void __iomem *addr = phy->io_priv;
- int i = 0;
-@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
- power &= ~MUSB_POWER_SUSPENDM;
- musb_writeb(addr, MUSB_POWER, power);
-
-- musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
-- musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
-+ musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
-+ musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
- musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
-
- while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
-diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
-index 7c8eb4c..4021846 100644
---- a/drivers/usb/serial/option.c
-+++ b/drivers/usb/serial/option.c
-@@ -162,6 +162,7 @@ static void option_instat_callback(struct urb *urb);
- #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
- #define NOVATELWIRELESS_PRODUCT_E362 0x9010
- #define NOVATELWIRELESS_PRODUCT_E371 0x9011
-+#define NOVATELWIRELESS_PRODUCT_U620L 0x9022
- #define NOVATELWIRELESS_PRODUCT_G2 0xA010
- #define NOVATELWIRELESS_PRODUCT_MC551 0xB001
-
-@@ -357,6 +358,7 @@ static void option_instat_callback(struct urb *urb);
- /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
- * It seems to contain a Qualcomm QSC6240/6290 chipset */
- #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
-+#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
-
- /* iBall 3.5G connect wireless modem */
- #define IBALL_3_5G_CONNECT 0x9605
-@@ -522,6 +524,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- };
-
-+static const struct option_blacklist_info four_g_w100_blacklist = {
-+ .sendsetup = BIT(1) | BIT(2),
-+ .reserved = BIT(3),
-+};
-+
- static const struct option_blacklist_info alcatel_x200_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-@@ -1060,6 +1067,7 @@ static const struct usb_device_id option_ids[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
-+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
-
- { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
- { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
-@@ -1653,6 +1661,9 @@ static const struct usb_device_id option_ids[] = {
- { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
- .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
- },
-+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
-+ .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
-+ },
- { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
- { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
- { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
-diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
-index f49d262..514fa91 100644
---- a/drivers/usb/serial/qcserial.c
-+++ b/drivers/usb/serial/qcserial.c
-@@ -22,6 +22,8 @@
- #define DRIVER_AUTHOR "Qualcomm Inc"
- #define DRIVER_DESC "Qualcomm USB Serial driver"
-
-+#define QUECTEL_EC20_PID 0x9215
-+
- /* standard device layouts supported by this driver */
- enum qcserial_layouts {
- QCSERIAL_G2K = 0, /* Gobi 2000 */
-@@ -169,6 +171,38 @@ static const struct usb_device_id id_table[] = {
- };
- MODULE_DEVICE_TABLE(usb, id_table);
-
-+static int handle_quectel_ec20(struct device *dev, int ifnum)
-+{
-+ int altsetting = 0;
-+
-+ /*
-+ * Quectel EC20 Mini PCIe LTE module layout:
-+ * 0: DM/DIAG (use libqcdm from ModemManager for communication)
-+ * 1: NMEA
-+ * 2: AT-capable modem port
-+ * 3: Modem interface
-+ * 4: NDIS
-+ */
-+ switch (ifnum) {
-+ case 0:
-+ dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
-+ break;
-+ case 1:
-+ dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
-+ break;
-+ case 2:
-+ case 3:
-+ dev_dbg(dev, "Quectel EC20 Modem port found\n");
-+ break;
-+ case 4:
-+ /* Don't claim the QMI/net interface */
-+ altsetting = -1;
-+ break;
-+ }
-+
-+ return altsetting;
-+}
-+
- static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
- {
- struct usb_host_interface *intf = serial->interface->cur_altsetting;
-@@ -178,6 +212,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
- __u8 ifnum;
- int altsetting = -1;
-
-+ /* we only support vendor specific functions */
-+ if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
-+ goto done;
-+
- nintf = serial->dev->actconfig->desc.bNumInterfaces;
- dev_dbg(dev, "Num Interfaces = %d\n", nintf);
- ifnum = intf->desc.bInterfaceNumber;
-@@ -237,6 +275,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
- altsetting = -1;
- break;
- case QCSERIAL_G2K:
-+ /* handle non-standard layouts */
-+ if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
-+ altsetting = handle_quectel_ec20(dev, ifnum);
-+ goto done;
-+ }
-+
- /*
- * Gobi 2K+ USB layout:
- * 0: QMI/net
-@@ -297,29 +341,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
- break;
- case QCSERIAL_HWI:
- /*
-- * Huawei layout:
-- * 0: AT-capable modem port
-- * 1: DM/DIAG
-- * 2: AT-capable modem port
-- * 3: CCID-compatible PCSC interface
-- * 4: QMI/net
-- * 5: NMEA
-+ * Huawei devices map functions by subclass + protocol
-+ * instead of interface numbers. The protocol identifies
-+ * a specific function, while the subclass indicates a
-+ * specific firmware source.
-+ *
-+ * This is a blacklist of functions known to be
-+ * non-serial. The rest are assumed to be serial and
-+ * will be handled by this driver
- */
-- switch (ifnum) {
-- case 0:
-- case 2:
-- dev_dbg(dev, "Modem port found\n");
-- break;
-- case 1:
-- dev_dbg(dev, "DM/DIAG interface found\n");
-- break;
-- case 5:
-- dev_dbg(dev, "NMEA GPS interface found\n");
-- break;
-- default:
-- /* don't claim any unsupported interface */
-+ switch (intf->desc.bInterfaceProtocol) {
-+ /* QMI combined (qmi_wwan) */
-+ case 0x07:
-+ case 0x37:
-+ case 0x67:
-+ /* QMI data (qmi_wwan) */
-+ case 0x08:
-+ case 0x38:
-+ case 0x68:
-+ /* QMI control (qmi_wwan) */
-+ case 0x09:
-+ case 0x39:
-+ case 0x69:
-+ /* NCM like (huawei_cdc_ncm) */
-+ case 0x16:
-+ case 0x46:
-+ case 0x76:
- altsetting = -1;
- break;
-+ default:
-+ dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
-+ intf->desc.bInterfaceClass,
-+ intf->desc.bInterfaceSubClass,
-+ intf->desc.bInterfaceProtocol);
- }
- break;
- default:
-diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
-index e9da41d..2694df2 100644
---- a/drivers/usb/serial/ti_usb_3410_5052.c
-+++ b/drivers/usb/serial/ti_usb_3410_5052.c
-@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
- { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
-+ { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
- { } /* terminator */
- };
-
-@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
- { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
-+ { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
- { } /* terminator */
- };
-
-diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
-index 4a2423e..98f35c6 100644
---- a/drivers/usb/serial/ti_usb_3410_5052.h
-+++ b/drivers/usb/serial/ti_usb_3410_5052.h
-@@ -56,6 +56,10 @@
- #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
- #define ABBOTT_STRIP_PORT_ID 0x3420
-
-+/* Honeywell vendor and product IDs */
-+#define HONEYWELL_VENDOR_ID 0x10ac
-+#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
-+
- /* Commands */
- #define TI_GET_VERSION 0x01
- #define TI_GET_PORT_STATUS 0x02
-diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
-index 96093ae..cdc3d33 100644
---- a/drivers/xen/events/events_base.c
-+++ b/drivers/xen/events/events_base.c
-@@ -39,6 +39,7 @@
- #include <asm/irq.h>
- #include <asm/idle.h>
- #include <asm/io_apic.h>
-+#include <asm/i8259.h>
- #include <asm/xen/pci.h>
- #include <xen/page.h>
- #endif
-@@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
- return xen_allocate_irq_dynamic();
-
- /* Legacy IRQ descriptors are already allocated by the arch. */
-- if (gsi < NR_IRQS_LEGACY)
-+ if (gsi < nr_legacy_irqs())
- irq = gsi;
- else
- irq = irq_alloc_desc_at(gsi, -1);
-@@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq)
- kfree(info);
-
- /* Legacy IRQ descriptors are managed by the arch. */
-- if (irq < NR_IRQS_LEGACY)
-+ if (irq < nr_legacy_irqs())
- return;
-
- irq_free_desc(irq);
-diff --git a/fs/proc/array.c b/fs/proc/array.c
-index ce065cf..57fde2d 100644
---- a/fs/proc/array.c
-+++ b/fs/proc/array.c
-@@ -372,7 +372,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
- static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task, int whole)
- {
-- unsigned long vsize, eip, esp, wchan = ~0UL;
-+ unsigned long vsize, eip, esp, wchan = 0;
- int priority, nice;
- int tty_pgrp = -1, tty_nr = 0;
- sigset_t sigign, sigcatch;
-@@ -504,7 +504,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
- seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
- seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
- seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
-- seq_put_decimal_ull(m, ' ', wchan);
-+
-+ /*
-+ * We used to output the absolute kernel address, but that's an
-+ * information leak - so instead we show a 0/1 flag here, to signal
-+ * to user-space whether there's a wchan field in /proc/PID/wchan.
-+ *
-+ * This works with older implementations of procps as well.
-+ */
-+ if (wchan)
-+ seq_puts(m, " 1");
-+ else
-+ seq_puts(m, " 0");
-+
- seq_put_decimal_ull(m, ' ', 0);
- seq_put_decimal_ull(m, ' ', 0);
- seq_put_decimal_ll(m, ' ', task->exit_signal);
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index aa50d1a..83a43c1 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -430,13 +430,10 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
-
- wchan = get_wchan(task);
-
-- if (lookup_symbol_name(wchan, symname) < 0) {
-- if (!ptrace_may_access(task, PTRACE_MODE_READ))
-- return 0;
-- seq_printf(m, "%lu", wchan);
-- } else {
-+ if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
- seq_printf(m, "%s", symname);
-- }
-+ else
-+ seq_putc(m, '0');
-
- return 0;
- }
-diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index 05e99b8..053f122 100644
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -436,6 +436,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
-
-+static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
-+{
-+ struct kvm_vcpu *vcpu;
-+ int i;
-+
-+ kvm_for_each_vcpu(i, vcpu, kvm)
-+ if (vcpu->vcpu_id == id)
-+ return vcpu;
-+ return NULL;
-+}
-+
- #define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index ad6c891..342a760 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -605,7 +605,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
-
- /* tty_audit.c */
- #ifdef CONFIG_AUDIT
--extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
-+extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size, unsigned icanon);
- extern void tty_audit_exit(void);
- extern void tty_audit_fork(struct signal_struct *sig);
-@@ -613,8 +613,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
- extern void tty_audit_push(struct tty_struct *tty);
- extern int tty_audit_push_current(void);
- #else
--static inline void tty_audit_add_data(struct tty_struct *tty,
-- unsigned char *data, size_t size, unsigned icanon)
-+static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
-+ size_t size, unsigned icanon)
- {
- }
- static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
-diff --git a/include/net/inet_common.h b/include/net/inet_common.h
-index 279f835..109e3ee 100644
---- a/include/net/inet_common.h
-+++ b/include/net/inet_common.h
-@@ -41,7 +41,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
-
- static inline void inet_ctl_sock_destroy(struct sock *sk)
- {
-- sock_release(sk->sk_socket);
-+ if (sk)
-+ sock_release(sk->sk_socket);
- }
-
- #endif
-diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
-index 5fa643b..ff6d78f 100644
---- a/include/net/ip_fib.h
-+++ b/include/net/ip_fib.h
-@@ -306,7 +306,7 @@ void fib_flush_external(struct net *net);
-
- /* Exported by fib_semantics.c */
- int ip_fib_check_default(__be32 gw, struct net_device *dev);
--int fib_sync_down_dev(struct net_device *dev, unsigned long event);
-+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
- int fib_sync_down_addr(struct net *net, __be32 local);
- int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
- void fib_select_multipath(struct fib_result *res);
-diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
-index f1a117f..0bec458 100644
---- a/net/bluetooth/hidp/core.c
-+++ b/net/bluetooth/hidp/core.c
-@@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg)
- {
- struct hidp_session *session = (struct hidp_session *) arg;
-
-+ /* The HIDP user-space API only contains calls to add and remove
-+ * devices. There is no way to forward events of any kind. Therefore,
-+ * we have to forcefully disconnect a device on idle-timeouts. This is
-+ * unfortunate and weird API design, but it is spec-compliant and
-+ * required for backwards-compatibility. Hence, on idle-timeout, we
-+ * signal driver-detach events, so poll() will be woken up with an
-+ * error-condition on both sockets.
-+ */
-+
-+ session->intr_sock->sk->sk_err = EUNATCH;
-+ session->ctrl_sock->sk->sk_err = EUNATCH;
-+ wake_up_interruptible(sk_sleep(session->intr_sock->sk));
-+ wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
-+
- hidp_session_terminate(session);
- }
-
-diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
-index 92720f3..e32a9e4 100644
---- a/net/bluetooth/mgmt.c
-+++ b/net/bluetooth/mgmt.c
-@@ -3090,6 +3090,11 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
- } else {
- u8 addr_type;
-
-+ if (cp->addr.type == BDADDR_LE_PUBLIC)
-+ addr_type = ADDR_LE_DEV_PUBLIC;
-+ else
-+ addr_type = ADDR_LE_DEV_RANDOM;
-+
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
- &cp->addr.bdaddr);
- if (conn) {
-@@ -3105,13 +3110,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
- */
- if (!cp->disconnect)
- conn = NULL;
-+ } else {
-+ hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
- }
-
-- if (cp->addr.type == BDADDR_LE_PUBLIC)
-- addr_type = ADDR_LE_DEV_PUBLIC;
-- else
-- addr_type = ADDR_LE_DEV_RANDOM;
--
- hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
- err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
-diff --git a/net/core/dst.c b/net/core/dst.c
-index 002144be..cc4a086 100644
---- a/net/core/dst.c
-+++ b/net/core/dst.c
-@@ -287,7 +287,7 @@ void dst_release(struct dst_entry *dst)
- if (unlikely(newrefcnt < 0))
- net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
- __func__, dst, newrefcnt);
-- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
-+ if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
- call_rcu(&dst->rcu_head, dst_destroy_rcu);
- }
- }
-diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
-index 6bbc549..d7116cf 100644
---- a/net/ipv4/fib_frontend.c
-+++ b/net/ipv4/fib_frontend.c
-@@ -1063,9 +1063,10 @@ static void nl_fib_lookup_exit(struct net *net)
- net->ipv4.fibnl = NULL;
- }
-
--static void fib_disable_ip(struct net_device *dev, unsigned long event)
-+static void fib_disable_ip(struct net_device *dev, unsigned long event,
-+ bool force)
- {
-- if (fib_sync_down_dev(dev, event))
-+ if (fib_sync_down_dev(dev, event, force))
- fib_flush(dev_net(dev));
- rt_cache_flush(dev_net(dev));
- arp_ifdown(dev);
-@@ -1093,7 +1094,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
- /* Last address was deleted from this interface.
- * Disable IP.
- */
-- fib_disable_ip(dev, event);
-+ fib_disable_ip(dev, event, true);
- } else {
- rt_cache_flush(dev_net(dev));
- }
-@@ -1110,7 +1111,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
- unsigned int flags;
-
- if (event == NETDEV_UNREGISTER) {
-- fib_disable_ip(dev, event);
-+ fib_disable_ip(dev, event, true);
- rt_flush_dev(dev);
- return NOTIFY_DONE;
- }
-@@ -1131,14 +1132,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
- rt_cache_flush(net);
- break;
- case NETDEV_DOWN:
-- fib_disable_ip(dev, event);
-+ fib_disable_ip(dev, event, false);
- break;
- case NETDEV_CHANGE:
- flags = dev_get_flags(dev);
- if (flags & (IFF_RUNNING | IFF_LOWER_UP))
- fib_sync_up(dev, RTNH_F_LINKDOWN);
- else
-- fib_sync_down_dev(dev, event);
-+ fib_sync_down_dev(dev, event, false);
- /* fall through */
- case NETDEV_CHANGEMTU:
- rt_cache_flush(net);
-diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
-index 3a06586..71bad5c 100644
---- a/net/ipv4/fib_semantics.c
-+++ b/net/ipv4/fib_semantics.c
-@@ -1132,7 +1132,13 @@ int fib_sync_down_addr(struct net *net, __be32 local)
- return ret;
- }
-
--int fib_sync_down_dev(struct net_device *dev, unsigned long event)
-+/* Event force Flags Description
-+ * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
-+ * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
-+ * NETDEV_DOWN 1 LINKDOWN|DEAD Last address removed
-+ * NETDEV_UNREGISTER 1 LINKDOWN|DEAD Device removed
-+ */
-+int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
- {
- int ret = 0;
- int scope = RT_SCOPE_NOWHERE;
-@@ -1141,8 +1147,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
- struct hlist_head *head = &fib_info_devhash[hash];
- struct fib_nh *nh;
-
-- if (event == NETDEV_UNREGISTER ||
-- event == NETDEV_DOWN)
-+ if (force)
- scope = -1;
-
- hlist_for_each_entry(nh, head, nh_hash) {
-@@ -1291,6 +1296,13 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
- if (!(dev->flags & IFF_UP))
- return 0;
-
-+ if (nh_flags & RTNH_F_DEAD) {
-+ unsigned int flags = dev_get_flags(dev);
-+
-+ if (flags & (IFF_RUNNING | IFF_LOWER_UP))
-+ nh_flags |= RTNH_F_LINKDOWN;
-+ }
-+
- prev_fi = NULL;
- hash = fib_devindex_hashfn(dev->ifindex);
- head = &fib_info_devhash[hash];
-diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
-index b0c6258..ea3aedb 100644
---- a/net/ipv4/fib_trie.c
-+++ b/net/ipv4/fib_trie.c
-@@ -1561,7 +1561,7 @@ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
- do {
- /* record parent and next child index */
- pn = n;
-- cindex = key ? get_index(key, pn) : 0;
-+ cindex = (key > pn->key) ? get_index(key, pn) : 0;
-
- if (cindex >> pn->bits)
- break;
-diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index 5aa46d4..5a8ee32 100644
---- a/net/ipv4/gre_offload.c
-+++ b/net/ipv4/gre_offload.c
-@@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
- SKB_GSO_TCP_ECN |
- SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
-- SKB_GSO_IPIP)))
-+ SKB_GSO_IPIP |
-+ SKB_GSO_SIT)))
- goto out;
-
- if (!skb->encapsulation)
-diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
-index 3a2c016..df28693 100644
---- a/net/ipv4/ipmr.c
-+++ b/net/ipv4/ipmr.c
-@@ -1683,8 +1683,8 @@ static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
- {
- struct ip_options *opt = &(IPCB(skb)->opt);
-
-- IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
-- IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
-+ IP_INC_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
-+ IP_ADD_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
-
- if (unlikely(opt->optlen))
- ip_forward_options(skb);
-@@ -1746,7 +1746,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
- * to blackhole.
- */
-
-- IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
-+ IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
- ip_rt_put(rt);
- goto out_free;
- }
-diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
-index 0330ab2..a1442c5 100644
---- a/net/ipv4/sysctl_net_ipv4.c
-+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -47,14 +47,14 @@ static void set_local_port_range(struct net *net, int range[2])
- {
- bool same_parity = !((range[0] ^ range[1]) & 1);
-
-- write_seqlock(&net->ipv4.ip_local_ports.lock);
-+ write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
- if (same_parity && !net->ipv4.ip_local_ports.warned) {
- net->ipv4.ip_local_ports.warned = true;
- pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
- }
- net->ipv4.ip_local_ports.range[0] = range[0];
- net->ipv4.ip_local_ports.range[1] = range[1];
-- write_sequnlock(&net->ipv4.ip_local_ports.lock);
-+ write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
- }
-
- /* Validate changes from /proc interface. */
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index b7dedd9..747a4c4 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -3406,7 +3406,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
- */
- tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
- skb_mstamp_get(&skb->skb_mstamp);
-- NET_INC_STATS_BH(sock_net(sk), mib);
-+ NET_INC_STATS(sock_net(sk), mib);
- return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
- }
-
-diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index 21c2c81..c8c1fea 100644
---- a/net/ipv6/addrconf.c
-+++ b/net/ipv6/addrconf.c
-@@ -411,6 +411,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
- if (err) {
- ipv6_mc_destroy_dev(ndev);
- del_timer(&ndev->regen_timer);
-+ snmp6_unregister_dev(ndev);
- goto err_release;
- }
- /* protected by rtnl_lock */
-diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
-index ac35a28..85c4b2f 100644
---- a/net/ipv6/sit.c
-+++ b/net/ipv6/sit.c
-@@ -1394,34 +1394,20 @@ static int ipip6_tunnel_init(struct net_device *dev)
- return 0;
- }
-
--static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
-+static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
- {
- struct ip_tunnel *tunnel = netdev_priv(dev);
- struct iphdr *iph = &tunnel->parms.iph;
- struct net *net = dev_net(dev);
- struct sit_net *sitn = net_generic(net, sit_net_id);
-
-- tunnel->dev = dev;
-- tunnel->net = dev_net(dev);
--
- iph->version = 4;
- iph->protocol = IPPROTO_IPV6;
- iph->ihl = 5;
- iph->ttl = 64;
-
-- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-- if (!dev->tstats)
-- return -ENOMEM;
--
-- tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
-- if (!tunnel->dst_cache) {
-- free_percpu(dev->tstats);
-- return -ENOMEM;
-- }
--
- dev_hold(dev);
- rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
-- return 0;
- }
-
- static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
-@@ -1831,23 +1817,19 @@ static int __net_init sit_init_net(struct net *net)
- */
- sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
-
-- err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
-- if (err)
-- goto err_dev_free;
--
-- ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
- err = register_netdev(sitn->fb_tunnel_dev);
- if (err)
- goto err_reg_dev;
-
-+ ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
-+ ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
-+
- t = netdev_priv(sitn->fb_tunnel_dev);
-
- strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
- return 0;
-
- err_reg_dev:
-- dev_put(sitn->fb_tunnel_dev);
--err_dev_free:
- ipip6_dev_free(sitn->fb_tunnel_dev);
- err_alloc_dev:
- return err;
-diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
-index a26c401..4396459 100644
---- a/net/irda/irlmp.c
-+++ b/net/irda/irlmp.c
-@@ -1839,7 +1839,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
- for (element = hashbin_get_first(iter->hashbin);
- element != NULL;
- element = hashbin_get_next(iter->hashbin)) {
-- if (!off || *off-- == 0) {
-+ if (!off || (*off)-- == 0) {
- /* NB: hashbin left locked */
- return element;
- }
-diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
-index 9b2cc27..33bf779 100644
---- a/net/mac80211/mlme.c
-+++ b/net/mac80211/mlme.c
-@@ -3378,7 +3378,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
-
- if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
- ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
-- int sig = ifmgd->ave_beacon_signal;
-+ int sig = ifmgd->ave_beacon_signal / 16;
- int last_sig = ifmgd->last_ave_beacon_signal;
- struct ieee80211_event event = {
- .type = RSSI_EVENT,
-@@ -4999,6 +4999,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
- return 0;
- }
-
-+ if (ifmgd->assoc_data &&
-+ ether_addr_equal(ifmgd->assoc_data->bss->bssid, req->bssid)) {
-+ sdata_info(sdata,
-+ "aborting association with %pM by local choice (Reason: %u=%s)\n",
-+ req->bssid, req->reason_code,
-+ ieee80211_get_reason_code_string(req->reason_code));
-+
-+ drv_mgd_prepare_tx(sdata->local, sdata);
-+ ieee80211_send_deauth_disassoc(sdata, req->bssid,
-+ IEEE80211_STYPE_DEAUTH,
-+ req->reason_code, tx,
-+ frame_buf);
-+ ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_report_disconnect(sdata, frame_buf,
-+ sizeof(frame_buf), true,
-+ req->reason_code);
-+ return 0;
-+ }
-+
- if (ifmgd->associated &&
- ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
- sdata_info(sdata,
-diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
-index 6f14591..0b13bfa 100644
---- a/net/mac80211/trace.h
-+++ b/net/mac80211/trace.h
-@@ -33,11 +33,11 @@
- __field(u32, chan_width) \
- __field(u32, center_freq1) \
- __field(u32, center_freq2)
--#define CHANDEF_ASSIGN(c) \
-- __entry->control_freq = (c)->chan ? (c)->chan->center_freq : 0; \
-- __entry->chan_width = (c)->width; \
-- __entry->center_freq1 = (c)->center_freq1; \
-- __entry->center_freq2 = (c)->center_freq2;
-+#define CHANDEF_ASSIGN(c) \
-+ __entry->control_freq = (c) ? ((c)->chan ? (c)->chan->center_freq : 0) : 0; \
-+ __entry->chan_width = (c) ? (c)->width : 0; \
-+ __entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \
-+ __entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
- #define CHANDEF_PR_FMT " control:%d MHz width:%d center: %d/%d MHz"
- #define CHANDEF_PR_ARG __entry->control_freq, __entry->chan_width, \
- __entry->center_freq1, __entry->center_freq2
-diff --git a/net/mac80211/util.c b/net/mac80211/util.c
-index 43e5aad..f5fa8c0 100644
---- a/net/mac80211/util.c
-+++ b/net/mac80211/util.c
-@@ -2984,6 +2984,13 @@ ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i)
- if (end > 0)
- return false;
-
-+ /* One shot NOA */
-+ if (data->count[i] == 1)
-+ return false;
-+
-+ if (data->desc[i].interval == 0)
-+ return false;
-+
- /* End time is in the past, check for repetitions */
- skip = DIV_ROUND_UP(-end, data->desc[i].interval);
- if (data->count[i] < 255) {
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index a133d16..8b158f7 100644
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -2346,7 +2346,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
- int pos, idx, shift;
-
- err = 0;
-- netlink_table_grab();
-+ netlink_lock_table();
- for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
- if (len - pos < sizeof(u32))
- break;
-@@ -2361,7 +2361,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
- }
- if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
- err = -EFAULT;
-- netlink_table_ungrab();
-+ netlink_unlock_table();
- break;
- }
- default:
-diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
-index 609f922..30b09f0 100644
---- a/net/nfc/nci/hci.c
-+++ b/net/nfc/nci/hci.c
-@@ -101,6 +101,20 @@ struct nci_hcp_packet {
- #define NCI_HCP_MSG_GET_CMD(header) (header & 0x3f)
- #define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f)
-
-+static int nci_hci_result_to_errno(u8 result)
-+{
-+ switch (result) {
-+ case NCI_HCI_ANY_OK:
-+ return 0;
-+ case NCI_HCI_ANY_E_REG_PAR_UNKNOWN:
-+ return -EOPNOTSUPP;
-+ case NCI_HCI_ANY_E_TIMEOUT:
-+ return -ETIME;
-+ default:
-+ return -1;
-+ }
-+}
-+
- /* HCI core */
- static void nci_hci_reset_pipes(struct nci_hci_dev *hdev)
- {
-@@ -146,18 +160,18 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
- if (!conn_info)
- return -EPROTO;
-
-- skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len +
-+ i = 0;
-+ skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
- NCI_DATA_HDR_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
-- skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE);
-+ skb_reserve(skb, NCI_DATA_HDR_SIZE + 2);
- *skb_push(skb, 1) = data_type;
-
-- i = 0;
-- len = conn_info->max_pkt_payload_len;
--
- do {
-+ len = conn_info->max_pkt_payload_len;
-+
- /* If last packet add NCI_HFP_NO_CHAINING */
- if (i + conn_info->max_pkt_payload_len -
- (skb->len + 1) >= data_len) {
-@@ -177,9 +191,15 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
- return r;
-
- i += len;
-+
- if (i < data_len) {
-- skb_trim(skb, 0);
-- skb_pull(skb, len);
-+ skb = nci_skb_alloc(ndev,
-+ conn_info->max_pkt_payload_len +
-+ NCI_DATA_HDR_SIZE, GFP_KERNEL);
-+ if (!skb)
-+ return -ENOMEM;
-+
-+ skb_reserve(skb, NCI_DATA_HDR_SIZE + 1);
- }
- } while (i < data_len);
-
-@@ -212,7 +232,8 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
- const u8 *param, size_t param_len,
- struct sk_buff **skb)
- {
-- struct nci_conn_info *conn_info;
-+ struct nci_hcp_message *message;
-+ struct nci_conn_info *conn_info;
- struct nci_data data;
- int r;
- u8 pipe = ndev->hci_dev->gate2pipe[gate];
-@@ -232,9 +253,15 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
-
- r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
- msecs_to_jiffies(NCI_DATA_TIMEOUT));
--
-- if (r == NCI_STATUS_OK && skb)
-- *skb = conn_info->rx_skb;
-+ if (r == NCI_STATUS_OK) {
-+ message = (struct nci_hcp_message *)conn_info->rx_skb->data;
-+ r = nci_hci_result_to_errno(
-+ NCI_HCP_MSG_GET_CMD(message->header));
-+ skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
-+
-+ if (!r && skb)
-+ *skb = conn_info->rx_skb;
-+ }
-
- return r;
- }
-@@ -328,9 +355,6 @@ static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
- struct nci_conn_info *conn_info;
- u8 status = result;
-
-- if (result != NCI_HCI_ANY_OK)
-- goto exit;
--
- conn_info = ndev->hci_dev->conn_info;
- if (!conn_info) {
- status = NCI_STATUS_REJECTED;
-@@ -340,7 +364,7 @@ static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
- conn_info->rx_skb = skb;
-
- exit:
-- nci_req_complete(ndev, status);
-+ nci_req_complete(ndev, NCI_STATUS_OK);
- }
-
- /* Receive hcp message for pipe, with type and cmd.
-@@ -378,7 +402,7 @@ static void nci_hci_msg_rx_work(struct work_struct *work)
- u8 pipe, type, instruction;
-
- while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
-- pipe = skb->data[0];
-+ pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]);
- skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
- message = (struct nci_hcp_message *)skb->data;
- type = NCI_HCP_MSG_GET_TYPE(message->header);
-@@ -395,7 +419,7 @@ void nci_hci_data_received_cb(void *context,
- {
- struct nci_dev *ndev = (struct nci_dev *)context;
- struct nci_hcp_packet *packet;
-- u8 pipe, type, instruction;
-+ u8 pipe, type;
- struct sk_buff *hcp_skb;
- struct sk_buff *frag_skb;
- int msg_len;
-@@ -415,7 +439,7 @@ void nci_hci_data_received_cb(void *context,
-
- /* it's the last fragment. Does it need re-aggregation? */
- if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) {
-- pipe = packet->header & NCI_HCI_FRAGMENT;
-+ pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
- skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
-
- msg_len = 0;
-@@ -434,7 +458,7 @@ void nci_hci_data_received_cb(void *context,
- *skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
-
- skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
-- msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
-+ msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
- memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
- NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
- }
-@@ -452,11 +476,10 @@ void nci_hci_data_received_cb(void *context,
- packet = (struct nci_hcp_packet *)hcp_skb->data;
- type = NCI_HCP_MSG_GET_TYPE(packet->message.header);
- if (type == NCI_HCI_HCP_RESPONSE) {
-- pipe = packet->header;
-- instruction = NCI_HCP_MSG_GET_CMD(packet->message.header);
-- skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN +
-- NCI_HCI_HCP_MESSAGE_HEADER_LEN);
-- nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb);
-+ pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
-+ skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
-+ nci_hci_hcp_message_rx(ndev, pipe, type,
-+ NCI_STATUS_OK, hcp_skb);
- } else {
- skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb);
- schedule_work(&ndev->hci_dev->msg_rx_work);
-@@ -488,6 +511,7 @@ EXPORT_SYMBOL(nci_hci_open_pipe);
- int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
- const u8 *param, size_t param_len)
- {
-+ struct nci_hcp_message *message;
- struct nci_conn_info *conn_info;
- struct nci_data data;
- int r;
-@@ -520,6 +544,12 @@ int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
- r = nci_request(ndev, nci_hci_send_data_req,
- (unsigned long)&data,
- msecs_to_jiffies(NCI_DATA_TIMEOUT));
-+ if (r == NCI_STATUS_OK) {
-+ message = (struct nci_hcp_message *)conn_info->rx_skb->data;
-+ r = nci_hci_result_to_errno(
-+ NCI_HCP_MSG_GET_CMD(message->header));
-+ skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
-+ }
-
- kfree(tmp);
- return r;
-@@ -529,6 +559,7 @@ EXPORT_SYMBOL(nci_hci_set_param);
- int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
- struct sk_buff **skb)
- {
-+ struct nci_hcp_message *message;
- struct nci_conn_info *conn_info;
- struct nci_data data;
- int r;
-@@ -553,8 +584,15 @@ int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
- r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
- msecs_to_jiffies(NCI_DATA_TIMEOUT));
-
-- if (r == NCI_STATUS_OK)
-- *skb = conn_info->rx_skb;
-+ if (r == NCI_STATUS_OK) {
-+ message = (struct nci_hcp_message *)conn_info->rx_skb->data;
-+ r = nci_hci_result_to_errno(
-+ NCI_HCP_MSG_GET_CMD(message->header));
-+ skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
-+
-+ if (!r && skb)
-+ *skb = conn_info->rx_skb;
-+ }
-
- return r;
- }
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 7851b12..71cb085 100644
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -2784,22 +2784,40 @@ static int packet_release(struct socket *sock)
- * Attach a packet hook.
- */
-
--static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
-+static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
-+ __be16 proto)
- {
- struct packet_sock *po = pkt_sk(sk);
- struct net_device *dev_curr;
- __be16 proto_curr;
- bool need_rehook;
-+ struct net_device *dev = NULL;
-+ int ret = 0;
-+ bool unlisted = false;
-
-- if (po->fanout) {
-- if (dev)
-- dev_put(dev);
--
-+ if (po->fanout)
- return -EINVAL;
-- }
-
- lock_sock(sk);
- spin_lock(&po->bind_lock);
-+ rcu_read_lock();
-+
-+ if (name) {
-+ dev = dev_get_by_name_rcu(sock_net(sk), name);
-+ if (!dev) {
-+ ret = -ENODEV;
-+ goto out_unlock;
-+ }
-+ } else if (ifindex) {
-+ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
-+ if (!dev) {
-+ ret = -ENODEV;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ if (dev)
-+ dev_hold(dev);
-
- proto_curr = po->prot_hook.type;
- dev_curr = po->prot_hook.dev;
-@@ -2807,14 +2825,29 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
- need_rehook = proto_curr != proto || dev_curr != dev;
-
- if (need_rehook) {
-- unregister_prot_hook(sk, true);
-+ if (po->running) {
-+ rcu_read_unlock();
-+ __unregister_prot_hook(sk, true);
-+ rcu_read_lock();
-+ dev_curr = po->prot_hook.dev;
-+ if (dev)
-+ unlisted = !dev_get_by_index_rcu(sock_net(sk),
-+ dev->ifindex);
-+ }
-
- po->num = proto;
- po->prot_hook.type = proto;
-- po->prot_hook.dev = dev;
-
-- po->ifindex = dev ? dev->ifindex : 0;
-- packet_cached_dev_assign(po, dev);
-+ if (unlikely(unlisted)) {
-+ dev_put(dev);
-+ po->prot_hook.dev = NULL;
-+ po->ifindex = -1;
-+ packet_cached_dev_reset(po);
-+ } else {
-+ po->prot_hook.dev = dev;
-+ po->ifindex = dev ? dev->ifindex : 0;
-+ packet_cached_dev_assign(po, dev);
-+ }
- }
- if (dev_curr)
- dev_put(dev_curr);
-@@ -2822,7 +2855,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
- if (proto == 0 || !need_rehook)
- goto out_unlock;
-
-- if (!dev || (dev->flags & IFF_UP)) {
-+ if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
- register_prot_hook(sk);
- } else {
- sk->sk_err = ENETDOWN;
-@@ -2831,9 +2864,10 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
- }
-
- out_unlock:
-+ rcu_read_unlock();
- spin_unlock(&po->bind_lock);
- release_sock(sk);
-- return 0;
-+ return ret;
- }
-
- /*
-@@ -2845,8 +2879,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
- {
- struct sock *sk = sock->sk;
- char name[15];
-- struct net_device *dev;
-- int err = -ENODEV;
-
- /*
- * Check legality
-@@ -2856,19 +2888,13 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
- return -EINVAL;
- strlcpy(name, uaddr->sa_data, sizeof(name));
-
-- dev = dev_get_by_name(sock_net(sk), name);
-- if (dev)
-- err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
-- return err;
-+ return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
- }
-
- static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
- {
- struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
- struct sock *sk = sock->sk;
-- struct net_device *dev = NULL;
-- int err;
--
-
- /*
- * Check legality
-@@ -2879,16 +2905,8 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
- if (sll->sll_family != AF_PACKET)
- return -EINVAL;
-
-- if (sll->sll_ifindex) {
-- err = -ENODEV;
-- dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
-- if (dev == NULL)
-- goto out;
-- }
-- err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
--
--out:
-- return err;
-+ return packet_do_bind(sk, NULL, sll->sll_ifindex,
-+ sll->sll_protocol ? : pkt_sk(sk)->num);
- }
-
- static struct proto packet_proto = {
-diff --git a/net/rds/connection.c b/net/rds/connection.c
-index da6da57..9d66705 100644
---- a/net/rds/connection.c
-+++ b/net/rds/connection.c
-@@ -187,6 +187,12 @@ new_conn:
- }
- }
-
-+ if (trans == NULL) {
-+ kmem_cache_free(rds_conn_slab, conn);
-+ conn = ERR_PTR(-ENODEV);
-+ goto out;
-+ }
-+
- conn->c_trans = trans;
-
- ret = trans->conn_alloc(conn, gfp);
-diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
-index fbc5ef8..27a9921 100644
---- a/net/rds/tcp_recv.c
-+++ b/net/rds/tcp_recv.c
-@@ -214,8 +214,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
- }
-
- to_copy = min(tc->t_tinc_data_rem, left);
-- pskb_pull(clone, offset);
-- pskb_trim(clone, to_copy);
-+ if (!pskb_pull(clone, offset) ||
-+ pskb_trim(clone, to_copy)) {
-+ pr_warn("rds_tcp_data_recv: pull/trim failed "
-+ "left %zu data_rem %zu skb_len %d\n",
-+ left, tc->t_tinc_data_rem, skb->len);
-+ kfree_skb(clone);
-+ desc->error = -ENOMEM;
-+ goto out;
-+ }
- skb_queue_tail(&tinc->ti_skb_list, clone);
-
- rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
-diff --git a/net/tipc/msg.c b/net/tipc/msg.c
-index 08b4cc7..b3a3931 100644
---- a/net/tipc/msg.c
-+++ b/net/tipc/msg.c
-@@ -121,7 +121,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
- {
- struct sk_buff *head = *headbuf;
- struct sk_buff *frag = *buf;
-- struct sk_buff *tail;
-+ struct sk_buff *tail = NULL;
- struct tipc_msg *msg;
- u32 fragid;
- int delta;
-@@ -141,9 +141,15 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
- if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
- goto err;
- head = *headbuf = frag;
-- skb_frag_list_init(head);
-- TIPC_SKB_CB(head)->tail = NULL;
- *buf = NULL;
-+ TIPC_SKB_CB(head)->tail = NULL;
-+ if (skb_is_nonlinear(head)) {
-+ skb_walk_frags(head, tail) {
-+ TIPC_SKB_CB(head)->tail = tail;
-+ }
-+ } else {
-+ skb_frag_list_init(head);
-+ }
- return 0;
- }
-
-diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
-index 66deebc..f8dfee5 100644
---- a/net/tipc/udp_media.c
-+++ b/net/tipc/udp_media.c
-@@ -48,6 +48,7 @@
- #include <linux/tipc_netlink.h>
- #include "core.h"
- #include "bearer.h"
-+#include "msg.h"
-
- /* IANA assigned UDP port */
- #define UDP_PORT_DEFAULT 6118
-@@ -216,6 +217,10 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
- {
- struct udp_bearer *ub;
- struct tipc_bearer *b;
-+ int usr = msg_user(buf_msg(skb));
-+
-+ if ((usr == LINK_PROTOCOL) || (usr == NAME_DISTRIBUTOR))
-+ skb_linearize(skb);
-
- ub = rcu_dereference_sk_user_data(sk);
- if (!ub) {
-diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
-index 76b4157..d059cf3 100644
---- a/net/wireless/nl80211.c
-+++ b/net/wireless/nl80211.c
-@@ -3408,12 +3408,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
- wdev->iftype))
- return -EINVAL;
-
-- if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
-- params.acl = parse_acl_data(&rdev->wiphy, info);
-- if (IS_ERR(params.acl))
-- return PTR_ERR(params.acl);
-- }
--
- if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
- params.smps_mode =
- nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
-@@ -3437,6 +3431,12 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
- params.smps_mode = NL80211_SMPS_OFF;
- }
-
-+ if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
-+ params.acl = parse_acl_data(&rdev->wiphy, info);
-+ if (IS_ERR(params.acl))
-+ return PTR_ERR(params.acl);
-+ }
-+
- wdev_lock(wdev);
- err = rdev_start_ap(rdev, dev, &params);
- if (!err) {
-diff --git a/sound/usb/midi.c b/sound/usb/midi.c
-index 417ebb1..bec63e0 100644
---- a/sound/usb/midi.c
-+++ b/sound/usb/midi.c
-@@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint {
- u8 running_status_length;
- } ports[0x10];
- u8 seen_f5;
-+ bool in_sysex;
-+ u8 last_cin;
- u8 error_resubmit;
- int current_port;
- };
-@@ -468,6 +470,39 @@ static void snd_usbmidi_maudio_broken_running_status_input(
- }
-
- /*
-+ * QinHeng CH345 is buggy: every second packet inside a SysEx has not CIN 4
-+ * but the previously seen CIN, but still with three data bytes.
-+ */
-+static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
-+ uint8_t *buffer, int buffer_length)
-+{
-+ unsigned int i, cin, length;
-+
-+ for (i = 0; i + 3 < buffer_length; i += 4) {
-+ if (buffer[i] == 0 && i > 0)
-+ break;
-+ cin = buffer[i] & 0x0f;
-+ if (ep->in_sysex &&
-+ cin == ep->last_cin &&
-+ (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
-+ cin = 0x4;
-+#if 0
-+ if (buffer[i + 1] == 0x90) {
-+ /*
-+ * Either a corrupted running status or a real note-on
-+ * message; impossible to detect reliably.
-+ */
-+ }
-+#endif
-+ length = snd_usbmidi_cin_length[cin];
-+ snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
-+ ep->in_sysex = cin == 0x4;
-+ if (!ep->in_sysex)
-+ ep->last_cin = cin;
-+ }
-+}
-+
-+/*
- * CME protocol: like the standard protocol, but SysEx commands are sent as a
- * single USB packet preceded by a 0x0F byte.
- */
-@@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
- .output_packet = snd_usbmidi_output_standard_packet,
- };
-
-+static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
-+ .input = ch345_broken_sysex_input,
-+ .output = snd_usbmidi_standard_output,
-+ .output_packet = snd_usbmidi_output_standard_packet,
-+};
-+
- /*
- * AKAI MPD16 protocol:
- *
-@@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
- * Various chips declare a packet size larger than 4 bytes, but
- * do not actually work with larger packets:
- */
-+ case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
- case USB_ID(0x0a92, 0x1020): /* ESI M4U */
- case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
- case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
-@@ -2375,6 +2417,10 @@ int snd_usbmidi_create(struct snd_card *card,
-
- err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
- break;
-+ case QUIRK_MIDI_CH345:
-+ umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
-+ err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
-+ break;
- default:
- dev_err(&umidi->dev->dev, "invalid quirk type %d\n",
- quirk->type);
-diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
-index e475665..ecc2a4e 100644
---- a/sound/usb/quirks-table.h
-+++ b/sound/usb/quirks-table.h
-@@ -2820,6 +2820,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
- .idProduct = 0x1020,
- },
-
-+/* QinHeng devices */
-+{
-+ USB_DEVICE(0x1a86, 0x752d),
-+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
-+ .vendor_name = "QinHeng",
-+ .product_name = "CH345",
-+ .ifnum = 1,
-+ .type = QUIRK_MIDI_CH345
-+ }
-+},
-+
- /* KeithMcMillen Stringport */
- {
- USB_DEVICE(0x1f38, 0x0001),
-diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
-index 00ebc0c..eef9b8e 100644
---- a/sound/usb/quirks.c
-+++ b/sound/usb/quirks.c
-@@ -535,6 +535,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
- [QUIRK_MIDI_CME] = create_any_midi_quirk,
- [QUIRK_MIDI_AKAI] = create_any_midi_quirk,
- [QUIRK_MIDI_FTDI] = create_any_midi_quirk,
-+ [QUIRK_MIDI_CH345] = create_any_midi_quirk,
- [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
- [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
- [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
-@@ -1271,6 +1272,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
- case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
- case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
- case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
-+ case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
- if (fp->altsetting == 3)
- return SNDRV_PCM_FMTBIT_DSD_U32_BE;
- break;
-diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
-index 91d0380..991aa84 100644
---- a/sound/usb/usbaudio.h
-+++ b/sound/usb/usbaudio.h
-@@ -94,6 +94,7 @@ enum quirk_type {
- QUIRK_MIDI_AKAI,
- QUIRK_MIDI_US122L,
- QUIRK_MIDI_FTDI,
-+ QUIRK_MIDI_CH345,
- QUIRK_AUDIO_STANDARD_INTERFACE,
- QUIRK_AUDIO_FIXED_ENDPOINT,
- QUIRK_AUDIO_EDIROL_UAXX,
diff --git a/4.2.7/0000_README b/4.3.3/0000_README
index 8a09897..651d7dc 100644
--- a/4.2.7/0000_README
+++ b/4.3.3/0000_README
@@ -2,11 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1006_linux-4.2.7.patch
+Patch: 1002_linux-4.3.3.patch
From: http://www.kernel.org
-Desc: Linux 4.2.7
+Desc: Linux 4.3.3
-Patch: 4420_grsecurity-3.1-4.2.7-201512092320.patch
+Patch: 4420_grsecurity-3.1-4.3.3-201512151908.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.3.3/1002_linux-4.3.3.patch b/4.3.3/1002_linux-4.3.3.patch
new file mode 100644
index 0000000..d8cd741
--- /dev/null
+++ b/4.3.3/1002_linux-4.3.3.patch
@@ -0,0 +1,4424 @@
+diff --git a/Makefile b/Makefile
+index 1a4953b..2070d16 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 3
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index c4e9c37..0e5f4fc 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -91,7 +91,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
+
+ seg_size += bv.bv_len;
+ bvprv = bv;
+- bvprvp = &bv;
++ bvprvp = &bvprv;
+ sectors += bv.bv_len >> 9;
+ continue;
+ }
+@@ -101,7 +101,7 @@ new_segment:
+
+ nsegs++;
+ bvprv = bv;
+- bvprvp = &bv;
++ bvprvp = &bvprv;
+ seg_size = bv.bv_len;
+ sectors += bv.bv_len >> 9;
+ }
+diff --git a/certs/.gitignore b/certs/.gitignore
+new file mode 100644
+index 0000000..f51aea4
+--- /dev/null
++++ b/certs/.gitignore
+@@ -0,0 +1,4 @@
++#
++# Generated files
++#
++x509_certificate_list
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 128e7df..8630a77 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3444,6 +3444,7 @@ static void rbd_queue_workfn(struct work_struct *work)
+ goto err_rq;
+ }
+ img_request->rq = rq;
++ snapc = NULL; /* img_request consumes a ref */
+
+ if (op_type == OBJ_OP_DISCARD)
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index f51d376..c2f5117 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -3675,6 +3675,11 @@ static int pci_probe(struct pci_dev *dev,
+
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+ ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
++ /* JMicron JMB38x often shows 0 at first read, just ignore it */
++ if (!ohci->it_context_support) {
++ ohci_notice(ohci, "overriding IsoXmitIntMask\n");
++ ohci->it_context_support = 0xf;
++ }
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ ohci->it_context_mask = ohci->it_context_support;
+ ohci->n_it = hweight32(ohci->it_context_mask);
+diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
+index 1f88ccc..a01f0cc 100644
+--- a/drivers/media/pci/cobalt/Kconfig
++++ b/drivers/media/pci/cobalt/Kconfig
+@@ -1,6 +1,6 @@
+ config VIDEO_COBALT
+ tristate "Cisco Cobalt support"
+- depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
++ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
+ depends on GPIOLIB || COMPILE_TEST
+ depends on SND
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index a937772..7f709cb 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -1583,8 +1583,14 @@ err_disable_device:
+ static void nicvf_remove(struct pci_dev *pdev)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+- struct nicvf *nic = netdev_priv(netdev);
+- struct net_device *pnetdev = nic->pnicvf->netdev;
++ struct nicvf *nic;
++ struct net_device *pnetdev;
++
++ if (!netdev)
++ return;
++
++ nic = netdev_priv(netdev);
++ pnetdev = nic->pnicvf->netdev;
+
+ /* Check if this Qset is assigned to different VF.
+ * If yes, clean primary and all secondary Qsets.
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 731423c..8bead97 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -4934,26 +4934,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
+ struct res_counter *counter;
+ struct res_counter *tmp;
+ int err;
+- int index;
++ int *counters_arr = NULL;
++ int i, j;
+
+ err = move_all_busy(dev, slave, RES_COUNTER);
+ if (err)
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
+
+- spin_lock_irq(mlx4_tlock(dev));
+- list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+- if (counter->com.owner == slave) {
+- index = counter->com.res_id;
+- rb_erase(&counter->com.node,
+- &tracker->res_tree[RES_COUNTER]);
+- list_del(&counter->com.list);
+- kfree(counter);
+- __mlx4_counter_free(dev, index);
++ counters_arr = kmalloc_array(dev->caps.max_counters,
++ sizeof(*counters_arr), GFP_KERNEL);
++ if (!counters_arr)
++ return;
++
++ do {
++ i = 0;
++ j = 0;
++ spin_lock_irq(mlx4_tlock(dev));
++ list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
++ if (counter->com.owner == slave) {
++ counters_arr[i++] = counter->com.res_id;
++ rb_erase(&counter->com.node,
++ &tracker->res_tree[RES_COUNTER]);
++ list_del(&counter->com.list);
++ kfree(counter);
++ }
++ }
++ spin_unlock_irq(mlx4_tlock(dev));
++
++ while (j < i) {
++ __mlx4_counter_free(dev, counters_arr[j++]);
+ mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+ }
+- }
+- spin_unlock_irq(mlx4_tlock(dev));
++ } while (i);
++
++ kfree(counters_arr);
+ }
+
+ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 59874d6..443632d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+ return err;
+ }
+
++static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
++ u32 tirn)
++{
++ void *in;
++ int inlen;
++ int err;
++
++ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
++ in = mlx5_vzalloc(inlen);
++ if (!in)
++ return -ENOMEM;
++
++ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
++
++ err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
++
++ kvfree(in);
++
++ return err;
++}
++
++static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
++{
++ int err;
++ int i;
++
++ for (i = 0; i < MLX5E_NUM_TT; i++) {
++ err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
++ priv->tirn[i]);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
+ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -1367,13 +1403,20 @@ int mlx5e_open_locked(struct net_device *netdev)
+
+ err = mlx5e_set_dev_port_mtu(netdev);
+ if (err)
+- return err;
++ goto err_clear_state_opened_flag;
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+- return err;
++ goto err_clear_state_opened_flag;
++ }
++
++ err = mlx5e_refresh_tirs_self_loopback_enable(priv);
++ if (err) {
++ netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
++ __func__, err);
++ goto err_close_channels;
+ }
+
+ mlx5e_update_carrier(priv);
+@@ -1382,6 +1425,12 @@ int mlx5e_open_locked(struct net_device *netdev)
+ schedule_delayed_work(&priv->update_stats_work, 0);
+
+ return 0;
++
++err_close_channels:
++ mlx5e_close_channels(priv);
++err_clear_state_opened_flag:
++ clear_bit(MLX5E_STATE_OPENED, &priv->state);
++ return err;
+ }
+
+ static int mlx5e_open(struct net_device *netdev)
+@@ -1899,6 +1948,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
++ if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
++ mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index b4f2123..79ef799 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -7429,15 +7429,15 @@ process_pkt:
+
+ rtl8169_rx_vlan_tag(desc, skb);
+
++ if (skb->pkt_type == PACKET_MULTICAST)
++ dev->stats.multicast++;
++
+ napi_gro_receive(&tp->napi, skb);
+
+ u64_stats_update_begin(&tp->rx_stats.syncp);
+ tp->rx_stats.packets++;
+ tp->rx_stats.bytes += pkt_size;
+ u64_stats_update_end(&tp->rx_stats.syncp);
+-
+- if (skb->pkt_type == PACKET_MULTICAST)
+- dev->stats.multicast++;
+ }
+ release_descriptor:
+ desc->opts2 = 0;
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 9c71295..85e64044 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -675,7 +675,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ { PHY_ID_BCM5461, 0xfffffff0 },
+ { PHY_ID_BCM54616S, 0xfffffff0 },
+ { PHY_ID_BCM5464, 0xfffffff0 },
+- { PHY_ID_BCM5482, 0xfffffff0 },
++ { PHY_ID_BCM5481, 0xfffffff0 },
+ { PHY_ID_BCM5482, 0xfffffff0 },
+ { PHY_ID_BCM50610, 0xfffffff0 },
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2a7c1be..66e0853 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -775,6 +775,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
++ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
+ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 488c6f5..c9e309c 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -581,7 +581,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
+ {
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct net_vrf_dev *vrf_ptr;
+- int err;
+
+ if (!data || !data[IFLA_VRF_TABLE])
+ return -EINVAL;
+@@ -590,26 +589,16 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
+
+ dev->priv_flags |= IFF_VRF_MASTER;
+
+- err = -ENOMEM;
+ vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
+ if (!vrf_ptr)
+- goto out_fail;
++ return -ENOMEM;
+
+ vrf_ptr->ifindex = dev->ifindex;
+ vrf_ptr->tb_id = vrf->tb_id;
+
+- err = register_netdevice(dev);
+- if (err < 0)
+- goto out_fail;
+-
+ rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
+
+- return 0;
+-
+-out_fail:
+- kfree(vrf_ptr);
+- free_netdev(dev);
+- return err;
++ return register_netdev(dev);
+ }
+
+ static size_t vrf_nl_getsize(const struct net_device *dev)
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 938efe3..94eea1f 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3398,7 +3398,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
+ int btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+- u64 owner, u64 offset, int no_quota);
++ u64 owner, u64 offset);
+
+ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
+ int delalloc);
+@@ -3411,7 +3411,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
+ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent,
+- u64 root_objectid, u64 owner, u64 offset, int no_quota);
++ u64 root_objectid, u64 owner, u64 offset);
+
+ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index ac3e81d..7832031 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -197,6 +197,119 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
+ trans->delayed_ref_updates--;
+ }
+
++static bool merge_ref(struct btrfs_trans_handle *trans,
++ struct btrfs_delayed_ref_root *delayed_refs,
++ struct btrfs_delayed_ref_head *head,
++ struct btrfs_delayed_ref_node *ref,
++ u64 seq)
++{
++ struct btrfs_delayed_ref_node *next;
++ bool done = false;
++
++ next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
++ list);
++ while (!done && &next->list != &head->ref_list) {
++ int mod;
++ struct btrfs_delayed_ref_node *next2;
++
++ next2 = list_next_entry(next, list);
++
++ if (next == ref)
++ goto next;
++
++ if (seq && next->seq >= seq)
++ goto next;
++
++ if (next->type != ref->type)
++ goto next;
++
++ if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
++ ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
++ comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
++ btrfs_delayed_node_to_tree_ref(next),
++ ref->type))
++ goto next;
++ if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
++ ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
++ comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
++ btrfs_delayed_node_to_data_ref(next)))
++ goto next;
++
++ if (ref->action == next->action) {
++ mod = next->ref_mod;
++ } else {
++ if (ref->ref_mod < next->ref_mod) {
++ swap(ref, next);
++ done = true;
++ }
++ mod = -next->ref_mod;
++ }
++
++ drop_delayed_ref(trans, delayed_refs, head, next);
++ ref->ref_mod += mod;
++ if (ref->ref_mod == 0) {
++ drop_delayed_ref(trans, delayed_refs, head, ref);
++ done = true;
++ } else {
++ /*
++ * Can't have multiples of the same ref on a tree block.
++ */
++ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
++ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
++ }
++next:
++ next = next2;
++ }
++
++ return done;
++}
++
++void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
++ struct btrfs_fs_info *fs_info,
++ struct btrfs_delayed_ref_root *delayed_refs,
++ struct btrfs_delayed_ref_head *head)
++{
++ struct btrfs_delayed_ref_node *ref;
++ u64 seq = 0;
++
++ assert_spin_locked(&head->lock);
++
++ if (list_empty(&head->ref_list))
++ return;
++
++ /* We don't have too many refs to merge for data. */
++ if (head->is_data)
++ return;
++
++ spin_lock(&fs_info->tree_mod_seq_lock);
++ if (!list_empty(&fs_info->tree_mod_seq_list)) {
++ struct seq_list *elem;
++
++ elem = list_first_entry(&fs_info->tree_mod_seq_list,
++ struct seq_list, list);
++ seq = elem->seq;
++ }
++ spin_unlock(&fs_info->tree_mod_seq_lock);
++
++ ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
++ list);
++ while (&ref->list != &head->ref_list) {
++ if (seq && ref->seq >= seq)
++ goto next;
++
++ if (merge_ref(trans, delayed_refs, head, ref, seq)) {
++ if (list_empty(&head->ref_list))
++ break;
++ ref = list_first_entry(&head->ref_list,
++ struct btrfs_delayed_ref_node,
++ list);
++ continue;
++ }
++next:
++ ref = list_next_entry(ref, list);
++ }
++}
++
+ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 seq)
+@@ -292,8 +405,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+ exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+ list);
+ /* No need to compare bytenr nor is_head */
+- if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
+- exist->seq != ref->seq)
++ if (exist->type != ref->type || exist->seq != ref->seq)
+ goto add_tail;
+
+ if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
+@@ -524,7 +636,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root, int level,
+- int action, int no_quota)
++ int action)
+ {
+ struct btrfs_delayed_tree_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -546,7 +658,6 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ ref->action = action;
+ ref->is_head = 0;
+ ref->in_tree = 1;
+- ref->no_quota = no_quota;
+ ref->seq = seq;
+
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+@@ -579,7 +690,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_delayed_ref_node *ref, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+- u64 offset, int action, int no_quota)
++ u64 offset, int action)
+ {
+ struct btrfs_delayed_data_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -602,7 +713,6 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ ref->action = action;
+ ref->is_head = 0;
+ ref->in_tree = 1;
+- ref->no_quota = no_quota;
+ ref->seq = seq;
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+@@ -633,17 +743,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, u64 parent,
+ u64 ref_root, int level, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota)
++ struct btrfs_delayed_extent_op *extent_op)
+ {
+ struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+
+- if (!is_fstree(ref_root) || !fs_info->quota_enabled)
+- no_quota = 0;
+-
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+ if (!ref)
+@@ -672,8 +778,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ bytenr, num_bytes, action, 0);
+
+ add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, level, action,
+- no_quota);
++ num_bytes, parent, ref_root, level, action);
+ spin_unlock(&delayed_refs->lock);
+
+ return 0;
+@@ -694,17 +799,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 num_bytes,
+ u64 parent, u64 ref_root,
+ u64 owner, u64 offset, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota)
++ struct btrfs_delayed_extent_op *extent_op)
+ {
+ struct btrfs_delayed_data_ref *ref;
+ struct btrfs_delayed_ref_head *head_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+
+- if (!is_fstree(ref_root) || !fs_info->quota_enabled)
+- no_quota = 0;
+-
+ BUG_ON(extent_op && !extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+ if (!ref)
+@@ -740,7 +841,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+
+ add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+ num_bytes, parent, ref_root, owner, offset,
+- action, no_quota);
++ action);
+ spin_unlock(&delayed_refs->lock);
+
+ return 0;
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 13fb5e6..930887a 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -68,7 +68,6 @@ struct btrfs_delayed_ref_node {
+
+ unsigned int action:8;
+ unsigned int type:8;
+- unsigned int no_quota:1;
+ /* is this node still in the rbtree? */
+ unsigned int is_head:1;
+ unsigned int in_tree:1;
+@@ -233,15 +232,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes, u64 parent,
+ u64 ref_root, int level, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota);
++ struct btrfs_delayed_extent_op *extent_op);
+ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes,
+ u64 parent, u64 ref_root,
+ u64 owner, u64 offset, int action,
+- struct btrfs_delayed_extent_op *extent_op,
+- int no_quota);
++ struct btrfs_delayed_extent_op *extent_op);
+ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes,
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 601d7d4..cadacf6 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -95,8 +95,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent, u64 root_objectid,
+ u64 flags, struct btrfs_disk_key *key,
+- int level, struct btrfs_key *ins,
+- int no_quota);
++ int level, struct btrfs_key *ins);
+ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
+ struct btrfs_root *extent_root, u64 flags,
+ int force);
+@@ -2009,8 +2008,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent,
+- u64 root_objectid, u64 owner, u64 offset,
+- int no_quota)
++ u64 root_objectid, u64 owner, u64 offset)
+ {
+ int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -2022,12 +2020,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, (int)owner,
+- BTRFS_ADD_DELAYED_REF, NULL, no_quota);
++ BTRFS_ADD_DELAYED_REF, NULL);
+ } else {
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, owner, offset,
+- BTRFS_ADD_DELAYED_REF, NULL, no_quota);
++ BTRFS_ADD_DELAYED_REF, NULL);
+ }
+ return ret;
+ }
+@@ -2048,15 +2046,11 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ u64 num_bytes = node->num_bytes;
+ u64 refs;
+ int ret;
+- int no_quota = node->no_quota;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+- if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
+- no_quota = 1;
+-
+ path->reada = 1;
+ path->leave_spinning = 1;
+ /* this will setup the path even if it fails to insert the back ref */
+@@ -2291,8 +2285,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ parent, ref_root,
+ extent_op->flags_to_set,
+ &extent_op->key,
+- ref->level, &ins,
+- node->no_quota);
++ ref->level, &ins);
+ } else if (node->action == BTRFS_ADD_DELAYED_REF) {
+ ret = __btrfs_inc_extent_ref(trans, root, node,
+ parent, ref_root,
+@@ -2433,7 +2426,21 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+ }
+ }
+
++ /*
++ * We need to try and merge add/drops of the same ref since we
++ * can run into issues with relocate dropping the implicit ref
++ * and then it being added back again before the drop can
++ * finish. If we merged anything we need to re-loop so we can
++ * get a good ref.
++ * Or we can get node references of the same type that weren't
++ * merged when created due to bumps in the tree mod seq, and
++ * we need to merge them to prevent adding an inline extent
++ * backref before dropping it (triggering a BUG_ON at
++ * insert_inline_extent_backref()).
++ */
+ spin_lock(&locked_ref->lock);
++ btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
++ locked_ref);
+
+ /*
+ * locked_ref is the head node, so we have to go one
+@@ -3109,7 +3116,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ int level;
+ int ret = 0;
+ int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
+- u64, u64, u64, u64, u64, u64, int);
++ u64, u64, u64, u64, u64, u64);
+
+
+ if (btrfs_test_is_dummy_root(root))
+@@ -3150,15 +3157,14 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ key.offset -= btrfs_file_extent_offset(buf, fi);
+ ret = process_func(trans, root, bytenr, num_bytes,
+ parent, ref_root, key.objectid,
+- key.offset, 1);
++ key.offset);
+ if (ret)
+ goto fail;
+ } else {
+ bytenr = btrfs_node_blockptr(buf, i);
+ num_bytes = root->nodesize;
+ ret = process_func(trans, root, bytenr, num_bytes,
+- parent, ref_root, level - 1, 0,
+- 1);
++ parent, ref_root, level - 1, 0);
+ if (ret)
+ goto fail;
+ }
+@@ -6233,7 +6239,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ int extent_slot = 0;
+ int found_extent = 0;
+ int num_to_del = 1;
+- int no_quota = node->no_quota;
+ u32 item_size;
+ u64 refs;
+ u64 bytenr = node->bytenr;
+@@ -6242,9 +6247,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+ SKINNY_METADATA);
+
+- if (!info->quota_enabled || !is_fstree(root_objectid))
+- no_quota = 1;
+-
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+@@ -6570,7 +6572,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ buf->start, buf->len,
+ parent, root->root_key.objectid,
+ btrfs_header_level(buf),
+- BTRFS_DROP_DELAYED_REF, NULL, 0);
++ BTRFS_DROP_DELAYED_REF, NULL);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+
+@@ -6618,7 +6620,7 @@ out:
+ /* Can return -ENOMEM */
+ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+- u64 owner, u64 offset, int no_quota)
++ u64 owner, u64 offset)
+ {
+ int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -6641,13 +6643,13 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, (int)owner,
+- BTRFS_DROP_DELAYED_REF, NULL, no_quota);
++ BTRFS_DROP_DELAYED_REF, NULL);
+ } else {
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ num_bytes,
+ parent, root_objectid, owner,
+ offset, BTRFS_DROP_DELAYED_REF,
+- NULL, no_quota);
++ NULL);
+ }
+ return ret;
+ }
+@@ -7429,8 +7431,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent, u64 root_objectid,
+ u64 flags, struct btrfs_disk_key *key,
+- int level, struct btrfs_key *ins,
+- int no_quota)
++ int level, struct btrfs_key *ins)
+ {
+ int ret;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+@@ -7520,7 +7521,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+ ins->offset, 0,
+ root_objectid, owner, offset,
+- BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
++ BTRFS_ADD_DELAYED_EXTENT, NULL);
+ return ret;
+ }
+
+@@ -7734,7 +7735,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ ins.objectid, ins.offset,
+ parent, root_objectid, level,
+ BTRFS_ADD_DELAYED_EXTENT,
+- extent_op, 0);
++ extent_op);
+ if (ret)
+ goto out_free_delayed;
+ }
+@@ -8282,7 +8283,7 @@ skip:
+ }
+ }
+ ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
+- root->root_key.objectid, level - 1, 0, 0);
++ root->root_key.objectid, level - 1, 0);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ btrfs_tree_unlock(next);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 8c6f247..e27ea7a 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -756,8 +756,16 @@ next_slot:
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+- if (key.objectid > ino ||
+- key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
++
++ if (key.objectid > ino)
++ break;
++ if (WARN_ON_ONCE(key.objectid < ino) ||
++ key.type < BTRFS_EXTENT_DATA_KEY) {
++ ASSERT(del_nr == 0);
++ path->slots[0]++;
++ goto next_slot;
++ }
++ if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ break;
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+@@ -776,8 +784,8 @@ next_slot:
+ btrfs_file_extent_inline_len(leaf,
+ path->slots[0], fi);
+ } else {
+- WARN_ON(1);
+- extent_end = search_start;
++ /* can't happen */
++ BUG();
+ }
+
+ /*
+@@ -847,7 +855,7 @@ next_slot:
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ new_key.objectid,
+- start - extent_offset, 1);
++ start - extent_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ key.offset = start;
+@@ -925,7 +933,7 @@ delete_extent_item:
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ key.objectid, key.offset -
+- extent_offset, 0);
++ extent_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ inode_sub_bytes(inode,
+ extent_end - key.offset);
+@@ -1204,7 +1212,7 @@ again:
+
+ ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
+ root->root_key.objectid,
+- ino, orig_offset, 1);
++ ino, orig_offset);
+ BUG_ON(ret); /* -ENOMEM */
+
+ if (split == start) {
+@@ -1231,7 +1239,7 @@ again:
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+- ino, orig_offset, 0);
++ ino, orig_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ other_start = 0;
+@@ -1248,7 +1256,7 @@ again:
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+- ino, orig_offset, 0);
++ ino, orig_offset);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ if (del_nr == 0) {
+@@ -1868,8 +1876,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ struct btrfs_log_ctx ctx;
+ int ret = 0;
+ bool full_sync = 0;
+- const u64 len = end - start + 1;
++ u64 len;
+
++ /*
++ * The range length can be represented by u64, we have to do the typecasts
++ * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
++ */
++ len = (u64)end - (u64)start + 1;
+ trace_btrfs_sync_file(file, datasync);
+
+ /*
+@@ -2057,8 +2070,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ }
+ }
+ if (!full_sync) {
+- ret = btrfs_wait_ordered_range(inode, start,
+- end - start + 1);
++ ret = btrfs_wait_ordered_range(inode, start, len);
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ goto out;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 611b66d..396e3d5 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1294,8 +1294,14 @@ next_slot:
+ num_bytes = 0;
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+- if (found_key.objectid > ino ||
+- found_key.type > BTRFS_EXTENT_DATA_KEY ||
++ if (found_key.objectid > ino)
++ break;
++ if (WARN_ON_ONCE(found_key.objectid < ino) ||
++ found_key.type < BTRFS_EXTENT_DATA_KEY) {
++ path->slots[0]++;
++ goto next_slot;
++ }
++ if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
+ found_key.offset > end)
+ break;
+
+@@ -2573,7 +2579,7 @@ again:
+ ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+ new->disk_len, 0,
+ backref->root_id, backref->inum,
+- new->file_pos, 0); /* start - extent_offset */
++ new->file_pos); /* start - extent_offset */
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_free_path;
+@@ -4217,6 +4223,47 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
+
+ }
+
++static int truncate_inline_extent(struct inode *inode,
++ struct btrfs_path *path,
++ struct btrfs_key *found_key,
++ const u64 item_end,
++ const u64 new_size)
++{
++ struct extent_buffer *leaf = path->nodes[0];
++ int slot = path->slots[0];
++ struct btrfs_file_extent_item *fi;
++ u32 size = (u32)(new_size - found_key->offset);
++ struct btrfs_root *root = BTRFS_I(inode)->root;
++
++ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
++
++ if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
++ loff_t offset = new_size;
++ loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
++
++ /*
++		 * Zero out the remainder of the last page of our inline extent,
++ * instead of directly truncating our inline extent here - that
++ * would be much more complex (decompressing all the data, then
++ * compressing the truncated data, which might be bigger than
++ * the size of the inline extent, resize the extent, etc).
++ * We release the path because to get the page we might need to
++ * read the extent item from disk (data not in the page cache).
++ */
++ btrfs_release_path(path);
++ return btrfs_truncate_page(inode, offset, page_end - offset, 0);
++ }
++
++ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
++ size = btrfs_file_extent_calc_inline_size(size);
++ btrfs_truncate_item(root, path, size, 1);
++
++ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
++ inode_sub_bytes(inode, item_end + 1 - new_size);
++
++ return 0;
++}
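A small standalone sketch (not part of the patch; a 4 KiB page size is assumed) of what the compressed branch above hands to btrfs_truncate_page(): only the tail of the last page, from new_size up to the next page boundary, gets zeroed.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long new_size = 1000;	/* hypothetical truncation target */
	unsigned long page_end = ALIGN(new_size, PAGE_SIZE);

	/* corresponds to btrfs_truncate_page(inode, offset, page_end - offset, 0) */
	printf("zeroing %lu bytes in [%lu, %lu)\n",
	       page_end - new_size, new_size, page_end);	/* 3096 bytes in [1000, 4096) */
	return 0;
}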
++
+ /*
+ * this can truncate away extent items, csum items and directory items.
+ * It starts at a high offset and removes keys until it can't find
+@@ -4411,27 +4458,40 @@ search_again:
+ * special encodings
+ */
+ if (!del_item &&
+- btrfs_file_extent_compression(leaf, fi) == 0 &&
+ btrfs_file_extent_encryption(leaf, fi) == 0 &&
+ btrfs_file_extent_other_encoding(leaf, fi) == 0) {
+- u32 size = new_size - found_key.offset;
+-
+- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+- inode_sub_bytes(inode, item_end + 1 -
+- new_size);
+
+ /*
+- * update the ram bytes to properly reflect
+- * the new size of our item
++ * Need to release path in order to truncate a
++ * compressed extent. So delete any accumulated
++ * extent items so far.
+ */
+- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+- size =
+- btrfs_file_extent_calc_inline_size(size);
+- btrfs_truncate_item(root, path, size, 1);
++ if (btrfs_file_extent_compression(leaf, fi) !=
++ BTRFS_COMPRESS_NONE && pending_del_nr) {
++ err = btrfs_del_items(trans, root, path,
++ pending_del_slot,
++ pending_del_nr);
++ if (err) {
++ btrfs_abort_transaction(trans,
++ root,
++ err);
++ goto error;
++ }
++ pending_del_nr = 0;
++ }
++
++ err = truncate_inline_extent(inode, path,
++ &found_key,
++ item_end,
++ new_size);
++ if (err) {
++ btrfs_abort_transaction(trans,
++ root, err);
++ goto error;
++ }
+ } else if (test_bit(BTRFS_ROOT_REF_COWS,
+ &root->state)) {
+- inode_sub_bytes(inode, item_end + 1 -
+- found_key.offset);
++ inode_sub_bytes(inode, item_end + 1 - new_size);
+ }
+ }
+ delete:
+@@ -4461,7 +4521,7 @@ delete:
+ ret = btrfs_free_extent(trans, root, extent_start,
+ extent_num_bytes, 0,
+ btrfs_header_owner(leaf),
+- ino, extent_offset, 0);
++ ino, extent_offset);
+ BUG_ON(ret);
+ if (btrfs_should_throttle_delayed_refs(trans, root))
+ btrfs_async_run_delayed_refs(root,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8d20f3b..6548a36 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3203,41 +3203,6 @@ out:
+ return ret;
+ }
+
+-/* Helper to check and see if this root currently has a ref on the given disk
+- * bytenr. If it does then we need to update the quota for this root. This
+- * doesn't do anything if quotas aren't enabled.
+- */
+-static int check_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+- u64 disko)
+-{
+- struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
+- struct ulist *roots;
+- struct ulist_iterator uiter;
+- struct ulist_node *root_node = NULL;
+- int ret;
+-
+- if (!root->fs_info->quota_enabled)
+- return 1;
+-
+- btrfs_get_tree_mod_seq(root->fs_info, &tree_mod_seq_elem);
+- ret = btrfs_find_all_roots(trans, root->fs_info, disko,
+- tree_mod_seq_elem.seq, &roots);
+- if (ret < 0)
+- goto out;
+- ret = 0;
+- ULIST_ITER_INIT(&uiter);
+- while ((root_node = ulist_next(roots, &uiter))) {
+- if (root_node->val == root->objectid) {
+- ret = 1;
+- break;
+- }
+- }
+- ulist_free(roots);
+-out:
+- btrfs_put_tree_mod_seq(root->fs_info, &tree_mod_seq_elem);
+- return ret;
+-}
+-
+ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ u64 endoff,
+@@ -3328,6 +3293,150 @@ static void clone_update_extent_map(struct inode *inode,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+
++/*
++ * Make sure we do not end up inserting an inline extent into a file that has
++ * already other (non-inline) extents. If a file has an inline extent it can
++ * not have any other extents and the (single) inline extent must start at the
++ * file offset 0. Failing to respect these rules will lead to file corruption,
++ * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
++ *
++ * We can have extents that have been already written to disk or we can have
++ * dirty ranges still in delalloc, in which case the extent maps and items are
++ * created only when we run delalloc, and the delalloc ranges might fall outside
++ * the range we are currently locking in the inode's io tree. So we check the
++ * inode's i_size because of that (i_size updates are done while holding the
++ * i_mutex, which we are holding here).
++ * We also check to see if the inode has a size not greater than "datal" but has
++ * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
++ * protected against such concurrent fallocate calls by the i_mutex).
++ *
++ * If the file has no extents but a size greater than datal, do not allow the
++ * copy because we would need to turn the inline extent into a non-inline one (even
++ * with NO_HOLES enabled). If we find our destination inode only has one inline
++ * extent, just overwrite it with the source inline extent if its size is less
++ * than the source extent's size, or we could copy the source inline extent's
++ * data into the destination inode's inline extent if the latter is greater than
++ * the former.
++ */
++static int clone_copy_inline_extent(struct inode *src,
++ struct inode *dst,
++ struct btrfs_trans_handle *trans,
++ struct btrfs_path *path,
++ struct btrfs_key *new_key,
++ const u64 drop_start,
++ const u64 datal,
++ const u64 skip,
++ const u64 size,
++ char *inline_data)
++{
++ struct btrfs_root *root = BTRFS_I(dst)->root;
++ const u64 aligned_end = ALIGN(new_key->offset + datal,
++ root->sectorsize);
++ int ret;
++ struct btrfs_key key;
++
++ if (new_key->offset > 0)
++ return -EOPNOTSUPP;
++
++ key.objectid = btrfs_ino(dst);
++ key.type = BTRFS_EXTENT_DATA_KEY;
++ key.offset = 0;
++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++ if (ret < 0) {
++ return ret;
++ } else if (ret > 0) {
++ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
++ ret = btrfs_next_leaf(root, path);
++ if (ret < 0)
++ return ret;
++ else if (ret > 0)
++ goto copy_inline_extent;
++ }
++ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
++ if (key.objectid == btrfs_ino(dst) &&
++ key.type == BTRFS_EXTENT_DATA_KEY) {
++ ASSERT(key.offset > 0);
++ return -EOPNOTSUPP;
++ }
++ } else if (i_size_read(dst) <= datal) {
++ struct btrfs_file_extent_item *ei;
++ u64 ext_len;
++
++ /*
++ * If the file size is <= datal, make sure there are no other
++		 * extents following (can happen due to an fallocate call with
++ * the flag FALLOC_FL_KEEP_SIZE).
++ */
++ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
++ struct btrfs_file_extent_item);
++ /*
++ * If it's an inline extent, it can not have other extents
++ * following it.
++ */
++ if (btrfs_file_extent_type(path->nodes[0], ei) ==
++ BTRFS_FILE_EXTENT_INLINE)
++ goto copy_inline_extent;
++
++ ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
++ if (ext_len > aligned_end)
++ return -EOPNOTSUPP;
++
++ ret = btrfs_next_item(root, path);
++ if (ret < 0) {
++ return ret;
++ } else if (ret == 0) {
++ btrfs_item_key_to_cpu(path->nodes[0], &key,
++ path->slots[0]);
++ if (key.objectid == btrfs_ino(dst) &&
++ key.type == BTRFS_EXTENT_DATA_KEY)
++ return -EOPNOTSUPP;
++ }
++ }
++
++copy_inline_extent:
++ /*
++ * We have no extent items, or we have an extent at offset 0 which may
++	 * or may not be inlined. All these cases are dealt with in the same way.
++ */
++ if (i_size_read(dst) > datal) {
++ /*
++ * If the destination inode has an inline extent...
++ * This would require copying the data from the source inline
++ * extent into the beginning of the destination's inline extent.
++ * But this is really complex, both extents can be compressed
++ * or just one of them, which would require decompressing and
++ * re-compressing data (which could increase the new compressed
++ * size, not allowing the compressed data to fit anymore in an
++ * inline extent).
++ * So just don't support this case for now (it should be rare,
++ * we are not really saving space when cloning inline extents).
++ */
++ return -EOPNOTSUPP;
++ }
++
++ btrfs_release_path(path);
++ ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
++ if (ret)
++ return ret;
++ ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
++ if (ret)
++ return ret;
++
++ if (skip) {
++ const u32 start = btrfs_file_extent_calc_inline_size(0);
++
++ memmove(inline_data + start, inline_data + start + skip, datal);
++ }
++
++ write_extent_buffer(path->nodes[0], inline_data,
++ btrfs_item_ptr_offset(path->nodes[0],
++ path->slots[0]),
++ size);
++ inode_add_bytes(dst, datal);
++
++ return 0;
++}
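A standalone sketch (not part of the patch; the 21-byte header is only a stand-in for btrfs_file_extent_calc_inline_size(0)) of the skip handling above: when the clone starts past the beginning of the source inline extent, the payload is shifted left over the skipped bytes while the item header stays where it is.

#include <stdio.h>
#include <string.h>

#define INLINE_HDR_SIZE	21	/* stand-in for btrfs_file_extent_calc_inline_size(0) */

int main(void)
{
	char buf[INLINE_HDR_SIZE + 8 + 1] = { 0 };	/* item header + inline payload */
	size_t skip = 3;		/* clone starts 3 bytes into the payload */
	size_t datal = 8 - skip;	/* payload bytes that survive the clone */

	memcpy(buf + INLINE_HDR_SIZE, "ABCDEFGH", 8);

	/* same shape as: memmove(inline_data + start, inline_data + start + skip, datal) */
	memmove(buf + INLINE_HDR_SIZE, buf + INLINE_HDR_SIZE + skip, datal);
	buf[INLINE_HDR_SIZE + datal] = '\0';

	printf("payload copied to the destination: %s\n", buf + INLINE_HDR_SIZE);	/* DEFGH */
	return 0;
}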
++
+ /**
+ * btrfs_clone() - clone a range from inode file to another
+ *
+@@ -3352,9 +3461,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
+ u32 nritems;
+ int slot;
+ int ret;
+- int no_quota;
+ const u64 len = olen_aligned;
+- u64 last_disko = 0;
+ u64 last_dest_end = destoff;
+
+ ret = -ENOMEM;
+@@ -3400,7 +3507,6 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
+
+ nritems = btrfs_header_nritems(path->nodes[0]);
+ process_slot:
+- no_quota = 1;
+ if (path->slots[0] >= nritems) {
+ ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
+ if (ret < 0)
+@@ -3552,35 +3658,13 @@ process_slot:
+ btrfs_set_file_extent_num_bytes(leaf, extent,
+ datal);
+
+- /*
+- * We need to look up the roots that point at
+- * this bytenr and see if the new root does. If
+- * it does not we need to make sure we update
+- * quotas appropriately.
+- */
+- if (disko && root != BTRFS_I(src)->root &&
+- disko != last_disko) {
+- no_quota = check_ref(trans, root,
+- disko);
+- if (no_quota < 0) {
+- btrfs_abort_transaction(trans,
+- root,
+- ret);
+- btrfs_end_transaction(trans,
+- root);
+- ret = no_quota;
+- goto out;
+- }
+- }
+-
+ if (disko) {
+ inode_add_bytes(inode, datal);
+ ret = btrfs_inc_extent_ref(trans, root,
+ disko, diskl, 0,
+ root->root_key.objectid,
+ btrfs_ino(inode),
+- new_key.offset - datao,
+- no_quota);
++ new_key.offset - datao);
+ if (ret) {
+ btrfs_abort_transaction(trans,
+ root,
+@@ -3594,21 +3678,6 @@ process_slot:
+ } else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ u64 skip = 0;
+ u64 trim = 0;
+- u64 aligned_end = 0;
+-
+- /*
+- * Don't copy an inline extent into an offset
+- * greater than zero. Having an inline extent
+- * at such an offset results in chaos as btrfs
+- * isn't prepared for such cases. Just skip
+- * this case for the same reasons as commented
+- * at btrfs_ioctl_clone().
+- */
+- if (last_dest_end > 0) {
+- ret = -EOPNOTSUPP;
+- btrfs_end_transaction(trans, root);
+- goto out;
+- }
+
+ if (off > key.offset) {
+ skip = off - key.offset;
+@@ -3626,42 +3695,22 @@ process_slot:
+ size -= skip + trim;
+ datal -= skip + trim;
+
+- aligned_end = ALIGN(new_key.offset + datal,
+- root->sectorsize);
+- ret = btrfs_drop_extents(trans, root, inode,
+- drop_start,
+- aligned_end,
+- 1);
++ ret = clone_copy_inline_extent(src, inode,
++ trans, path,
++ &new_key,
++ drop_start,
++ datal,
++ skip, size, buf);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ btrfs_abort_transaction(trans,
+- root, ret);
+- btrfs_end_transaction(trans, root);
+- goto out;
+- }
+-
+- ret = btrfs_insert_empty_item(trans, root, path,
+- &new_key, size);
+- if (ret) {
+- btrfs_abort_transaction(trans, root,
+- ret);
++ root,
++ ret);
+ btrfs_end_transaction(trans, root);
+ goto out;
+ }
+-
+- if (skip) {
+- u32 start =
+- btrfs_file_extent_calc_inline_size(0);
+- memmove(buf+start, buf+start+skip,
+- datal);
+- }
+-
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+- write_extent_buffer(leaf, buf,
+- btrfs_item_ptr_offset(leaf, slot),
+- size);
+- inode_add_bytes(inode, datal);
+ }
+
+ /* If we have an implicit hole (NO_HOLES feature). */
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 303babe..ab507e3 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1716,7 +1716,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
+ num_bytes, parent,
+ btrfs_header_owner(leaf),
+- key.objectid, key.offset, 1);
++ key.objectid, key.offset);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ break;
+@@ -1724,7 +1724,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ parent, btrfs_header_owner(leaf),
+- key.objectid, key.offset, 1);
++ key.objectid, key.offset);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ break;
+@@ -1900,23 +1900,21 @@ again:
+
+ ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
+ path->nodes[level]->start,
+- src->root_key.objectid, level - 1, 0,
+- 1);
++ src->root_key.objectid, level - 1, 0);
+ BUG_ON(ret);
+ ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
+ 0, dest->root_key.objectid, level - 1,
+- 0, 1);
++ 0);
+ BUG_ON(ret);
+
+ ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
+ path->nodes[level]->start,
+- src->root_key.objectid, level - 1, 0,
+- 1);
++ src->root_key.objectid, level - 1, 0);
+ BUG_ON(ret);
+
+ ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
+ 0, dest->root_key.objectid, level - 1,
+- 0, 1);
++ 0);
+ BUG_ON(ret);
+
+ btrfs_unlock_up_safe(path, 0);
+@@ -2745,7 +2743,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ node->eb->start, blocksize,
+ upper->eb->start,
+ btrfs_header_owner(upper->eb),
+- node->level, 0, 1);
++ node->level, 0);
+ BUG_ON(ret);
+
+ ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index a739b82..23bb2e4 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -2353,8 +2353,14 @@ static int send_subvol_begin(struct send_ctx *sctx)
+ }
+
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
+- TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
+- sctx->send_root->root_item.uuid);
++
++ if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
++ TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
++ sctx->send_root->root_item.received_uuid);
++ else
++ TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
++ sctx->send_root->root_item.uuid);
++
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
+ le64_to_cpu(sctx->send_root->root_item.ctransid));
+ if (parent_root) {
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 1bbaace..6f8af2d 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -691,7 +691,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ ret = btrfs_inc_extent_ref(trans, root,
+ ins.objectid, ins.offset,
+ 0, root->root_key.objectid,
+- key->objectid, offset, 0);
++ key->objectid, offset);
+ if (ret)
+ goto out;
+ } else {
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 6f518c9..1fcd7b6 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -313,8 +313,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ /* check to make sure this item is what we want */
+ if (found_key.objectid != key.objectid)
+ break;
+- if (found_key.type != BTRFS_XATTR_ITEM_KEY)
++ if (found_key.type > BTRFS_XATTR_ITEM_KEY)
+ break;
++ if (found_key.type < BTRFS_XATTR_ITEM_KEY)
++ goto next;
+
+ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ if (verify_dir_item(root, leaf, di))
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 51cb02d..fe2c982 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1935,7 +1935,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
+
+ len = sizeof(*head) +
+ pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
+- sizeof(struct timespec);
++ sizeof(struct ceph_timespec);
+
+ /* calculate (max) length for cap releases */
+ len += sizeof(struct ceph_mds_request_release) *
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index c711be8..9c8d233 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -271,8 +271,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
+ dput(dentry);
+ dentry = ERR_PTR(-EEXIST);
+ }
+- if (IS_ERR(dentry))
++
++ if (IS_ERR(dentry)) {
+ mutex_unlock(&d_inode(parent)->i_mutex);
++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
++ }
++
+ return dentry;
+ }
+
+diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
+index 4573155..2fab243 100644
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -411,7 +411,13 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+ ext4_lblk_t lblk = ex->ee_block;
+ ext4_fsblk_t pblk = ext4_ext_pblock(ex);
+ unsigned int len = ext4_ext_get_actual_len(ex);
+- int err = 0;
++ int ret, err = 0;
++
++#if 0
++ ext4_msg(inode->i_sb, KERN_CRIT,
++ "ext4_encrypted_zeroout ino %lu lblk %u len %u",
++ (unsigned long) inode->i_ino, lblk, len);
++#endif
+
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+
+@@ -437,17 +443,26 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+ goto errout;
+ }
+ bio->bi_bdev = inode->i_sb->s_bdev;
+- bio->bi_iter.bi_sector = pblk;
+- err = bio_add_page(bio, ciphertext_page,
++ bio->bi_iter.bi_sector =
++ pblk << (inode->i_sb->s_blocksize_bits - 9);
++ ret = bio_add_page(bio, ciphertext_page,
+ inode->i_sb->s_blocksize, 0);
+- if (err) {
++ if (ret != inode->i_sb->s_blocksize) {
++ /* should never happen! */
++ ext4_msg(inode->i_sb, KERN_ERR,
++ "bio_add_page failed: %d", ret);
++ WARN_ON(1);
+ bio_put(bio);
++ err = -EIO;
+ goto errout;
+ }
+ err = submit_bio_wait(WRITE, bio);
++ if ((err == 0) && bio->bi_error)
++ err = -EIO;
+ bio_put(bio);
+ if (err)
+ goto errout;
++ lblk++; pblk++;
+ }
+ err = 0;
+ errout:
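A standalone sketch (not part of the patch; a 4 KiB block size is assumed) of the unit conversion fixed above: bi_sector counts 512-byte sectors, so a filesystem block number has to be shifted left by (blocksize_bits - 9) before it is used as a sector.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* 4096-byte filesystem blocks */
	uint64_t pblk = 1000;			/* physical block number */

	uint64_t wrong  = pblk;				/* old code: block number used directly */
	uint64_t sector = pblk << (blocksize_bits - 9);	/* patched code: blocks -> sectors */

	printf("wrong = %llu, correct = %llu\n",
	       (unsigned long long)wrong, (unsigned long long)sector);	/* 1000 vs 8000 */
	return 0;
}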
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index d418431..e770c1ee 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
+ return 0;
+ }
+
++ err = handle->h_err;
+ if (!handle->h_transaction) {
+- err = jbd2_journal_stop(handle);
+- return handle->h_err ? handle->h_err : err;
++ rc = jbd2_journal_stop(handle);
++ return err ? err : rc;
+ }
+
+ sb = handle->h_transaction->t_journal->j_private;
+- err = handle->h_err;
+ rc = jbd2_journal_stop(handle);
+
+ if (!err)
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2553aa8..7f486e3 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3558,6 +3558,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ max_zeroout = sbi->s_extent_max_zeroout_kb >>
+ (inode->i_sb->s_blocksize_bits - 10);
+
++ if (ext4_encrypted_inode(inode))
++ max_zeroout = 0;
++
+ /* If extent is less than s_max_zeroout_kb, zeroout directly */
+ if (max_zeroout && (ee_len <= max_zeroout)) {
+ err = ext4_ext_zeroout(inode, ex);
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 84ba4d2..17fbe38 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -425,6 +425,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ struct buffer_head *bh, *head;
+ int ret = 0;
+ int nr_submitted = 0;
++ int nr_to_submit = 0;
+
+ blocksize = 1 << inode->i_blkbits;
+
+@@ -477,11 +478,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+ }
+ set_buffer_async_write(bh);
++ nr_to_submit++;
+ } while ((bh = bh->b_this_page) != head);
+
+ bh = head = page_buffers(page);
+
+- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
++ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
++ nr_to_submit) {
+ data_page = ext4_encrypt(inode, page);
+ if (IS_ERR(data_page)) {
+ ret = PTR_ERR(data_page);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a63c7b0..df84bd2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -394,9 +394,13 @@ static void ext4_handle_error(struct super_block *sb)
+ smp_wmb();
+ sb->s_flags |= MS_RDONLY;
+ }
+- if (test_opt(sb, ERRORS_PANIC))
++ if (test_opt(sb, ERRORS_PANIC)) {
++ if (EXT4_SB(sb)->s_journal &&
++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
++ return;
+ panic("EXT4-fs (device %s): panic forced after error\n",
+ sb->s_id);
++ }
+ }
+
+ #define ext4_error_ratelimit(sb) \
+@@ -585,8 +589,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
+ jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
+ save_error_info(sb, function, line);
+ }
+- if (test_opt(sb, ERRORS_PANIC))
++ if (test_opt(sb, ERRORS_PANIC)) {
++ if (EXT4_SB(sb)->s_journal &&
++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
++ return;
+ panic("EXT4-fs panic from previous error\n");
++ }
+ }
+
+ void __ext4_msg(struct super_block *sb,
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 8270fe9..37023d0 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2071,8 +2071,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+
+ __jbd2_journal_abort_hard(journal);
+
+- if (errno)
++ if (errno) {
+ jbd2_journal_update_sb_errno(journal);
++ write_lock(&journal->j_state_lock);
++ journal->j_flags |= JBD2_REC_ERR;
++ write_unlock(&journal->j_state_lock);
++ }
+ }
+
+ /**
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 326d9e1..ffdf9b9 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1824,7 +1824,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
+ nfsi->attr_gencount = fattr->gencount;
+ }
+- invalid &= ~NFS_INO_INVALID_ATTR;
++
++ /* Don't declare attrcache up to date if there were no attrs! */
++ if (fattr->valid != 0)
++ invalid &= ~NFS_INO_INVALID_ATTR;
++
+ /* Don't invalidate the data if we were to blame */
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
+ || S_ISLNK(inode->i_mode)))
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 223bedd..10410e8 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+ return ret;
+ idr_preload(GFP_KERNEL);
+ spin_lock(&nn->nfs_client_lock);
+- ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
++ ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ clp->cl_cb_ident = ret;
+ spin_unlock(&nn->nfs_client_lock);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 8abe271..abf5cae 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+
+ dprintk("--> %s\n", __func__);
+
+- lgp = kzalloc(sizeof(*lgp), gfp_flags);
+- if (lgp == NULL)
+- return NULL;
++ /*
++ * Synchronously retrieve layout information from server and
++ * store in lseg. If we race with a concurrent seqid morphing
++ * op, then re-send the LAYOUTGET.
++ */
++ do {
++ lgp = kzalloc(sizeof(*lgp), gfp_flags);
++ if (lgp == NULL)
++ return NULL;
++
++ i_size = i_size_read(ino);
++
++ lgp->args.minlength = PAGE_CACHE_SIZE;
++ if (lgp->args.minlength > range->length)
++ lgp->args.minlength = range->length;
++ if (range->iomode == IOMODE_READ) {
++ if (range->offset >= i_size)
++ lgp->args.minlength = 0;
++ else if (i_size - range->offset < lgp->args.minlength)
++ lgp->args.minlength = i_size - range->offset;
++ }
++ lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
++ lgp->args.range = *range;
++ lgp->args.type = server->pnfs_curr_ld->id;
++ lgp->args.inode = ino;
++ lgp->args.ctx = get_nfs_open_context(ctx);
++ lgp->gfp_flags = gfp_flags;
++ lgp->cred = lo->plh_lc_cred;
+
+- i_size = i_size_read(ino);
++ lseg = nfs4_proc_layoutget(lgp, gfp_flags);
++ } while (lseg == ERR_PTR(-EAGAIN));
+
+- lgp->args.minlength = PAGE_CACHE_SIZE;
+- if (lgp->args.minlength > range->length)
+- lgp->args.minlength = range->length;
+- if (range->iomode == IOMODE_READ) {
+- if (range->offset >= i_size)
+- lgp->args.minlength = 0;
+- else if (i_size - range->offset < lgp->args.minlength)
+- lgp->args.minlength = i_size - range->offset;
+- }
+- lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+- lgp->args.range = *range;
+- lgp->args.type = server->pnfs_curr_ld->id;
+- lgp->args.inode = ino;
+- lgp->args.ctx = get_nfs_open_context(ctx);
+- lgp->gfp_flags = gfp_flags;
+- lgp->cred = lo->plh_lc_cred;
+-
+- /* Synchronously retrieve layout information from server and
+- * store in lseg.
+- */
+- lseg = nfs4_proc_layoutget(lgp, gfp_flags);
+ if (IS_ERR(lseg)) {
+ switch (PTR_ERR(lseg)) {
+ case -ENOMEM:
+@@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
+ /* existing state ID, make sure the sequence number matches. */
+ if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+ dprintk("%s forget reply due to sequence\n", __func__);
++ status = -EAGAIN;
+ goto out_forget_reply;
+ }
+ pnfs_set_layout_stateid(lo, &res->stateid, false);
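A standalone sketch (not part of the patch; issue_layoutget() is a made-up stand-in for nfs4_proc_layoutget()) of the retry loop introduced above: the request is rebuilt and re-sent for as long as the reply had to be forgotten because of a concurrent stateid seqid change.

#include <stdio.h>
#include <errno.h>

/* made-up stand-in: fails twice with -EAGAIN, then succeeds */
static int issue_layoutget(void)
{
	static int calls;

	return (++calls < 3) ? -EAGAIN : 0;
}

int main(void)
{
	int ret;

	do {
		/* a real caller rebuilds the LAYOUTGET arguments here */
		ret = issue_layoutget();
	} while (ret == -EAGAIN);

	printf("layoutget completed, ret = %d\n", ret);
	return 0;
}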
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 0f1d569..0dea0c2 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -765,16 +765,68 @@ void nfs4_unhash_stid(struct nfs4_stid *s)
+ s->sc_type = 0;
+ }
+
+-static void
++/**
++ * nfs4_get_existing_delegation - Discover if this delegation already exists
++ * @clp: a pointer to the nfs4_client we're granting a delegation to
++ * @fp: a pointer to the nfs4_file we're granting a delegation on
++ *
++ * Return:
++ * On success: 0 if an existing delegation was not found.
++ *
++ * On error: -EAGAIN if one was previously granted to this nfs4_client
++ * for this nfs4_file.
++ *
++ */
++
++static int
++nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
++{
++ struct nfs4_delegation *searchdp = NULL;
++ struct nfs4_client *searchclp = NULL;
++
++ lockdep_assert_held(&state_lock);
++ lockdep_assert_held(&fp->fi_lock);
++
++ list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
++ searchclp = searchdp->dl_stid.sc_client;
++ if (clp == searchclp) {
++ return -EAGAIN;
++ }
++ }
++ return 0;
++}
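A standalone sketch (not part of the patch; a plain array stands in for the fp->fi_delegations list) of the check added above: a second delegation for the same client on the same file is refused with -EAGAIN.

#include <stdio.h>
#include <errno.h>

struct delegation {
	int client_id;
};

/* same idea as nfs4_get_existing_delegation(): scan the file's delegations */
static int get_existing_delegation(const struct delegation *delegs, int n, int client_id)
{
	int i;

	for (i = 0; i < n; i++)
		if (delegs[i].client_id == client_id)
			return -EAGAIN;		/* already granted to this client */
	return 0;
}

int main(void)
{
	struct delegation held[] = { { 1 }, { 7 } };

	printf("client 7 -> %d\n", get_existing_delegation(held, 2, 7));	/* -EAGAIN */
	printf("client 3 -> %d\n", get_existing_delegation(held, 2, 3));	/* 0 */
	return 0;
}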
++
++/**
++ * hash_delegation_locked - Add a delegation to the appropriate lists
++ * @dp: a pointer to the nfs4_delegation we are adding.
++ * @fp: a pointer to the nfs4_file we're granting a delegation on
++ *
++ * Return:
++ * On success: 0 if the delegation was successfully hashed.
++ *
++ * On error: -EAGAIN if one was previously granted to this
++ * nfs4_client for this nfs4_file. Delegation is not hashed.
++ *
++ */
++
++static int
+ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+ {
++ int status;
++ struct nfs4_client *clp = dp->dl_stid.sc_client;
++
+ lockdep_assert_held(&state_lock);
+ lockdep_assert_held(&fp->fi_lock);
+
++ status = nfs4_get_existing_delegation(clp, fp);
++ if (status)
++ return status;
++ ++fp->fi_delegees;
+ atomic_inc(&dp->dl_stid.sc_count);
+ dp->dl_stid.sc_type = NFS4_DELEG_STID;
+ list_add(&dp->dl_perfile, &fp->fi_delegations);
+- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
++ list_add(&dp->dl_perclnt, &clp->cl_delegations);
++ return 0;
+ }
+
+ static bool
+@@ -3360,6 +3412,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
+ stp->st_access_bmap = 0;
+ stp->st_deny_bmap = 0;
+ stp->st_openstp = NULL;
++ init_rwsem(&stp->st_rwsem);
+ spin_lock(&oo->oo_owner.so_client->cl_lock);
+ list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
+ spin_lock(&fp->fi_lock);
+@@ -3945,6 +3998,18 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
+ return fl;
+ }
+
++/**
++ * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
++ * @dp: a pointer to the nfs4_delegation we're adding.
++ *
++ * Return:
++ * On success: Return code will be 0 on success.
++ *
++ * On error: -EAGAIN if there was an existing delegation.
++ * nonzero if there is an error in other cases.
++ *
++ */
++
+ static int nfs4_setlease(struct nfs4_delegation *dp)
+ {
+ struct nfs4_file *fp = dp->dl_stid.sc_file;
+@@ -3976,16 +4041,19 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
+ goto out_unlock;
+ /* Race breaker */
+ if (fp->fi_deleg_file) {
+- status = 0;
+- ++fp->fi_delegees;
+- hash_delegation_locked(dp, fp);
++ status = hash_delegation_locked(dp, fp);
+ goto out_unlock;
+ }
+ fp->fi_deleg_file = filp;
+- fp->fi_delegees = 1;
+- hash_delegation_locked(dp, fp);
++ fp->fi_delegees = 0;
++ status = hash_delegation_locked(dp, fp);
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&state_lock);
++ if (status) {
++ /* Should never happen, this is a new fi_deleg_file */
++ WARN_ON_ONCE(1);
++ goto out_fput;
++ }
+ return 0;
+ out_unlock:
+ spin_unlock(&fp->fi_lock);
+@@ -4005,6 +4073,15 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ if (fp->fi_had_conflict)
+ return ERR_PTR(-EAGAIN);
+
++ spin_lock(&state_lock);
++ spin_lock(&fp->fi_lock);
++ status = nfs4_get_existing_delegation(clp, fp);
++ spin_unlock(&fp->fi_lock);
++ spin_unlock(&state_lock);
++
++ if (status)
++ return ERR_PTR(status);
++
+ dp = alloc_init_deleg(clp, fh, odstate);
+ if (!dp)
+ return ERR_PTR(-ENOMEM);
+@@ -4023,9 +4100,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ status = -EAGAIN;
+ goto out_unlock;
+ }
+- ++fp->fi_delegees;
+- hash_delegation_locked(dp, fp);
+- status = 0;
++ status = hash_delegation_locked(dp, fp);
+ out_unlock:
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&state_lock);
+@@ -4187,15 +4262,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ */
+ if (stp) {
+ /* Stateid was found, this is an OPEN upgrade */
++ down_read(&stp->st_rwsem);
+ status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
+- if (status)
++ if (status) {
++ up_read(&stp->st_rwsem);
+ goto out;
++ }
+ } else {
+ stp = open->op_stp;
+ open->op_stp = NULL;
+ init_open_stateid(stp, fp, open);
++ down_read(&stp->st_rwsem);
+ status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
+ if (status) {
++ up_read(&stp->st_rwsem);
+ release_open_stateid(stp);
+ goto out;
+ }
+@@ -4207,6 +4287,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ }
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
++ up_read(&stp->st_rwsem);
+
+ if (nfsd4_has_session(&resp->cstate)) {
+ if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
+@@ -4819,10 +4900,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
+ * revoked delegations are kept only for free_stateid.
+ */
+ return nfserr_bad_stateid;
++ down_write(&stp->st_rwsem);
+ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+- if (status)
+- return status;
+- return nfs4_check_fh(current_fh, &stp->st_stid);
++ if (status == nfs_ok)
++ status = nfs4_check_fh(current_fh, &stp->st_stid);
++ if (status != nfs_ok)
++ up_write(&stp->st_rwsem);
++ return status;
+ }
+
+ /*
+@@ -4869,6 +4953,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+ return status;
+ oo = openowner(stp->st_stateowner);
+ if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
++ up_write(&stp->st_rwsem);
+ nfs4_put_stid(&stp->st_stid);
+ return nfserr_bad_stateid;
+ }
+@@ -4899,11 +4984,14 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ goto out;
+ oo = openowner(stp->st_stateowner);
+ status = nfserr_bad_stateid;
+- if (oo->oo_flags & NFS4_OO_CONFIRMED)
++ if (oo->oo_flags & NFS4_OO_CONFIRMED) {
++ up_write(&stp->st_rwsem);
+ goto put_stateid;
++ }
+ oo->oo_flags |= NFS4_OO_CONFIRMED;
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
++ up_write(&stp->st_rwsem);
+ dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
+ __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
+
+@@ -4982,6 +5070,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
+ memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ status = nfs_ok;
+ put_stateid:
++ up_write(&stp->st_rwsem);
+ nfs4_put_stid(&stp->st_stid);
+ out:
+ nfsd4_bump_seqid(cstate, status);
+@@ -5035,6 +5124,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ goto out;
+ update_stateid(&stp->st_stid.sc_stateid);
+ memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
++ up_write(&stp->st_rwsem);
+
+ nfsd4_close_open_stateid(stp);
+
+@@ -5260,6 +5350,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ stp->st_access_bmap = 0;
+ stp->st_deny_bmap = open_stp->st_deny_bmap;
+ stp->st_openstp = open_stp;
++ init_rwsem(&stp->st_rwsem);
+ list_add(&stp->st_locks, &open_stp->st_locks);
+ list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ spin_lock(&fp->fi_lock);
+@@ -5428,6 +5519,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ &open_stp, nn);
+ if (status)
+ goto out;
++ up_write(&open_stp->st_rwsem);
+ open_sop = openowner(open_stp->st_stateowner);
+ status = nfserr_bad_stateid;
+ if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
+@@ -5435,6 +5527,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ goto out;
+ status = lookup_or_create_lock_state(cstate, open_stp, lock,
+ &lock_stp, &new);
++ if (status == nfs_ok)
++ down_write(&lock_stp->st_rwsem);
+ } else {
+ status = nfs4_preprocess_seqid_op(cstate,
+ lock->lk_old_lock_seqid,
+@@ -5540,6 +5634,8 @@ out:
+ seqid_mutating_err(ntohl(status)))
+ lock_sop->lo_owner.so_seqid++;
+
++ up_write(&lock_stp->st_rwsem);
++
+ /*
+ * If this is a new, never-before-used stateid, and we are
+ * returning an error, then just go ahead and release it.
+@@ -5709,6 +5805,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ fput:
+ fput(filp);
+ put_stateid:
++ up_write(&stp->st_rwsem);
+ nfs4_put_stid(&stp->st_stid);
+ out:
+ nfsd4_bump_seqid(cstate, status);
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index 583ffc1..31bde12 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -534,15 +534,16 @@ struct nfs4_file {
+ * Better suggestions welcome.
+ */
+ struct nfs4_ol_stateid {
+- struct nfs4_stid st_stid; /* must be first field */
+- struct list_head st_perfile;
+- struct list_head st_perstateowner;
+- struct list_head st_locks;
+- struct nfs4_stateowner * st_stateowner;
+- struct nfs4_clnt_odstate * st_clnt_odstate;
+- unsigned char st_access_bmap;
+- unsigned char st_deny_bmap;
+- struct nfs4_ol_stateid * st_openstp;
++ struct nfs4_stid st_stid;
++ struct list_head st_perfile;
++ struct list_head st_perstateowner;
++ struct list_head st_locks;
++ struct nfs4_stateowner *st_stateowner;
++ struct nfs4_clnt_odstate *st_clnt_odstate;
++ unsigned char st_access_bmap;
++ unsigned char st_deny_bmap;
++ struct nfs4_ol_stateid *st_openstp;
++ struct rw_semaphore st_rwsem;
+ };
+
+ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index b7dfac2..12bfa9c 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -374,6 +374,8 @@ static int ocfs2_mknod(struct inode *dir,
+ mlog_errno(status);
+ goto leave;
+ }
++ /* update inode->i_mode after mask with "umask". */
++ inode->i_mode = mode;
+
+ handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ S_ISDIR(mode),
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index f1f32af..3e4ff3f 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -227,7 +227,7 @@ struct ipv6_pinfo {
+ struct ipv6_ac_socklist *ipv6_ac_list;
+ struct ipv6_fl_socklist __rcu *ipv6_fl_list;
+
+- struct ipv6_txoptions *opt;
++ struct ipv6_txoptions __rcu *opt;
+ struct sk_buff *pktoptions;
+ struct sk_buff *rxpmtu;
+ struct inet6_cork cork;
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index df07e78..1abeb82 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1046,6 +1046,7 @@ struct journal_s
+ #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
+ * data write error in ordered
+ * mode */
++#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
+
+ /*
+ * Function declarations for the journaling transaction and buffer
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index dd20974..1565324 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+- u8 reserved_0[0x6];
++ u8 reserved_0[0x3];
++ u8 self_lb_en_modifiable[0x1];
++ u8 reserved_1[0x2];
+ u8 max_lso_cap[0x5];
+- u8 reserved_1[0x4];
++ u8 reserved_2[0x4];
+ u8 rss_ind_tbl_cap[0x4];
+- u8 reserved_2[0x3];
++ u8 reserved_3[0x3];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+- u8 reserved_3[0x2];
++ u8 reserved_4[0x2];
+ u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+
+- u8 reserved_4[0x20];
++ u8 reserved_5[0x20];
+
+- u8 reserved_5[0x10];
++ u8 reserved_6[0x10];
+ u8 lro_min_mss_size[0x10];
+
+- u8 reserved_6[0x120];
++ u8 reserved_7[0x120];
+
+ u8 lro_timer_supported_periods[4][0x20];
+
+- u8 reserved_7[0x600];
++ u8 reserved_8[0x600];
+ };
+
+ struct mlx5_ifc_roce_cap_bits {
+@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
+ };
+
+ struct mlx5_ifc_modify_tir_bitmask_bits {
+- u8 reserved[0x20];
++ u8 reserved_0[0x20];
+
+- u8 reserved1[0x1f];
++ u8 reserved_1[0x1b];
++ u8 self_lb_en[0x1];
++ u8 reserved_2[0x3];
+ u8 lro[0x1];
+ };
+
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index b36d837..2a91a05 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -62,6 +62,7 @@ struct unix_sock {
+ #define UNIX_GC_CANDIDATE 0
+ #define UNIX_GC_MAYBE_CYCLE 1
+ struct socket_wq peer_wq;
++ wait_queue_t peer_wake;
+ };
+
+ static inline struct unix_sock *unix_sk(const struct sock *sk)
+diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
+index aaf9700..fb961a5 100644
+--- a/include/net/ip6_fib.h
++++ b/include/net/ip6_fib.h
+@@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
+
+ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
+ {
+- if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
++ if (rt->rt6i_flags & RTF_PCPU ||
++ (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
+ rt = (struct rt6_info *)(rt->dst.from);
+
+ return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
+index fa915fa..d49a8f8 100644
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ err = ip6_local_out_sk(sk, skb);
+
+ if (net_xmit_eval(err) == 0) {
+- struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
++ put_cpu_ptr(tstats);
+ } else {
+ stats->tx_errors++;
+ stats->tx_aborted_errors++;
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index f6dafec..62a750a 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
+ struct pcpu_sw_netstats __percpu *stats)
+ {
+ if (err > 0) {
+- struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
++ struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_bytes += err;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
++ put_cpu_ptr(tstats);
+ } else if (err < 0) {
+ err_stats->tx_errors++;
+ err_stats->tx_aborted_errors++;
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 711cca4..b14e158 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
+ */
+
+ struct ipv6_txoptions {
++ atomic_t refcnt;
+ /* Length of this structure */
+ int tot_len;
+
+@@ -217,7 +218,7 @@ struct ipv6_txoptions {
+ struct ipv6_opt_hdr *dst0opt;
+ struct ipv6_rt_hdr *srcrt; /* Routing Header */
+ struct ipv6_opt_hdr *dst1opt;
+-
++ struct rcu_head rcu;
+ /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
+ };
+
+@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
+ struct rcu_head rcu;
+ };
+
++static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
++{
++ struct ipv6_txoptions *opt;
++
++ rcu_read_lock();
++ opt = rcu_dereference(np->opt);
++ if (opt && !atomic_inc_not_zero(&opt->refcnt))
++ opt = NULL;
++ rcu_read_unlock();
++ return opt;
++}
++
++static inline void txopt_put(struct ipv6_txoptions *opt)
++{
++ if (opt && atomic_dec_and_test(&opt->refcnt))
++ kfree_rcu(opt, rcu);
++}
++
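A standalone userspace sketch (not part of the patch; C11 atomics stand in for the kernel's atomic_inc_not_zero()/atomic_dec_and_test(), and the kfree_rcu() is only simulated by a printf) of the lookup pattern behind txopt_get()/txopt_put(): a reference is taken only while the refcount is still non-zero, and the object is released when the last reference is dropped.

#include <stdio.h>
#include <stdatomic.h>

struct opts {
	atomic_int refcnt;
};

/* analogue of txopt_get(): only take a reference if the object is still live */
static struct opts *opts_get(struct opts *o)
{
	int old = atomic_load(&o->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return o;	/* got a reference */
	}
	return NULL;			/* already dying, caller must not use it */
}

/* analogue of txopt_put(): drop the reference, free on the last put */
static void opts_put(struct opts *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		printf("last reference dropped, object would now be freed\n");
}

int main(void)
{
	struct opts o = { .refcnt = 1 };
	struct opts *ref = opts_get(&o);	/* refcnt 1 -> 2 */

	opts_put(ref);				/* 2 -> 1 */
	opts_put(&o);				/* 1 -> 0, "freed" */
	return 0;
}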
+ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ struct ip6_flowlabel *fl,
+@@ -490,6 +509,7 @@ struct ip6_create_arg {
+ u32 user;
+ const struct in6_addr *src;
+ const struct in6_addr *dst;
++ int iif;
+ u8 ecn;
+ };
+
+diff --git a/include/net/ndisc.h b/include/net/ndisc.h
+index aba5695..b3a7751 100644
+--- a/include/net/ndisc.h
++++ b/include/net/ndisc.h
+@@ -182,8 +182,7 @@ int ndisc_rcv(struct sk_buff *skb);
+
+ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *solicit,
+- const struct in6_addr *daddr, const struct in6_addr *saddr,
+- struct sk_buff *oskb);
++ const struct in6_addr *daddr, const struct in6_addr *saddr);
+
+ void ndisc_send_rs(struct net_device *dev,
+ const struct in6_addr *saddr, const struct in6_addr *daddr);
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 444faa8..f1ad8f8 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -61,6 +61,9 @@ struct Qdisc {
+ */
+ #define TCQ_F_WARN_NONWC (1 << 16)
+ #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
++#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
++ * qdisc_tree_decrease_qlen() should stop.
++ */
+ u32 limit;
+ const struct Qdisc_ops *ops;
+ struct qdisc_size_table __rcu *stab;
+diff --git a/include/net/switchdev.h b/include/net/switchdev.h
+index 319baab..731c40e 100644
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -272,7 +272,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+ struct net_device *filter_dev,
+ int idx)
+ {
+- return -EOPNOTSUPP;
++ return idx;
+ }
+
+ static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+diff --git a/kernel/.gitignore b/kernel/.gitignore
+index 790d83c..b3097bd 100644
+--- a/kernel/.gitignore
++++ b/kernel/.gitignore
+@@ -5,4 +5,3 @@ config_data.h
+ config_data.gz
+ timeconst.h
+ hz.bc
+-x509_certificate_list
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 29ace10..7a0decf 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -104,7 +104,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
+ /* all elements already exist */
+ return -EEXIST;
+
+- memcpy(array->value + array->elem_size * index, value, array->elem_size);
++ memcpy(array->value + array->elem_size * index, value, map->value_size);
+ return 0;
+ }
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 2b515ba..c169bba 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = pn->flags | NTF_PROXY;
+ ndm->ndm_type = RTN_UNICAST;
+- ndm->ndm_ifindex = pn->dev->ifindex;
++ ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
+ ndm->ndm_state = NUD_NONE;
+
+ if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+@@ -2290,7 +2290,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ if (h > s_h)
+ s_idx = 0;
+ for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+- if (dev_net(n->dev) != net)
++ if (pneigh_net(n) != net)
+ continue;
+ if (idx < s_idx)
+ goto next;
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 3b6899b..8a1741b 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ err = put_user(cmlen, &cm->cmsg_len);
+ if (!err) {
+ cmlen = CMSG_SPACE(i*sizeof(int));
++ if (msg->msg_controllen < cmlen)
++ cmlen = msg->msg_controllen;
+ msg->msg_control += cmlen;
+ msg->msg_controllen -= cmlen;
+ }
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 5165571..a049050 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -202,7 +202,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+ security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+
+
+- final_p = fl6_update_dst(&fl6, np->opt, &final);
++ rcu_read_lock();
++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
++ rcu_read_unlock();
+
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+@@ -219,7 +221,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+ &ireq->ir_v6_loc_addr,
+ &ireq->ir_v6_rmt_addr);
+ fl6.daddr = ireq->ir_v6_rmt_addr;
+- err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++ rcu_read_lock();
++ err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++ np->tclass);
++ rcu_read_unlock();
+ err = net_xmit_eval(err);
+ }
+
+@@ -415,6 +420,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ {
+ struct inet_request_sock *ireq = inet_rsk(req);
+ struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
++ struct ipv6_txoptions *opt;
+ struct inet_sock *newinet;
+ struct dccp6_sock *newdp6;
+ struct sock *newsk;
+@@ -534,13 +540,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ * Yes, keeping reference count would be much more clever, but we make
+ * one more one thing there: reattach optmem to newsk.
+ */
+- if (np->opt != NULL)
+- newnp->opt = ipv6_dup_options(newsk, np->opt);
+-
++ opt = rcu_dereference(np->opt);
++ if (opt) {
++ opt = ipv6_dup_options(newsk, opt);
++ RCU_INIT_POINTER(newnp->opt, opt);
++ }
+ inet_csk(newsk)->icsk_ext_hdr_len = 0;
+- if (newnp->opt != NULL)
+- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
+- newnp->opt->opt_flen);
++ if (opt)
++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
++ opt->opt_flen;
+
+ dccp_sync_mss(newsk, dst_mtu(dst));
+
+@@ -793,6 +801,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct in6_addr *saddr = NULL, *final_p, final;
++ struct ipv6_txoptions *opt;
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+ int addr_type;
+@@ -892,7 +901,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ fl6.fl6_sport = inet->inet_sport;
+ security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+- final_p = fl6_update_dst(&fl6, np->opt, &final);
++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++ final_p = fl6_update_dst(&fl6, opt, &final);
+
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+@@ -912,9 +922,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ __ip6_dst_store(sk, dst, NULL, NULL);
+
+ icsk->icsk_ext_hdr_len = 0;
+- if (np->opt != NULL)
+- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+- np->opt->opt_nflen);
++ if (opt)
++ icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
+
+ inet->inet_dport = usin->sin6_port;
+
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 8e8203d..ef7e2c4 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm);
+ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
+ int cmd);
+-static void mroute_clean_tables(struct mr_table *mrt);
++static void mroute_clean_tables(struct mr_table *mrt, bool all);
+ static void ipmr_expire_process(unsigned long arg);
+
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ static void ipmr_free_table(struct mr_table *mrt)
+ {
+ del_timer_sync(&mrt->ipmr_expire_timer);
+- mroute_clean_tables(mrt);
++ mroute_clean_tables(mrt, true);
+ kfree(mrt);
+ }
+
+@@ -1208,7 +1208,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+ * Close the multicast socket, and clear the vif tables etc
+ */
+
+-static void mroute_clean_tables(struct mr_table *mrt)
++static void mroute_clean_tables(struct mr_table *mrt, bool all)
+ {
+ int i;
+ LIST_HEAD(list);
+@@ -1217,8 +1217,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
+ /* Shut down all active vif entries */
+
+ for (i = 0; i < mrt->maxvif; i++) {
+- if (!(mrt->vif_table[i].flags & VIFF_STATIC))
+- vif_delete(mrt, i, 0, &list);
++ if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
++ continue;
++ vif_delete(mrt, i, 0, &list);
+ }
+ unregister_netdevice_many(&list);
+
+@@ -1226,7 +1227,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
+
+ for (i = 0; i < MFC_LINES; i++) {
+ list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+- if (c->mfc_flags & MFC_STATIC)
++ if (!all && (c->mfc_flags & MFC_STATIC))
+ continue;
+ list_del_rcu(&c->list);
+ mroute_netlink_event(mrt, c, RTM_DELROUTE);
+@@ -1261,7 +1262,7 @@ static void mrtsock_destruct(struct sock *sk)
+ NETCONFA_IFINDEX_ALL,
+ net->ipv4.devconf_all);
+ RCU_INIT_POINTER(mrt->mroute_sk, NULL);
+- mroute_clean_tables(mrt);
++ mroute_clean_tables(mrt, false);
+ }
+ }
+ rtnl_unlock();
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a8f515b..0a2b61d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4457,19 +4457,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
+ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+ {
+ struct sk_buff *skb;
++ int err = -ENOMEM;
++ int data_len = 0;
+ bool fragstolen;
+
+ if (size == 0)
+ return 0;
+
+- skb = alloc_skb(size, sk->sk_allocation);
++ if (size > PAGE_SIZE) {
++ int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
++
++ data_len = npages << PAGE_SHIFT;
++ size = data_len + (size & ~PAGE_MASK);
++ }
++ skb = alloc_skb_with_frags(size - data_len, data_len,
++ PAGE_ALLOC_COSTLY_ORDER,
++ &err, sk->sk_allocation);
+ if (!skb)
+ goto err;
+
++ skb_put(skb, size - data_len);
++ skb->data_len = data_len;
++ skb->len = size;
++
+ if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+ goto err_free;
+
+- if (memcpy_from_msg(skb_put(skb, size), msg, size))
++ err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
++ if (err)
+ goto err_free;
+
+ TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
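A standalone sketch (not part of the patch; 4 KiB pages and MAX_SKB_FRAGS = 17 are assumed) of the size split introduced above: whole pages become paged skb data and only the sub-page remainder stays in the linear area, so a large repair write no longer needs one huge linear allocation.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define MAX_SKB_FRAGS	17	/* typical value, depends on the kernel config */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t size = 10000;	/* hypothetical write larger than one page */
	size_t data_len = 0;

	if (size > PAGE_SIZE) {
		size_t npages = min_sz(size >> PAGE_SHIFT, MAX_SKB_FRAGS);

		data_len = npages << PAGE_SHIFT;	/* whole pages -> skb frags */
		size = data_len + (size & ~PAGE_MASK);	/* frags + sub-page tail */
	}

	printf("linear = %zu, paged = %zu, total = %zu\n",
	       size - data_len, data_len, size);	/* linear = 1808, paged = 8192, total = 10000 */
	return 0;
}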
+@@ -4485,7 +4500,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+ err_free:
+ kfree_skb(skb);
+ err:
+- return -ENOMEM;
++ return err;
++
+ }
+
+ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+@@ -5643,6 +5659,7 @@ discard:
+ }
+
+ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
++ tp->copied_seq = tp->rcv_nxt;
+ tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+
+ /* RFC1323: The window in SYN & SYN/ACK segments is
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 93898e0..a7739c8 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -922,7 +922,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ }
+
+ md5sig = rcu_dereference_protected(tp->md5sig_info,
+- sock_owned_by_user(sk));
++ sock_owned_by_user(sk) ||
++ lockdep_is_held(&sk->sk_lock.slock));
+ if (!md5sig) {
+ md5sig = kmalloc(sizeof(*md5sig), gfp);
+ if (!md5sig)
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 7149ebc..04f0a05 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
+ syn_set = true;
+ } else {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
++ /* Some middle-boxes may black-hole Fast Open _after_
++ * the handshake. Therefore we conservatively disable
++ * Fast Open on this path on recurring timeouts with
++ * few or zero bytes acked after Fast Open.
++ */
++ if (tp->syn_data_acked &&
++ tp->bytes_acked <= tp->rx_opt.mss_clamp) {
++ tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
++ if (icsk->icsk_retransmits == sysctl_tcp_retries1)
++ NET_INC_STATS_BH(sock_net(sk),
++ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
++ }
+ /* Black hole detection */
+ tcp_mtu_probing(icsk, sk);
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index dd00828..3939dd2 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3628,7 +3628,7 @@ static void addrconf_dad_work(struct work_struct *w)
+
+ /* send a neighbour solicitation for our addr */
+ addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
+- ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
++ ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+ out:
+ in6_ifa_put(ifp);
+ rtnl_unlock();
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 44bb66b..38d66dd 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk)
+
+ /* Free tx options */
+
+- opt = xchg(&np->opt, NULL);
+- if (opt)
+- sock_kfree_s(sk, opt, opt->tot_len);
++ opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
++ if (opt) {
++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++ txopt_put(opt);
++ }
+ }
+ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
+
+@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
+ fl6.fl6_sport = inet->inet_sport;
+ security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+- final_p = fl6_update_dst(&fl6, np->opt, &final);
++ rcu_read_lock();
++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
++ &final);
++ rcu_read_unlock();
+
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ if (IS_ERR(dst)) {
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 9aadd57..a42a673 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -167,8 +167,10 @@ ipv4_connected:
+
+ security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+- opt = flowlabel ? flowlabel->opt : np->opt;
++ rcu_read_lock();
++ opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+ final_p = fl6_update_dst(&fl6, opt, &final);
++ rcu_read_unlock();
+
+ dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ err = 0;
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index ce203b0..ea7c4d6 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
+ *((char **)&opt2->dst1opt) += dif;
+ if (opt2->srcrt)
+ *((char **)&opt2->srcrt) += dif;
++ atomic_set(&opt2->refcnt, 1);
+ }
+ return opt2;
+ }
+@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+ return ERR_PTR(-ENOBUFS);
+
+ memset(opt2, 0, tot_len);
+-
++ atomic_set(&opt2->refcnt, 1);
+ opt2->tot_len = tot_len;
+ p = (char *)(opt2 + 1);
+
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index 6927f3f..9beed30 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -77,7 +77,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = IPPROTO_TCP;
+ fl6->daddr = ireq->ir_v6_rmt_addr;
+- final_p = fl6_update_dst(fl6, np->opt, &final);
++ rcu_read_lock();
++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
++ rcu_read_unlock();
+ fl6->saddr = ireq->ir_v6_loc_addr;
+ fl6->flowi6_oif = ireq->ir_iif;
+ fl6->flowi6_mark = ireq->ir_mark;
+@@ -207,7 +209,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ fl6->fl6_dport = inet->inet_dport;
+ security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+
+- final_p = fl6_update_dst(fl6, np->opt, &final);
++ rcu_read_lock();
++ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
++ rcu_read_unlock();
+
+ dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+ if (!dst) {
+@@ -240,7 +244,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
+ /* Restore final destination back after routing done */
+ fl6.daddr = sk->sk_v6_daddr;
+
+- res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++ res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++ np->tclass);
+ rcu_read_unlock();
+ return res;
+ }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index eabffbb..137fca4 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
+ int i;
+
+ for_each_possible_cpu(i)
+- ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
++ ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+ }
+ EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
+
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 0e004cc..35eee72 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
+ int cmd);
+ static int ip6mr_rtm_dumproute(struct sk_buff *skb,
+ struct netlink_callback *cb);
+-static void mroute_clean_tables(struct mr6_table *mrt);
++static void mroute_clean_tables(struct mr6_table *mrt, bool all);
+ static void ipmr_expire_process(unsigned long arg);
+
+ #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
+ static void ip6mr_free_table(struct mr6_table *mrt)
+ {
+ del_timer_sync(&mrt->ipmr_expire_timer);
+- mroute_clean_tables(mrt);
++ mroute_clean_tables(mrt, true);
+ kfree(mrt);
+ }
+
+@@ -1542,7 +1542,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
+ * Close the multicast socket, and clear the vif tables etc
+ */
+
+-static void mroute_clean_tables(struct mr6_table *mrt)
++static void mroute_clean_tables(struct mr6_table *mrt, bool all)
+ {
+ int i;
+ LIST_HEAD(list);
+@@ -1552,8 +1552,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
+ * Shut down all active vif entries
+ */
+ for (i = 0; i < mrt->maxvif; i++) {
+- if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
+- mif6_delete(mrt, i, &list);
++ if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
++ continue;
++ mif6_delete(mrt, i, &list);
+ }
+ unregister_netdevice_many(&list);
+
+@@ -1562,7 +1563,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
+ */
+ for (i = 0; i < MFC6_LINES; i++) {
+ list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
+- if (c->mfc_flags & MFC_STATIC)
++ if (!all && (c->mfc_flags & MFC_STATIC))
+ continue;
+ write_lock_bh(&mrt_lock);
+ list_del(&c->list);
+@@ -1625,7 +1626,7 @@ int ip6mr_sk_done(struct sock *sk)
+ net->ipv6.devconf_all);
+ write_unlock_bh(&mrt_lock);
+
+- mroute_clean_tables(mrt);
++ mroute_clean_tables(mrt, false);
+ err = 0;
+ break;
+ }
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 63e6956..4449ad1 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
+ icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ }
+ }
+- opt = xchg(&inet6_sk(sk)->opt, opt);
++ opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
++ opt);
+ sk_dst_reset(sk);
+
+ return opt;
+@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ sk->sk_socket->ops = &inet_dgram_ops;
+ sk->sk_family = PF_INET;
+ }
+- opt = xchg(&np->opt, NULL);
+- if (opt)
+- sock_kfree_s(sk, opt, opt->tot_len);
++ opt = xchg((__force struct ipv6_txoptions **)&np->opt,
++ NULL);
++ if (opt) {
++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++ txopt_put(opt);
++ }
+ pktopt = xchg(&np->pktoptions, NULL);
+ kfree_skb(pktopt);
+
+@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+ break;
+
+- opt = ipv6_renew_options(sk, np->opt, optname,
++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++ opt = ipv6_renew_options(sk, opt, optname,
+ (struct ipv6_opt_hdr __user *)optval,
+ optlen);
+ if (IS_ERR(opt)) {
+@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ retv = 0;
+ opt = ipv6_update_options(sk, opt);
+ sticky_done:
+- if (opt)
+- sock_kfree_s(sk, opt, opt->tot_len);
++ if (opt) {
++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++ txopt_put(opt);
++ }
+ break;
+ }
+
+@@ -486,6 +493,7 @@ sticky_done:
+ break;
+
+ memset(opt, 0, sizeof(*opt));
++ atomic_set(&opt->refcnt, 1);
+ opt->tot_len = sizeof(*opt) + optlen;
+ retv = -EFAULT;
+ if (copy_from_user(opt+1, optval, optlen))
+@@ -502,8 +510,10 @@ update:
+ retv = 0;
+ opt = ipv6_update_options(sk, opt);
+ done:
+- if (opt)
+- sock_kfree_s(sk, opt, opt->tot_len);
++ if (opt) {
++ atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++ txopt_put(opt);
++ }
+ break;
+ }
+ case IPV6_UNICAST_HOPS:
+@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ case IPV6_RTHDR:
+ case IPV6_DSTOPTS:
+ {
++ struct ipv6_txoptions *opt;
+
+ lock_sock(sk);
+- len = ipv6_getsockopt_sticky(sk, np->opt,
+- optname, optval, len);
++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++ len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
+ release_sock(sk);
+ /* check if ipv6_getsockopt_sticky() returns err code */
+ if (len < 0)
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 083b292..41e3b5e 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1651,7 +1651,6 @@ out:
+ if (!err) {
+ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+ } else {
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ }
+@@ -2014,7 +2013,6 @@ out:
+ if (!err) {
+ ICMP6MSGOUT_INC_STATS(net, idev, type);
+ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+- IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
+ } else
+ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 64a7135..9ad46cd 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -553,8 +553,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
+
+ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *solicit,
+- const struct in6_addr *daddr, const struct in6_addr *saddr,
+- struct sk_buff *oskb)
++ const struct in6_addr *daddr, const struct in6_addr *saddr)
+ {
+ struct sk_buff *skb;
+ struct in6_addr addr_buf;
+@@ -590,9 +589,6 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+ ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
+ dev->dev_addr);
+
+- if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+- skb_dst_copy(skb, oskb);
+-
+ ndisc_send_skb(skb, daddr, saddr);
+ }
+
+@@ -679,12 +675,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
+ "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
+ __func__, target);
+ }
+- ndisc_send_ns(dev, neigh, target, target, saddr, skb);
++ ndisc_send_ns(dev, neigh, target, target, saddr);
+ } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
+ neigh_app_ns(neigh);
+ } else {
+ addrconf_addr_solict_mult(target, &mcaddr);
+- ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
++ ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+ }
+ }
+
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index c7196ad..dc50143 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
+ /* Creation primitives. */
+ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+ u32 user, struct in6_addr *src,
+- struct in6_addr *dst, u8 ecn)
++ struct in6_addr *dst, int iif, u8 ecn)
+ {
+ struct inet_frag_queue *q;
+ struct ip6_create_arg arg;
+@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+ arg.user = user;
+ arg.src = src;
+ arg.dst = dst;
++ arg.iif = iif;
+ arg.ecn = ecn;
+
+ local_bh_disable();
+@@ -603,7 +604,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ fhdr = (struct frag_hdr *)skb_transport_header(clone);
+
+ fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+- ip6_frag_ecn(hdr));
++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq == NULL) {
+ pr_debug("Can't find and can't create new queue\n");
+ goto ret_orig;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index fdbada156..fe97729 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -732,6 +732,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
+
+ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ {
++ struct ipv6_txoptions *opt_to_free = NULL;
+ struct ipv6_txoptions opt_space;
+ DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
+ struct in6_addr *daddr, *final_p, final;
+@@ -838,8 +839,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ if (!(opt->opt_nflen|opt->opt_flen))
+ opt = NULL;
+ }
+- if (!opt)
+- opt = np->opt;
++ if (!opt) {
++ opt = txopt_get(np);
++ opt_to_free = opt;
++ }
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ opt = ipv6_fixup_options(&opt_space, opt);
+@@ -905,6 +908,7 @@ done:
+ dst_release(dst);
+ out:
+ fl6_sock_release(flowlabel);
++ txopt_put(opt_to_free);
+ return err < 0 ? err : len;
+ do_confirm:
+ dst_confirm(dst);
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index f1159bb..04013a9 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
+ return fq->id == arg->id &&
+ fq->user == arg->user &&
+ ipv6_addr_equal(&fq->saddr, arg->src) &&
+- ipv6_addr_equal(&fq->daddr, arg->dst);
++ ipv6_addr_equal(&fq->daddr, arg->dst) &&
++ (arg->iif == fq->iif ||
++ !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
++ IPV6_ADDR_LINKLOCAL)));
+ }
+ EXPORT_SYMBOL(ip6_frag_match);
+
+@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
+
+ static struct frag_queue *
+ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+- const struct in6_addr *dst, u8 ecn)
++ const struct in6_addr *dst, int iif, u8 ecn)
+ {
+ struct inet_frag_queue *q;
+ struct ip6_create_arg arg;
+@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+ arg.src = src;
+ arg.dst = dst;
++ arg.iif = iif;
+ arg.ecn = ecn;
+
+ hash = inet6_hash_frag(id, src, dst);
+@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ }
+
+ fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+- ip6_frag_ecn(hdr));
++ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq) {
+ int ret;
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 946880a..fd0e674 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -403,6 +403,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ }
+ }
+
++static bool __rt6_check_expired(const struct rt6_info *rt)
++{
++ if (rt->rt6i_flags & RTF_EXPIRES)
++ return time_after(jiffies, rt->dst.expires);
++ else
++ return false;
++}
++
+ static bool rt6_check_expired(const struct rt6_info *rt)
+ {
+ if (rt->rt6i_flags & RTF_EXPIRES) {
+@@ -538,7 +546,7 @@ static void rt6_probe_deferred(struct work_struct *w)
+ container_of(w, struct __rt6_probe_work, work);
+
+ addrconf_addr_solict_mult(&work->target, &mcaddr);
+- ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
++ ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+ dev_put(work->dev);
+ kfree(work);
+ }
+@@ -1270,7 +1278,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
+
+ static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
+ {
+- if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
++ if (!__rt6_check_expired(rt) &&
++ rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+ rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+ return &rt->dst;
+ else
+@@ -1290,7 +1299,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
+
+ rt6_dst_from_metrics_check(rt);
+
+- if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
++ if (rt->rt6i_flags & RTF_PCPU ||
++ (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
+ return rt6_dst_from_check(rt, cookie);
+ else
+ return rt6_check(rt, cookie);
+@@ -1340,6 +1350,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
+ rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ }
+
++static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
++{
++ return !(rt->rt6i_flags & RTF_CACHE) &&
++ (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
++}
++
+ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+ const struct ipv6hdr *iph, u32 mtu)
+ {
+@@ -1353,7 +1369,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+ if (mtu >= dst_mtu(dst))
+ return;
+
+- if (rt6->rt6i_flags & RTF_CACHE) {
++ if (!rt6_cache_allowed_for_pmtu(rt6)) {
+ rt6_do_update_pmtu(rt6, mtu);
+ } else {
+ const struct in6_addr *daddr, *saddr;
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 0909f4e..f30bfdc 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -225,7 +225,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.daddr = ireq->ir_v6_rmt_addr;
+- final_p = fl6_update_dst(&fl6, np->opt, &final);
++ final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ fl6.saddr = ireq->ir_v6_loc_addr;
+ fl6.flowi6_oif = sk->sk_bound_dev_if;
+ fl6.flowi6_mark = ireq->ir_mark;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 97d9314..9e9b77b 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct in6_addr *saddr = NULL, *final_p, final;
++ struct ipv6_txoptions *opt;
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+ int addr_type;
+@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ fl6.fl6_dport = usin->sin6_port;
+ fl6.fl6_sport = inet->inet_sport;
+
+- final_p = fl6_update_dst(&fl6, np->opt, &final);
++ opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++ final_p = fl6_update_dst(&fl6, opt, &final);
+
+ security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ tcp_fetch_timewait_stamp(sk, dst);
+
+ icsk->icsk_ext_hdr_len = 0;
+- if (np->opt)
+- icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+- np->opt->opt_nflen);
++ if (opt)
++ icsk->icsk_ext_hdr_len = opt->opt_flen +
++ opt->opt_nflen;
+
+ tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+
+@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
+
+ skb_set_queue_mapping(skb, queue_mapping);
+- err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
++ err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
++ np->tclass);
+ err = net_xmit_eval(err);
+ }
+
+@@ -991,6 +994,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ struct inet_request_sock *ireq;
+ struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ struct tcp6_sock *newtcp6sk;
++ struct ipv6_txoptions *opt;
+ struct inet_sock *newinet;
+ struct tcp_sock *newtp;
+ struct sock *newsk;
+@@ -1126,13 +1130,15 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ but we make one more one thing there: reattach optmem
+ to newsk.
+ */
+- if (np->opt)
+- newnp->opt = ipv6_dup_options(newsk, np->opt);
+-
++ opt = rcu_dereference(np->opt);
++ if (opt) {
++ opt = ipv6_dup_options(newsk, opt);
++ RCU_INIT_POINTER(newnp->opt, opt);
++ }
+ inet_csk(newsk)->icsk_ext_hdr_len = 0;
+- if (newnp->opt)
+- inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
+- newnp->opt->opt_flen);
++ if (opt)
++ inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
++ opt->opt_flen;
+
+ tcp_ca_openreq_child(newsk, dst);
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 0aba654..8379fc2 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1107,6 +1107,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
+ struct in6_addr *daddr, *final_p, final;
+ struct ipv6_txoptions *opt = NULL;
++ struct ipv6_txoptions *opt_to_free = NULL;
+ struct ip6_flowlabel *flowlabel = NULL;
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+@@ -1260,8 +1261,10 @@ do_udp_sendmsg:
+ opt = NULL;
+ connected = 0;
+ }
+- if (!opt)
+- opt = np->opt;
++ if (!opt) {
++ opt = txopt_get(np);
++ opt_to_free = opt;
++ }
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ opt = ipv6_fixup_options(&opt_space, opt);
+@@ -1370,6 +1373,7 @@ release_dst:
+ out:
+ dst_release(dst);
+ fl6_sock_release(flowlabel);
++ txopt_put(opt_to_free);
+ if (!err)
+ return len;
+ /*
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index d1ded37..0ce9da9 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
+ struct in6_addr *daddr, *final_p, final;
+ struct ipv6_pinfo *np = inet6_sk(sk);
++ struct ipv6_txoptions *opt_to_free = NULL;
+ struct ipv6_txoptions *opt = NULL;
+ struct ip6_flowlabel *flowlabel = NULL;
+ struct dst_entry *dst = NULL;
+@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ opt = NULL;
+ }
+
+- if (opt == NULL)
+- opt = np->opt;
++ if (!opt) {
++ opt = txopt_get(np);
++ opt_to_free = opt;
++ }
+ if (flowlabel)
+ opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ opt = ipv6_fixup_options(&opt_space, opt);
+@@ -631,6 +634,7 @@ done:
+ dst_release(dst);
+ out:
+ fl6_sock_release(flowlabel);
++ txopt_put(opt_to_free);
+
+ return err < 0 ? err : len;
+
+diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
+index a7a80a6..653d073 100644
+--- a/net/openvswitch/dp_notify.c
++++ b/net/openvswitch/dp_notify.c
+@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
+- if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
++ if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
+ continue;
+
+ if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
+diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
+index f7e8dcc..ac14c48 100644
+--- a/net/openvswitch/vport-netdev.c
++++ b/net/openvswitch/vport-netdev.c
+@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
+ if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+ ovs_netdev_detach_dev(vport);
+
+- /* Early release so we can unregister the device */
++ /* We can be invoked by both explicit vport deletion and
++ * underlying netdev deregistration; delete the link only
++ * if it's not already shutting down.
++ */
++ if (vport->dev->reg_state == NETREG_REGISTERED)
++ rtnl_delete_link(vport->dev);
+ dev_put(vport->dev);
+- rtnl_delete_link(vport->dev);
+ vport->dev = NULL;
+ rtnl_unlock();
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 27b2898..4695a36 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
+ kfree_rcu(po->rollover, rcu);
+ }
+
++static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
++ struct sk_buff *skb)
++{
++ /* Earlier code assumed this would be a VLAN pkt, double-check
++ * this now that we have the actual packet in hand. We can only
++ * do this check on Ethernet devices.
++ */
++ if (unlikely(dev->type != ARPHRD_ETHER))
++ return false;
++
++ skb_reset_mac_header(skb);
++ return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
++}
++
+ static const struct proto_ops packet_ops;
+
+ static const struct proto_ops packet_ops_spkt;
+@@ -1902,18 +1916,10 @@ retry:
+ goto retry;
+ }
+
+- if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
+- /* Earlier code assumed this would be a VLAN pkt,
+- * double-check this now that we have the actual
+- * packet in hand.
+- */
+- struct ethhdr *ehdr;
+- skb_reset_mac_header(skb);
+- ehdr = eth_hdr(skb);
+- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+- err = -EMSGSIZE;
+- goto out_unlock;
+- }
++ if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
++ !packet_extra_vlan_len_allowed(dev, skb)) {
++ err = -EMSGSIZE;
++ goto out_unlock;
+ }
+
+ skb->protocol = proto;
+@@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
+ return false;
+ }
+
++static void tpacket_set_protocol(const struct net_device *dev,
++ struct sk_buff *skb)
++{
++ if (dev->type == ARPHRD_ETHER) {
++ skb_reset_mac_header(skb);
++ skb->protocol = eth_hdr(skb)->h_proto;
++ }
++}
++
+ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ void *frame, struct net_device *dev, int size_max,
+ __be16 proto, unsigned char *addr, int hlen)
+@@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ skb_reserve(skb, hlen);
+ skb_reset_network_header(skb);
+
+- if (!packet_use_direct_xmit(po))
+- skb_probe_transport_header(skb, 0);
+ if (unlikely(po->tp_tx_has_off)) {
+ int off_min, off_max, off;
+ off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+@@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ dev->hard_header_len);
+ if (unlikely(err))
+ return err;
++ if (!skb->protocol)
++ tpacket_set_protocol(dev, skb);
+
+ data += dev->hard_header_len;
+ to_write -= dev->hard_header_len;
+@@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ len = ((to_write > len_max) ? len_max : to_write);
+ }
+
++ skb_probe_transport_header(skb, 0);
++
+ return tp_len;
+ }
+
+@@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_put;
+
+- reserve = dev->hard_header_len + VLAN_HLEN;
++ if (po->sk.sk_socket->type == SOCK_RAW)
++ reserve = dev->hard_header_len;
+ size_max = po->tx_ring.frame_size
+ - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
+
+- if (size_max > dev->mtu + reserve)
+- size_max = dev->mtu + reserve;
++ if (size_max > dev->mtu + reserve + VLAN_HLEN)
++ size_max = dev->mtu + reserve + VLAN_HLEN;
+
+ do {
+ ph = packet_current_frame(po, &po->tx_ring,
+@@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
+ addr, hlen);
+ if (likely(tp_len >= 0) &&
+- tp_len > dev->mtu + dev->hard_header_len) {
+- struct ethhdr *ehdr;
+- /* Earlier code assumed this would be a VLAN pkt,
+- * double-check this now that we have the actual
+- * packet in hand.
+- */
++ tp_len > dev->mtu + reserve &&
++ !packet_extra_vlan_len_allowed(dev, skb))
++ tp_len = -EMSGSIZE;
+
+- skb_reset_mac_header(skb);
+- ehdr = eth_hdr(skb);
+- if (ehdr->h_proto != htons(ETH_P_8021Q))
+- tp_len = -EMSGSIZE;
+- }
+ if (unlikely(tp_len < 0)) {
+ if (po->tp_loss) {
+ __packet_set_status(po, ph,
+@@ -2757,18 +2767,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+
+ sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+
+- if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
+- /* Earlier code assumed this would be a VLAN pkt,
+- * double-check this now that we have the actual
+- * packet in hand.
+- */
+- struct ethhdr *ehdr;
+- skb_reset_mac_header(skb);
+- ehdr = eth_hdr(skb);
+- if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+- err = -EMSGSIZE;
+- goto out_free;
+- }
++ if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
++ !packet_extra_vlan_len_allowed(dev, skb)) {
++ err = -EMSGSIZE;
++ goto out_free;
+ }
+
+ skb->protocol = proto;
+@@ -2799,8 +2801,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ len += vnet_hdr_len;
+ }
+
+- if (!packet_use_direct_xmit(po))
+- skb_probe_transport_header(skb, reserve);
++ skb_probe_transport_header(skb, reserve);
++
+ if (unlikely(extra_len == 4))
+ skb->no_fcs = 1;
+
+diff --git a/net/rds/connection.c b/net/rds/connection.c
+index 49adeef..9b2de5e 100644
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -190,12 +190,6 @@ new_conn:
+ }
+ }
+
+- if (trans == NULL) {
+- kmem_cache_free(rds_conn_slab, conn);
+- conn = ERR_PTR(-ENODEV);
+- goto out;
+- }
+-
+ conn->c_trans = trans;
+
+ ret = trans->conn_alloc(conn, gfp);
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 4df61a5..859de6f 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1009,11 +1009,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ release_sock(sk);
+ }
+
+- /* racing with another thread binding seems ok here */
++ lock_sock(sk);
+ if (daddr == 0 || rs->rs_bound_addr == 0) {
++ release_sock(sk);
+ ret = -ENOTCONN; /* XXX not a great errno */
+ goto out;
+ }
++ release_sock(sk);
+
+ if (payload_len > rds_sk_sndbuf(rs)) {
+ ret = -EMSGSIZE;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index f43c8f3..7ec667d 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
+ }
+
+ /* We know handle. Find qdisc among all qdisc's attached to device
+- (root qdisc, all its children, children of children etc.)
++ * (root qdisc, all its children, children of children etc.)
++ * Note: caller either uses rtnl or rcu_read_lock()
+ */
+
+ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+ root->handle == handle)
+ return root;
+
+- list_for_each_entry(q, &root->list, list) {
++ list_for_each_entry_rcu(q, &root->list, list) {
+ if (q->handle == handle)
+ return q;
+ }
+@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
+ struct Qdisc *root = qdisc_dev(q)->qdisc;
+
+ WARN_ON_ONCE(root == &noop_qdisc);
+- list_add_tail(&q->list, &root->list);
++ ASSERT_RTNL();
++ list_add_tail_rcu(&q->list, &root->list);
+ }
+ }
+ EXPORT_SYMBOL(qdisc_list_add);
+
+ void qdisc_list_del(struct Qdisc *q)
+ {
+- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
+- list_del(&q->list);
++ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
++ ASSERT_RTNL();
++ list_del_rcu(&q->list);
++ }
+ }
+ EXPORT_SYMBOL(qdisc_list_del);
+
+@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ if (n == 0)
+ return;
+ drops = max_t(int, n, 0);
++ rcu_read_lock();
+ while ((parentid = sch->parent)) {
+ if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
+- return;
++ break;
+
++ if (sch->flags & TCQ_F_NOPARENT)
++ break;
++ /* TODO: perform the search on a per txq basis */
+ sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
+ if (sch == NULL) {
+- WARN_ON(parentid != TC_H_ROOT);
+- return;
++ WARN_ON_ONCE(parentid != TC_H_ROOT);
++ break;
+ }
+ cops = sch->ops->cl_ops;
+ if (cops->qlen_notify) {
+@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ sch->q.qlen -= n;
+ __qdisc_qstats_drop(sch, drops);
+ }
++ rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+
+@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+ }
+ lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
+ if (!netif_is_multiqueue(dev))
+- sch->flags |= TCQ_F_ONETXQUEUE;
++ sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ }
+
+ sch->handle = handle;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index cb5d4ad..e82a1ad 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
+ return;
+ }
+ if (!netif_is_multiqueue(dev))
+- qdisc->flags |= TCQ_F_ONETXQUEUE;
++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ dev_queue->qdisc_sleeping = qdisc;
+ }
+
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index f3cbaec..3e82f04 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+ if (qdisc == NULL)
+ goto err;
+ priv->qdiscs[ntx] = qdisc;
+- qdisc->flags |= TCQ_F_ONETXQUEUE;
++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ }
+
+ sch->flags |= TCQ_F_MQROOT;
+@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+
+ *old = dev_graft_qdisc(dev_queue, new);
+ if (new)
+- new->flags |= TCQ_F_ONETXQUEUE;
++ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+ return 0;
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 3811a74..ad70ecf 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ goto err;
+ }
+ priv->qdiscs[i] = qdisc;
+- qdisc->flags |= TCQ_F_ONETXQUEUE;
++ qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ }
+
+ /* If the mqprio options indicate that hardware should own
+@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ *old = dev_graft_qdisc(dev_queue, new);
+
+ if (new)
+- new->flags |= TCQ_F_ONETXQUEUE;
++ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 4f15b7d..1543e39 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ if (!has_sha1)
+ return -EINVAL;
+
+- memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
+- hmacs->shmac_num_idents * sizeof(__u16));
++ for (i = 0; i < hmacs->shmac_num_idents; i++)
++ ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
+ ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
+ hmacs->shmac_num_idents * sizeof(__u16));
+ return 0;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 17bef01..3ec88be 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7375,6 +7375,13 @@ struct proto sctp_prot = {
+
+ #if IS_ENABLED(CONFIG_IPV6)
+
++#include <net/transp_v6.h>
++static void sctp_v6_destroy_sock(struct sock *sk)
++{
++ sctp_destroy_sock(sk);
++ inet6_destroy_sock(sk);
++}
++
+ struct proto sctpv6_prot = {
+ .name = "SCTPv6",
+ .owner = THIS_MODULE,
+@@ -7384,7 +7391,7 @@ struct proto sctpv6_prot = {
+ .accept = sctp_accept,
+ .ioctl = sctp_ioctl,
+ .init = sctp_init_sock,
+- .destroy = sctp_destroy_sock,
++ .destroy = sctp_v6_destroy_sock,
+ .shutdown = sctp_shutdown,
+ .setsockopt = sctp_setsockopt,
+ .getsockopt = sctp_getsockopt,
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index cd7c5f1..86f2e7c 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -159,8 +159,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+ struct sk_buff *clone;
+ struct rtable *rt;
+
+- if (skb_headroom(skb) < UDP_MIN_HEADROOM)
+- pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
++ if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
++ err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
++ if (err)
++ goto tx_error;
++ }
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 94f6582..128b098 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -326,6 +326,118 @@ found:
+ return s;
+ }
+
++/* Support code for asymmetrically connected dgram sockets
++ *
++ * If a datagram socket is connected to a socket not itself connected
++ * to the first socket (eg, /dev/log), clients may only enqueue more
++ * messages if the present receive queue of the server socket is not
++ * "too large". This means there's a second writeability condition
++ * poll and sendmsg need to test. The dgram recv code will do a wake
++ * up on the peer_wait wait queue of a socket upon reception of a
++ * datagram which needs to be propagated to sleeping would-be writers
++ * since these might not have sent anything so far. This can't be
++ * accomplished via poll_wait because the lifetime of the server
++ * socket might be less than that of its clients if these break their
++ * association with it or if the server socket is closed while clients
++ * are still connected to it and there's no way to inform "a polling
++ * implementation" that it should let go of a certain wait queue
++ *
++ * In order to propagate a wake up, a wait_queue_t of the client
++ * socket is enqueued on the peer_wait queue of the server socket
++ * whose wake function does a wake_up on the ordinary client socket
++ * wait queue. This connection is established whenever a write (or
++ * poll for write) hit the flow control condition and broken when the
++ * association to the server socket is dissolved or after a wake up
++ * was relayed.
++ */
++
++static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
++ void *key)
++{
++ struct unix_sock *u;
++ wait_queue_head_t *u_sleep;
++
++ u = container_of(q, struct unix_sock, peer_wake);
++
++ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
++ q);
++ u->peer_wake.private = NULL;
++
++ /* relaying can only happen while the wq still exists */
++ u_sleep = sk_sleep(&u->sk);
++ if (u_sleep)
++ wake_up_interruptible_poll(u_sleep, key);
++
++ return 0;
++}
++
++static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
++{
++ struct unix_sock *u, *u_other;
++ int rc;
++
++ u = unix_sk(sk);
++ u_other = unix_sk(other);
++ rc = 0;
++ spin_lock(&u_other->peer_wait.lock);
++
++ if (!u->peer_wake.private) {
++ u->peer_wake.private = other;
++ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
++
++ rc = 1;
++ }
++
++ spin_unlock(&u_other->peer_wait.lock);
++ return rc;
++}
++
++static void unix_dgram_peer_wake_disconnect(struct sock *sk,
++ struct sock *other)
++{
++ struct unix_sock *u, *u_other;
++
++ u = unix_sk(sk);
++ u_other = unix_sk(other);
++ spin_lock(&u_other->peer_wait.lock);
++
++ if (u->peer_wake.private == other) {
++ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
++ u->peer_wake.private = NULL;
++ }
++
++ spin_unlock(&u_other->peer_wait.lock);
++}
++
++static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
++ struct sock *other)
++{
++ unix_dgram_peer_wake_disconnect(sk, other);
++ wake_up_interruptible_poll(sk_sleep(sk),
++ POLLOUT |
++ POLLWRNORM |
++ POLLWRBAND);
++}
++
++/* preconditions:
++ * - unix_peer(sk) == other
++ * - association is stable
++ */
++static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
++{
++ int connected;
++
++ connected = unix_dgram_peer_wake_connect(sk, other);
++
++ if (unix_recvq_full(other))
++ return 1;
++
++ if (connected)
++ unix_dgram_peer_wake_disconnect(sk, other);
++
++ return 0;
++}
++
+ static inline int unix_writable(struct sock *sk)
+ {
+ return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+@@ -430,6 +542,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ skpair->sk_state_change(skpair);
+ sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
+ }
++
++ unix_dgram_peer_wake_disconnect(sk, skpair);
+ sock_put(skpair); /* It may now die */
+ unix_peer(sk) = NULL;
+ }
+@@ -440,6 +554,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ if (state == TCP_LISTEN)
+ unix_release_sock(skb->sk, 1);
+ /* passed fds are erased in the kfree_skb hook */
++ UNIXCB(skb).consumed = skb->len;
+ kfree_skb(skb);
+ }
+
+@@ -664,6 +779,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
+ INIT_LIST_HEAD(&u->link);
+ mutex_init(&u->readlock); /* single task reading lock */
+ init_waitqueue_head(&u->peer_wait);
++ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
+ unix_insert_socket(unix_sockets_unbound(sk), sk);
+ out:
+ if (sk == NULL)
+@@ -1031,6 +1147,8 @@ restart:
+ if (unix_peer(sk)) {
+ struct sock *old_peer = unix_peer(sk);
+ unix_peer(sk) = other;
++ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
++
+ unix_state_double_unlock(sk, other);
+
+ if (other != old_peer)
+@@ -1432,6 +1550,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
+ return err;
+ }
+
++static bool unix_passcred_enabled(const struct socket *sock,
++ const struct sock *other)
++{
++ return test_bit(SOCK_PASSCRED, &sock->flags) ||
++ !other->sk_socket ||
++ test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
++}
++
+ /*
+ * Some apps rely on write() giving SCM_CREDENTIALS
+ * We include credentials if source or destination socket
+@@ -1442,14 +1568,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
+ {
+ if (UNIXCB(skb).pid)
+ return;
+- if (test_bit(SOCK_PASSCRED, &sock->flags) ||
+- !other->sk_socket ||
+- test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
++ if (unix_passcred_enabled(sock, other)) {
+ UNIXCB(skb).pid = get_pid(task_tgid(current));
+ current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
+ }
+ }
+
++static int maybe_init_creds(struct scm_cookie *scm,
++ struct socket *socket,
++ const struct sock *other)
++{
++ int err;
++ struct msghdr msg = { .msg_controllen = 0 };
++
++ err = scm_send(socket, &msg, scm, false);
++ if (err)
++ return err;
++
++ if (unix_passcred_enabled(socket, other)) {
++ scm->pid = get_pid(task_tgid(current));
++ current_uid_gid(&scm->creds.uid, &scm->creds.gid);
++ }
++ return err;
++}
++
++static bool unix_skb_scm_eq(struct sk_buff *skb,
++ struct scm_cookie *scm)
++{
++ const struct unix_skb_parms *u = &UNIXCB(skb);
++
++ return u->pid == scm->pid &&
++ uid_eq(u->uid, scm->creds.uid) &&
++ gid_eq(u->gid, scm->creds.gid) &&
++ unix_secdata_eq(scm, skb);
++}
++
+ /*
+ * Send AF_UNIX data.
+ */
+@@ -1470,6 +1623,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie scm;
+ int max_level;
+ int data_len = 0;
++ int sk_locked;
+
+ wait_for_unix_gc();
+ err = scm_send(sock, msg, &scm, false);
+@@ -1548,12 +1702,14 @@ restart:
+ goto out_free;
+ }
+
++ sk_locked = 0;
+ unix_state_lock(other);
++restart_locked:
+ err = -EPERM;
+ if (!unix_may_send(sk, other))
+ goto out_unlock;
+
+- if (sock_flag(other, SOCK_DEAD)) {
++ if (unlikely(sock_flag(other, SOCK_DEAD))) {
+ /*
+ * Check with 1003.1g - what should
+ * datagram error
+@@ -1561,10 +1717,14 @@ restart:
+ unix_state_unlock(other);
+ sock_put(other);
+
++ if (!sk_locked)
++ unix_state_lock(sk);
++
+ err = 0;
+- unix_state_lock(sk);
+ if (unix_peer(sk) == other) {
+ unix_peer(sk) = NULL;
++ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
++
+ unix_state_unlock(sk);
+
+ unix_dgram_disconnected(sk, other);
+@@ -1590,21 +1750,38 @@ restart:
+ goto out_unlock;
+ }
+
+- if (unix_peer(other) != sk && unix_recvq_full(other)) {
+- if (!timeo) {
+- err = -EAGAIN;
+- goto out_unlock;
++ if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++ if (timeo) {
++ timeo = unix_wait_for_peer(other, timeo);
++
++ err = sock_intr_errno(timeo);
++ if (signal_pending(current))
++ goto out_free;
++
++ goto restart;
+ }
+
+- timeo = unix_wait_for_peer(other, timeo);
++ if (!sk_locked) {
++ unix_state_unlock(other);
++ unix_state_double_lock(sk, other);
++ }
+
+- err = sock_intr_errno(timeo);
+- if (signal_pending(current))
+- goto out_free;
++ if (unix_peer(sk) != other ||
++ unix_dgram_peer_wake_me(sk, other)) {
++ err = -EAGAIN;
++ sk_locked = 1;
++ goto out_unlock;
++ }
+
+- goto restart;
++ if (!sk_locked) {
++ sk_locked = 1;
++ goto restart_locked;
++ }
+ }
+
++ if (unlikely(sk_locked))
++ unix_state_unlock(sk);
++
+ if (sock_flag(other, SOCK_RCVTSTAMP))
+ __net_timestamp(skb);
+ maybe_add_creds(skb, sock, other);
+@@ -1618,6 +1795,8 @@ restart:
+ return len;
+
+ out_unlock:
++ if (sk_locked)
++ unix_state_unlock(sk);
+ unix_state_unlock(other);
+ out_free:
+ kfree_skb(skb);
+@@ -1739,8 +1918,10 @@ out_err:
+ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+ int offset, size_t size, int flags)
+ {
+- int err = 0;
+- bool send_sigpipe = true;
++ int err;
++ bool send_sigpipe = false;
++ bool init_scm = true;
++ struct scm_cookie scm;
+ struct sock *other, *sk = socket->sk;
+ struct sk_buff *skb, *newskb = NULL, *tail = NULL;
+
+@@ -1758,7 +1939,7 @@ alloc_skb:
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+ &err, 0);
+ if (!newskb)
+- return err;
++ goto err;
+ }
+
+ /* we must acquire readlock as we modify already present
+@@ -1767,12 +1948,12 @@ alloc_skb:
+ err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+ if (err) {
+ err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
+- send_sigpipe = false;
+ goto err;
+ }
+
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ err = -EPIPE;
++ send_sigpipe = true;
+ goto err_unlock;
+ }
+
+@@ -1781,23 +1962,34 @@ alloc_skb:
+ if (sock_flag(other, SOCK_DEAD) ||
+ other->sk_shutdown & RCV_SHUTDOWN) {
+ err = -EPIPE;
++ send_sigpipe = true;
+ goto err_state_unlock;
+ }
+
++ if (init_scm) {
++ err = maybe_init_creds(&scm, socket, other);
++ if (err)
++ goto err_state_unlock;
++ init_scm = false;
++ }
++
+ skb = skb_peek_tail(&other->sk_receive_queue);
+ if (tail && tail == skb) {
+ skb = newskb;
+- } else if (!skb) {
+- if (newskb)
++ } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
++ if (newskb) {
+ skb = newskb;
+- else
++ } else {
++ tail = skb;
+ goto alloc_skb;
++ }
+ } else if (newskb) {
+ /* this is fast path, we don't necessarily need to
+ * call to kfree_skb even though with newskb == NULL
+ * this - does no harm
+ */
+ consume_skb(newskb);
++ newskb = NULL;
+ }
+
+ if (skb_append_pagefrags(skb, page, offset, size)) {
+@@ -1810,14 +2002,20 @@ alloc_skb:
+ skb->truesize += size;
+ atomic_add(size, &sk->sk_wmem_alloc);
+
+- if (newskb)
++ if (newskb) {
++ err = unix_scm_to_skb(&scm, skb, false);
++ if (err)
++ goto err_state_unlock;
++ spin_lock(&other->sk_receive_queue.lock);
+ __skb_queue_tail(&other->sk_receive_queue, newskb);
++ spin_unlock(&other->sk_receive_queue.lock);
++ }
+
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->readlock);
+
+ other->sk_data_ready(other);
+-
++ scm_destroy(&scm);
+ return size;
+
+ err_state_unlock:
+@@ -1828,6 +2026,8 @@ err:
+ kfree_skb(newskb);
+ if (send_sigpipe && !(flags & MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
++ if (!init_scm)
++ scm_destroy(&scm);
+ return err;
+ }
+
+@@ -2071,6 +2271,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
+
+ do {
+ int chunk;
++ bool drop_skb;
+ struct sk_buff *skb, *last;
+
+ unix_state_lock(sk);
+@@ -2130,10 +2331,7 @@ unlock:
+
+ if (check_creds) {
+ /* Never glue messages from different writers */
+- if ((UNIXCB(skb).pid != scm.pid) ||
+- !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
+- !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
+- !unix_secdata_eq(&scm, skb))
++ if (!unix_skb_scm_eq(skb, &scm))
+ break;
+ } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+ /* Copy credentials */
+@@ -2151,7 +2349,11 @@ unlock:
+ }
+
+ chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
++ skb_get(skb);
+ chunk = state->recv_actor(skb, skip, chunk, state);
++ drop_skb = !unix_skb_len(skb);
++ /* skb is only safe to use if !drop_skb */
++ consume_skb(skb);
+ if (chunk < 0) {
+ if (copied == 0)
+ copied = -EFAULT;
+@@ -2160,6 +2362,18 @@ unlock:
+ copied += chunk;
+ size -= chunk;
+
++ if (drop_skb) {
++ /* the skb was touched by a concurrent reader;
++ * we should not expect anything from this skb
++ * anymore and assume it invalid - we can be
++ * sure it was dropped from the socket queue
++ *
++ * let's report a short read
++ */
++ err = 0;
++ break;
++ }
++
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ UNIXCB(skb).consumed += chunk;
+@@ -2453,14 +2667,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+ return mask;
+
+ writable = unix_writable(sk);
+- other = unix_peer_get(sk);
+- if (other) {
+- if (unix_peer(other) != sk) {
+- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+- if (unix_recvq_full(other))
+- writable = 0;
+- }
+- sock_put(other);
++ if (writable) {
++ unix_state_lock(sk);
++
++ other = unix_peer(sk);
++ if (other && unix_peer(other) != sk &&
++ unix_recvq_full(other) &&
++ unix_dgram_peer_wake_me(sk, other))
++ writable = 0;
++
++ unix_state_unlock(sk);
+ }
+
+ if (writable)
+diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
+index edfc1b8..656ce39 100644
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -25,7 +25,7 @@ config SND_ALS300
+ select SND_PCM
+ select SND_AC97_CODEC
+ select SND_OPL3_LIB
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
+
+@@ -50,7 +50,7 @@ config SND_ALI5451
+ tristate "ALi M5451 PCI Audio Controller"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for the integrated AC97 sound
+ device on motherboards using the ALi M5451 Audio Controller
+@@ -155,7 +155,7 @@ config SND_AZT3328
+ select SND_PCM
+ select SND_RAWMIDI
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for Aztech AZF3328 (PCI168)
+ soundcards.
+@@ -463,7 +463,7 @@ config SND_EMU10K1
+ select SND_HWDEP
+ select SND_RAWMIDI
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y to include support for Sound Blaster PCI 512, Live!,
+ Audigy and E-mu APS (partially supported) soundcards.
+@@ -479,7 +479,7 @@ config SND_EMU10K1X
+ tristate "Emu10k1X (Dell OEM Version)"
+ select SND_AC97_CODEC
+ select SND_RAWMIDI
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for the Dell OEM version of the
+ Sound Blaster Live!.
+@@ -513,7 +513,7 @@ config SND_ES1938
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Solo-1
+ (ES1938, ES1946, ES1969) chips.
+@@ -525,7 +525,7 @@ config SND_ES1968
+ tristate "ESS ES1968/1978 (Maestro-1/2/2E)"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Maestro
+ 1/2/2E chips.
+@@ -612,7 +612,7 @@ config SND_ICE1712
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+ select BITREVERSE
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on the
+ ICE1712 (Envy24) chip.
+@@ -700,7 +700,7 @@ config SND_LX6464ES
+ config SND_MAESTRO3
+ tristate "ESS Allegro/Maestro3"
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Maestro 3
+ (Allegro) chips.
+@@ -806,7 +806,7 @@ config SND_SIS7019
+ tristate "SiS 7019 Audio Accelerator"
+ depends on X86_32
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for the SiS 7019 Audio Accelerator.
+
+@@ -818,7 +818,7 @@ config SND_SONICVIBES
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on the S3
+ SonicVibes chip.
+@@ -830,7 +830,7 @@ config SND_TRIDENT
+ tristate "Trident 4D-Wave DX/NX; SiS 7018"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+- select ZONE_DMA
++ depends on ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on Trident
+ 4D-Wave DX/NX or SiS 7018 chips.
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index acbfbe08..f22f5c4 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -50,8 +50,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+ #define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
+ #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
+ #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
++#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
+ #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
+- || is_skylake(codec))
++ || is_skylake(codec) || is_broxton(codec))
+
+ #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
+ #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
+diff --git a/tools/net/Makefile b/tools/net/Makefile
+index ee577ea..ddf8880 100644
+--- a/tools/net/Makefile
++++ b/tools/net/Makefile
+@@ -4,6 +4,9 @@ CC = gcc
+ LEX = flex
+ YACC = bison
+
++CFLAGS += -Wall -O2
++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
++
+ %.yacc.c: %.y
+ $(YACC) -o $@ -d $<
+
+@@ -12,15 +15,13 @@ YACC = bison
+
+ all : bpf_jit_disasm bpf_dbg bpf_asm
+
+-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
++bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
+ bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
+ bpf_jit_disasm : bpf_jit_disasm.o
+
+-bpf_dbg : CFLAGS = -Wall -O2
+ bpf_dbg : LDLIBS = -lreadline
+ bpf_dbg : bpf_dbg.o
+
+-bpf_asm : CFLAGS = -Wall -O2 -I.
+ bpf_asm : LDLIBS =
+ bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
+ bpf_exp.lex.o : bpf_exp.yacc.c
diff --git a/4.2.7/4420_grsecurity-3.1-4.2.7-201512092320.patch b/4.3.3/4420_grsecurity-3.1-4.3.3-201512151908.patch
index 0e128e6..38b71b4 100644
--- a/4.2.7/4420_grsecurity-3.1-4.2.7-201512092320.patch
+++ b/4.3.3/4420_grsecurity-3.1-4.3.3-201512151908.patch
@@ -313,10 +313,10 @@ index 13f888a..250729b 100644
A typical pattern in a Kbuild file looks like this:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 1d6f045..2714987 100644
+index 22a4b68..8c70743 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
-@@ -1244,6 +1244,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -1246,6 +1246,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
Default: 1024
@@ -330,7 +330,7 @@ index 1d6f045..2714987 100644
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
-@@ -2364,6 +2371,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2379,6 +2386,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
@@ -341,7 +341,7 @@ index 1d6f045..2714987 100644
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2662,6 +2673,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2677,6 +2688,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -406,7 +406,7 @@ index 6fccb69..60c7c7a 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index f5014ea..5cc28a1 100644
+index 2070d16..0bc2be1 100644
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -501,12 +501,12 @@ index f5014ea..5cc28a1 100644
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
-+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+ $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)"
+else
+ $(CONFIG_SHELL) -x $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)"
+endif
++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
@@ -517,7 +517,7 @@ index f5014ea..5cc28a1 100644
ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
-@@ -714,7 +795,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+@@ -707,7 +788,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
@@ -526,16 +526,16 @@ index f5014ea..5cc28a1 100644
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -886,7 +967,7 @@ export mod_sign_cmd
+@@ -878,7 +959,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
--core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+-core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -936,6 +1017,8 @@ endif
+@@ -928,6 +1009,8 @@ endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -544,7 +544,7 @@ index f5014ea..5cc28a1 100644
$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -945,7 +1028,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+@@ -937,7 +1020,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -553,7 +553,7 @@ index f5014ea..5cc28a1 100644
$(Q)$(MAKE) $(build)=$@
define filechk_kernel.release
-@@ -988,10 +1071,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+@@ -980,10 +1063,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
archprepare: archheaders archscripts prepare1 scripts_basic
@@ -567,7 +567,7 @@ index f5014ea..5cc28a1 100644
prepare: prepare0
# Generate some files
-@@ -1099,6 +1185,8 @@ all: modules
+@@ -1091,6 +1177,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -576,7 +576,7 @@ index f5014ea..5cc28a1 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1114,7 +1202,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1106,7 +1194,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -585,10 +585,10 @@ index f5014ea..5cc28a1 100644
# Target to install modules
PHONY += modules_install
-@@ -1180,7 +1268,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+@@ -1172,7 +1260,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
- signing_key.priv signing_key.x509 x509.genkey \
- extra_certificates signing_key.x509.keyid \
+ signing_key.pem signing_key.priv signing_key.x509 \
+ x509.genkey extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer vmlinux-gdb.py
+ signing_key.x509.signer vmlinux-gdb.py \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
@@ -597,7 +597,7 @@ index f5014ea..5cc28a1 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1219,7 +1310,7 @@ distclean: mrproper
+@@ -1211,7 +1302,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -606,7 +606,7 @@ index f5014ea..5cc28a1 100644
-type f -print | xargs rm -f
-@@ -1385,6 +1476,8 @@ PHONY += $(module-dirs) modules
+@@ -1377,6 +1468,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -615,7 +615,7 @@ index f5014ea..5cc28a1 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1525,17 +1618,21 @@ else
+@@ -1518,17 +1611,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -641,7 +641,7 @@ index f5014ea..5cc28a1 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1547,11 +1644,15 @@ endif
+@@ -1540,11 +1637,15 @@ endif
$(build)=$(build-dir)
# Make sure the latest headers are built for Documentation
Documentation/: headers_install
@@ -660,10 +660,10 @@ index f5014ea..5cc28a1 100644
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
-index 8f8eafb..3405f46 100644
+index e8c9560..b585f83 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
-@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+@@ -251,4 +251,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
@@ -775,10 +775,10 @@ index 2fd00b7..cfd5069 100644
for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index 36dc91a..6769cb0 100644
+index 6cc0816..3dd424d 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
-@@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+@@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
generic version except that we know how to honor ADDR_LIMIT_32BIT. */
static unsigned long
@@ -792,7 +792,7 @@ index 36dc91a..6769cb0 100644
info.flags = 0;
info.length = len;
-@@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+@@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
info.high_limit = limit;
info.align_mask = 0;
info.align_offset = 0;
@@ -800,7 +800,7 @@ index 36dc91a..6769cb0 100644
return vm_unmapped_area(&info);
}
-@@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
@@ -990,10 +990,10 @@ index 4a905bd..0a4da53 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
-index bd4670d..920c97a 100644
+index 78c0621..94cd626 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
-@@ -485,6 +485,7 @@ config ARC_DBG_TLB_MISS_COUNT
+@@ -487,6 +487,7 @@ config ARC_DBG_TLB_MISS_COUNT
bool "Profile TLB Misses"
default n
select DEBUG_FS
@@ -1002,10 +1002,10 @@ index bd4670d..920c97a 100644
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index ede2526..9e12300 100644
+index 639411f..82e6320 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1770,7 +1770,7 @@ config ALIGNMENT_TRAP
+@@ -1785,7 +1785,7 @@ config ALIGNMENT_TRAP
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
@@ -1014,16 +1014,16 @@ index ede2526..9e12300 100644
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
-@@ -2006,6 +2006,7 @@ config KEXEC
- bool "Kexec system call (EXPERIMENTAL)"
+@@ -2022,6 +2022,7 @@ config KEXEC
depends on (!SMP || PM_SLEEP_SMP)
depends on !CPU_V7M
+ select KEXEC_CORE
+ depends on !GRKERNSEC_KMEM
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
-index a2e16f9..b26e911 100644
+index 0cfd7f947..63ed4c0 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -7,6 +7,7 @@ config ARM_PTDUMP
@@ -1035,7 +1035,7 @@ index a2e16f9..b26e911 100644
Say Y here if you want to show the kernel pagetable layout in a
debugfs file. This information is only useful for kernel developers
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
-index e22c119..abe7041 100644
+index fe3ef39..9406984 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
@@ -1080,7 +1080,7 @@ index e22c119..abe7041 100644
#if __LINUX_ARM_ARCH__ >= 6
-@@ -38,26 +62,50 @@
+@@ -38,38 +62,64 @@
* to ensure that the update happens.
*/
@@ -1128,17 +1128,16 @@ index e22c119..abe7041 100644
} \
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
--static inline int atomic_##op##_return(int i, atomic_t *v) \
+-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
-+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
{ \
unsigned long tmp; \
int result; \
-@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
- smp_mb(); \
+ \
prefetchw(&v->counter); \
\
- __asm__ __volatile__("@ atomic_" #op "_return\n" \
@@ -1154,17 +1153,17 @@ index e22c119..abe7041 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "Ir" (i) \
: "cc"); \
-@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
- return result; \
- }
+@@ -80,6 +130,9 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
- static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
-@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -113,12 +166,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
__asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
@@ -1192,7 +1191,7 @@ index e22c119..abe7041 100644
: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -129,14 +194,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval;
}
@@ -1231,7 +1230,7 @@ index e22c119..abe7041 100644
{ \
unsigned long flags; \
\
-@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
+@@ -145,8 +232,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
raw_local_irq_restore(flags); \
} \
@@ -1245,7 +1244,7 @@ index e22c119..abe7041 100644
{ \
unsigned long flags; \
int val; \
-@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
+@@ -159,6 +249,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return val; \
}
@@ -1255,7 +1254,7 @@ index e22c119..abe7041 100644
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
-@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -173,6 +266,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
@@ -1267,7 +1266,7 @@ index e22c119..abe7041 100644
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
-@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
+@@ -201,16 +299,38 @@ ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -1278,7 +1277,7 @@ index e22c119..abe7041 100644
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
-+ return xchg(&v->counter, new);
++ return xchg_relaxed(&v->counter, new);
+}
#define atomic_inc(v) atomic_add(1, v)
@@ -1295,18 +1294,18 @@ index e22c119..abe7041 100644
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
-+ return atomic_add_return_unchecked(1, v) == 0;
++ return atomic_add_return_unchecked_relaxed(1, v) == 0;
+}
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
-+ return atomic_add_return_unchecked(1, v);
++ return atomic_add_return_unchecked_relaxed(1, v);
+}
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-@@ -216,6 +336,14 @@ typedef struct {
+@@ -221,6 +341,14 @@ typedef struct {
long long counter;
} atomic64_t;
@@ -1321,7 +1320,7 @@ index e22c119..abe7041 100644
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -237,6 +365,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1341,7 +1340,7 @@ index e22c119..abe7041 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
__asm__ __volatile__("@ atomic64_set\n"
-@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -245,6 +386,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
);
}
@@ -1357,7 +1356,7 @@ index e22c119..abe7041 100644
#else
static inline long long atomic64_read(const atomic64_t *v)
{
-@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+@@ -259,6 +409,19 @@ static inline long long atomic64_read(const atomic64_t *v)
return result;
}
@@ -1377,7 +1376,7 @@ index e22c119..abe7041 100644
static inline void atomic64_set(atomic64_t *v, long long i)
{
long long tmp;
-@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+@@ -273,43 +436,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "r" (&v->counter), "r" (i)
: "cc");
}
@@ -1432,17 +1431,17 @@ index e22c119..abe7041 100644
} \
-#define ATOMIC64_OP_RETURN(op, op1, op2) \
--static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
-+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
+ static inline long long \
+-atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
++atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
{ \
long long result; \
unsigned long tmp; \
-@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
- smp_mb(); \
+ \
prefetchw(&v->counter); \
\
- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
@@ -1459,7 +1458,7 @@ index e22c119..abe7041 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
: "cc"); \
-@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+@@ -317,6 +510,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
return result; \
}
@@ -1469,7 +1468,7 @@ index e22c119..abe7041 100644
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_OP_RETURN(op, op1, op2)
-@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
+@@ -336,7 +532,12 @@ ATOMIC64_OP(xor, eor, eor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -1480,13 +1479,14 @@ index e22c119..abe7041 100644
+#undef __OVERFLOW_POST_RETURN
+#undef __OVERFLOW_POST
- static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- long long new)
-@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
- return oldval;
+ static inline long long
+ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -362,6 +563,32 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
}
+ #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
++static inline long long
++atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
@@ -1511,10 +1511,10 @@ index e22c119..abe7041 100644
+ return oldval;
+}
+
- static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
long long result;
-@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+@@ -385,21 +612,35 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long result;
@@ -1556,7 +1556,7 @@ index e22c119..abe7041 100644
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter)
: "cc");
-@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -423,13 +664,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
@@ -1585,7 +1585,7 @@ index e22c119..abe7041 100644
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (u), "r" (a)
: "cc");
-@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+@@ -442,10 +695,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
@@ -1599,19 +1599,6 @@ index e22c119..abe7041 100644
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
-index 6c2327e..85beac4 100644
---- a/arch/arm/include/asm/barrier.h
-+++ b/arch/arm/include/asm/barrier.h
-@@ -67,7 +67,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
@@ -1636,7 +1623,7 @@ index 75fe66b..ba3dee4 100644
#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
-index 4812cda..9da8116 100644
+index d5525bf..e55725d 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
@@ -1674,10 +1661,10 @@ index 5233151..87a71fa 100644
/*
* Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
-index 1692a05..1835802 100644
+index 916a274..f988f55 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
-@@ -107,6 +107,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+@@ -105,6 +105,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -1702,17 +1689,21 @@ index 0f84249..8e83c55 100644
struct of_cpuidle_method {
const char *method;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
-index 6ddbe44..b5e38b1a 100644
+index fc8ba16..805a183 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
-@@ -48,18 +48,37 @@
+@@ -42,7 +42,7 @@
+ #define DOMAIN_USER 1
+ #define DOMAIN_IO 0
+ #endif
+-#define DOMAIN_VECTORS 3
++//#define DOMAIN_VECTORS 3
+
+ /*
* Domain types
- */
- #define DOMAIN_NOACCESS 0
--#define DOMAIN_CLIENT 1
+@@ -51,8 +51,26 @@
+ #define DOMAIN_CLIENT 1
#ifdef CONFIG_CPU_USE_DOMAINS
-+#define DOMAIN_USERCLIENT 1
-+#define DOMAIN_KERNELCLIENT 1
#define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
#else
@@ -1736,32 +1727,16 @@ index 6ddbe44..b5e38b1a 100644
+
#endif
- #define domain_val(dom,type) ((type) << (2*(dom)))
-
- #ifndef __ASSEMBLY__
-
--#ifdef CONFIG_CPU_USE_DOMAINS
-+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
- static inline void set_domain(unsigned val)
- {
- asm volatile(
-@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
- isb();
- }
-
--#define modify_domain(dom,type) \
-- do { \
-- struct thread_info *thread = current_thread_info(); \
-- unsigned int domain = thread->cpu_domain; \
-- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
-- thread->cpu_domain = domain | domain_val(dom, type); \
-- set_domain(thread->cpu_domain); \
-- } while (0)
--
-+extern void modify_domain(unsigned int dom, unsigned int type);
+ #define domain_mask(dom) ((3) << (2 * (dom)))
+@@ -62,7 +80,7 @@
+ #define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#else
- static inline void set_domain(unsigned val) { }
- static inline void modify_domain(unsigned dom, unsigned type) { }
+ #define DACR_INIT \
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ff..f60b47b 100644
--- a/arch/arm/include/asm/elf.h
@@ -1797,10 +1772,10 @@ index de53547..52b9a28 100644
(unsigned long)(dest_buf) + (size)); \
\
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
-index 5eed828..365e018 100644
+index 6795368..b784325 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
-@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+@@ -52,6 +52,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -1809,8 +1784,8 @@ index 5eed828..365e018 100644
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
-@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- : "cc", "memory");
+@@ -71,6 +73,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ uaccess_restore(__ua_flags);
smp_mb();
+ pax_close_userland();
@@ -1818,25 +1793,25 @@ index 5eed828..365e018 100644
*uval = val;
return ret;
}
-@@ -94,6 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+@@ -107,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
preempt_disable();
+ pax_open_userland();
+
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
-@@ -104,6 +110,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+@@ -119,6 +125,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "cc", "memory");
+ uaccess_restore(__ua_flags);
+ pax_close_userland();
+
*uval = val;
preempt_enable();
-@@ -131,6 +139,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+@@ -146,6 +154,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
preempt_disable();
#endif
pagefault_disable();
@@ -1844,7 +1819,7 @@ index 5eed828..365e018 100644
switch (op) {
case FUTEX_OP_SET:
-@@ -152,6 +161,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+@@ -167,6 +176,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
@@ -1909,7 +1884,7 @@ index f98c7f3..e5c626d 100644
MT_MEMORY_DMA_READY,
};
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
-index 563b92f..689d58e 100644
+index c2bf24f..69e437c 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -39,7 +39,7 @@ struct outer_cache_fns {
@@ -1995,10 +1970,10 @@ index 19cfab5..3f5c7e9 100644
pmdval_t prot)
{
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
-index 5e68278..1869bae 100644
+index d0131ee..23a0939 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
-@@ -27,7 +27,7 @@
+@@ -28,7 +28,7 @@
/*
* - section
*/
@@ -2007,7 +1982,7 @@ index 5e68278..1869bae 100644
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
-@@ -39,6 +39,7 @@
+@@ -40,6 +40,7 @@
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
#define PMD_SECT_AF (_AT(pmdval_t, 0))
@@ -2015,7 +1990,7 @@ index 5e68278..1869bae 100644
#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
-@@ -68,6 +69,7 @@
+@@ -69,6 +70,7 @@
* - extended small page/tiny page
*/
#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
@@ -2155,21 +2130,8 @@ index f403541..b10df68 100644
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
-diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
-index c25ef3e..735f14b 100644
---- a/arch/arm/include/asm/psci.h
-+++ b/arch/arm/include/asm/psci.h
-@@ -32,7 +32,7 @@ struct psci_operations {
- int (*affinity_info)(unsigned long target_affinity,
- unsigned long lowest_affinity_level);
- int (*migrate_info_type)(void);
--};
-+} __no_const;
-
- extern struct psci_operations psci_ops;
- extern struct smp_operations psci_smp_ops;
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
-index 2f3ac1b..67182ae0 100644
+index ef35665..d69146d 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -108,7 +108,7 @@ struct smp_operations {
@@ -2182,36 +2144,21 @@ index 2f3ac1b..67182ae0 100644
struct of_cpu_method {
const char *method;
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index bd32ede..bd90a0b 100644
+index 776757d..5a598df 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
-@@ -74,9 +74,9 @@ struct thread_info {
- .flags = 0, \
- .preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
-- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
-+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
-+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
-+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
- }
-
- #define init_thread_info (init_thread_union.thread_info)
-@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- #define TIF_SYSCALL_AUDIT 9
- #define TIF_SYSCALL_TRACEPOINT 10
- #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
--#define TIF_NOHZ 12 /* in adaptive nohz mode */
+@@ -143,6 +143,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+/* within 8 bits of TIF_SYSCALL_TRACE
+ * to meet flexible second operand requirements
+ */
-+#define TIF_GRSEC_SETXID 12
-+#define TIF_NOHZ 13 /* in adaptive nohz mode */
++#define TIF_GRSEC_SETXID 8
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
- #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
- #define TIF_RESTORE_SIGMASK 20
-@@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -158,10 +162,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
@@ -2247,7 +2194,7 @@ index 5f833f7..76e6644 100644
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index 74b17d0..7e6da4b 100644
+index 8cc85a4..5f24fe2 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -18,6 +18,7 @@
@@ -2258,7 +2205,7 @@ index 74b17d0..7e6da4b 100644
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
-@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
+@@ -99,11 +100,38 @@ extern int __put_user_bad(void);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
@@ -2298,7 +2245,7 @@ index 74b17d0..7e6da4b 100644
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__("cmp %2, %0; movlo %0, #0" \
-@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
+@@ -229,8 +257,12 @@ extern int __get_user_64t_4(void *);
#define get_user(x, p) \
({ \
@@ -2312,7 +2259,7 @@ index 74b17d0..7e6da4b 100644
})
extern int __put_user_1(void *, unsigned int);
-@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
+@@ -277,8 +309,12 @@ extern int __put_user_8(void *, unsigned long long);
#define put_user(x, p) \
({ \
@@ -2326,7 +2273,7 @@ index 74b17d0..7e6da4b 100644
})
#else /* CONFIG_MMU */
-@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
+@@ -302,6 +338,7 @@ static inline void set_fs(mm_segment_t fs)
#endif /* CONFIG_MMU */
@@ -2334,7 +2281,7 @@ index 74b17d0..7e6da4b 100644
#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
#define user_addr_max() \
-@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
+@@ -319,13 +356,17 @@ static inline void set_fs(mm_segment_t fs)
#define __get_user(x, ptr) \
({ \
long __gu_err = 0; \
@@ -2352,7 +2299,7 @@ index 74b17d0..7e6da4b 100644
(void) 0; \
})
-@@ -368,13 +409,17 @@ do { \
+@@ -392,13 +433,17 @@ do { \
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
@@ -2370,57 +2317,57 @@ index 74b17d0..7e6da4b 100644
(void) 0; \
})
-@@ -474,11 +519,44 @@ do { \
+@@ -490,35 +535,41 @@ do { \
#ifdef CONFIG_MMU
--extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
--extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
--extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
--extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
--extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
-+extern unsigned long __must_check __size_overflow(3) ___copy_from_user(void *to, const void __user *from, unsigned long n);
-+extern unsigned long __must_check __size_overflow(3) ___copy_to_user(void __user *to, const void *from, unsigned long n);
-+
-+static inline unsigned long __must_check __size_overflow(3) __copy_from_user(void *to, const void __user *from, unsigned long n)
-+{
-+ unsigned long ret;
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+-static inline unsigned long __must_check
++static inline unsigned long __must_check __size_overflow(3)
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- unsigned int __ua_flags = uaccess_save_and_enable();
++ unsigned int __ua_flags;
+
+ check_object_size(to, n, false);
-+ pax_open_userland();
-+ ret = ___copy_from_user(to, from, n);
-+ pax_close_userland();
-+ return ret;
-+}
-+
-+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-+{
-+ unsigned long ret;
++ __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+ }
+
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+ static inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- unsigned int __ua_flags = uaccess_save_and_enable();
++ unsigned int __ua_flags;
+
+ check_object_size(from, n, true);
-+ pax_open_userland();
-+ ret = ___copy_to_user(to, from, n);
-+ pax_close_userland();
-+ return ret;
-+}
-+
-+extern unsigned long __must_check __size_overflow(3) __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-+extern unsigned long __must_check __size_overflow(2) ___clear_user(void __user *addr, unsigned long n);
-+extern unsigned long __must_check __size_overflow(2) __clear_user_std(void __user *addr, unsigned long n);
-+
-+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
-+{
-+ unsigned long ret;
-+ pax_open_userland();
-+ ret = ___clear_user(addr, n);
-+ pax_close_userland();
-+ return ret;
-+}
-+
- #else
- #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
- #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
-@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
++ __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+ }
+
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ arm_clear_user(void __user *addr, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ __clear_user_std(void __user *addr, unsigned long n);
+
+ static inline unsigned long __must_check
+@@ -538,6 +589,9 @@ __clear_user(void __user *addr, unsigned long n)
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -2430,7 +2377,7 @@ index 74b17d0..7e6da4b 100644
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else /* security hole - plug it */
-@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+@@ -547,6 +601,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -2454,7 +2401,7 @@ index 5af0ed1..cea83883 100644
#define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
-index 5e5a51a..b21eeef 100644
+index f89811f..1d110d1 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(arm_delay_ops);
@@ -2466,19 +2413,6 @@ index 5e5a51a..b21eeef 100644
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);
-@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
- #ifdef CONFIG_MMU
- EXPORT_SYMBOL(copy_page);
-
--EXPORT_SYMBOL(__copy_from_user);
--EXPORT_SYMBOL(__copy_to_user);
--EXPORT_SYMBOL(__clear_user);
-+EXPORT_SYMBOL(___copy_from_user);
-+EXPORT_SYMBOL(___copy_to_user);
-+EXPORT_SYMBOL(___clear_user);
-
- EXPORT_SYMBOL(__get_user_1);
- EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 318da33..373689f 100644
--- a/arch/arm/kernel/cpuidle.c
@@ -2493,7 +2427,7 @@ index 318da33..373689f 100644
/**
* arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index cb4fb1e..dc7fcaf 100644
+index 3e1c26e..9ea61e6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -50,6 +50,87 @@
@@ -2600,19 +2534,17 @@ index cb4fb1e..dc7fcaf 100644
mov r1, #\reason
.endm
-@@ -152,7 +237,11 @@ ENDPROC(__und_invalid)
- .macro svc_entry, stack_hole=0, trace=1
+@@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
+
+ pax_enter_kernel
+
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
-+
+ sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
- SPFIX( mov r0, sp )
-@@ -167,7 +256,12 @@ ENDPROC(__und_invalid)
+@@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
@@ -2620,13 +2552,13 @@ index cb4fb1e..dc7fcaf 100644
+ @ offset sp by 8 as done in pax_enter_kernel
+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
+#else
- add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+#endif
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
-@@ -371,6 +465,9 @@ ENDPROC(__fiq_abt)
- .macro usr_entry, trace=1
+@@ -376,6 +469,9 @@ ENDPROC(__fiq_abt)
+ .macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
+
@@ -2635,7 +2567,7 @@ index cb4fb1e..dc7fcaf 100644
sub sp, sp, #S_FRAME_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
-@@ -481,7 +578,9 @@ __und_usr:
+@@ -490,7 +586,9 @@ __und_usr:
tst r3, #PSR_T_BIT @ Thumb mode?
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
@@ -2644,8 +2576,8 @@ index cb4fb1e..dc7fcaf 100644
+ pax_close_userland
ARM_BE8(rev r0, r0) @ little endian instruction
- @ r0 = 32-bit ARM instruction which caused the exception
-@@ -515,11 +614,15 @@ __und_usr_thumb:
+ uaccess_disable ip
+@@ -526,11 +624,15 @@ __und_usr_thumb:
*/
.arch armv6t2
#endif
@@ -2654,14 +2586,14 @@ index cb4fb1e..dc7fcaf 100644
+ pax_close_userland
ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_fault_16 @ 16bit undefined instruction
+ blo __und_usr_fault_16_pan @ 16bit undefined instruction
+ pax_open_userland
3: ldrht r0, [r2]
+ pax_close_userland
ARM_BE8(rev16 r0, r0) @ little endian instruction
+ uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
- str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
-@@ -549,7 +652,8 @@ ENDPROC(__und_usr)
+@@ -561,7 +663,8 @@ ENDPROC(__und_usr)
*/
.pushsection .text.fixup, "ax"
.align 2
@@ -2671,16 +2603,16 @@ index cb4fb1e..dc7fcaf 100644
ret r9
.popsection
.pushsection __ex_table,"a"
-@@ -769,7 +873,7 @@ ENTRY(__switch_to)
+@@ -783,7 +886,7 @@ ENTRY(__switch_to)
THUMB( str lr, [ip], #4 )
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ mrc p15, 0, r6, c3, c0, 0 @ Get domain register
+ str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN]
- #endif
- switch_tls r1, r4, r5, r3, r7
-@@ -778,7 +882,7 @@ ENTRY(__switch_to)
+@@ -794,7 +897,7 @@ ENTRY(__switch_to)
ldr r8, =__stack_chk_guard
ldr r7, [r7, #TSK_STACK_CANARY]
#endif
@@ -2690,7 +2622,7 @@ index cb4fb1e..dc7fcaf 100644
#endif
mov r5, r0
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
-index b48dd4f..9f9a72f 100644
+index 30a7228..fc55cca 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -11,18 +11,46 @@
@@ -2709,28 +2641,28 @@ index b48dd4f..9f9a72f 100644
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ @ save regs
+ stmdb sp!, {r1, r2}
-+ @ read DACR from cpu_domain into r1
-+ mov r2, sp
-+ @ assume 8K pages, since we have to split the immediate in two
-+ bic r2, r2, #(0x1fc0)
-+ bic r2, r2, #(0x3f)
-+ ldr r1, [r2, #TI_CPU_DOMAIN]
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
+#ifdef CONFIG_PAX_KERNEXEC
-+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
-+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
+#endif
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ @ set current DOMAIN_USER to DOMAIN_UDEREF
-+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
-+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
-+#endif
-+ @ write r1 to current_thread_info()->cpu_domain
-+ str r1, [r2, #TI_CPU_DOMAIN]
-+ @ write r1 to DACR
-+ mcr p15, 0, r1, c3, c0, 0
-+ @ instruction sync
-+ instr_sync
++ @ set current DOMAIN_USER to DOMAIN_UDEREF
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
+ @ restore regs
+ ldmia sp!, {r1, r2}
+#endif
@@ -2741,11 +2673,11 @@ index b48dd4f..9f9a72f 100644
-
-
.align 5
+ #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
- * This is the fast syscall return path. We do as little as
-@@ -174,6 +202,12 @@ ENTRY(vector_swi)
- USER( ldr scno, [lr, #-4] ) @ get SWI instruction
- #endif
+@@ -199,6 +227,12 @@ ENTRY(vector_swi)
+
+ uaccess_disable tbl
+ /*
+ * do this here to avoid a performance hit of wrapping the code above
@@ -2757,10 +2689,10 @@ index b48dd4f..9f9a72f 100644
#if defined(CONFIG_OABI_COMPAT)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
-index 1a0045a..9b4f34d 100644
+index 0d22ad2..d776aa0 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
-@@ -196,6 +196,60 @@
+@@ -196,6 +196,59 @@
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
@@ -2817,30 +2749,18 @@ index 1a0045a..9b4f34d 100644
+ ldmia sp!, {r0, r1}
+#endif
+ .endm
-+
- #ifndef CONFIG_THUMB2_KERNEL
+
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
-@@ -215,6 +269,9 @@
- blne trace_hardirqs_off
- #endif
+@@ -217,6 +270,8 @@
.endif
-+
+ uaccess_restore
+
+ pax_exit_kernel
+
+ #ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
msr spsr_cxsf, \rpsr
- #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
- @ We must avoid clrex due to Cortex-A15 erratum #830321
-@@ -291,6 +348,9 @@
- blne trace_hardirqs_off
- #endif
- .endif
-+
-+ pax_exit_kernel
-+
- ldr lr, [sp, #S_SP] @ top of the stack
- ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
-
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 059c3da..8e45cfc 100644
--- a/arch/arm/kernel/fiq.c
@@ -2856,19 +2776,6 @@ index 059c3da..8e45cfc 100644
if (!cache_is_vipt_nonaliasing())
flush_icache_range((unsigned long)base + offset, offset +
length);
-diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
-index 29e2991..7bc5757 100644
---- a/arch/arm/kernel/head.S
-+++ b/arch/arm/kernel/head.S
-@@ -467,7 +467,7 @@ __enable_mmu:
- mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
-+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
- mcr p15, 0, r5, c3, c0, 0 @ load domain access register
- mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
- #endif
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 097e2e2..3927085 100644
--- a/arch/arm/kernel/module-plts.c
@@ -2969,10 +2876,10 @@ index 69bda1a..755113a 100644
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index f192a2a..1a40523 100644
+index 7a7c4ce..bc91093 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -105,8 +105,8 @@ void __show_regs(struct pt_regs *regs)
+@@ -98,8 +98,8 @@ void __show_regs(struct pt_regs *regs)
show_regs_print_info(KERN_DEFAULT);
@@ -2983,7 +2890,7 @@ index f192a2a..1a40523 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -283,12 +283,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -309,12 +309,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -2996,7 +2903,7 @@ index f192a2a..1a40523 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -304,7 +298,7 @@ static struct vm_area_struct gate_vma = {
+@@ -330,7 +324,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -3005,7 +2912,7 @@ index f192a2a..1a40523 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -333,91 +327,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+@@ -359,91 +353,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return is_gate_vma(vma) ? "[vectors]" : NULL;
}
@@ -3099,19 +3006,6 @@ index f192a2a..1a40523 100644
+ return 0;
}
#endif
-diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
-index f90fdf4..24e8c84 100644
---- a/arch/arm/kernel/psci.c
-+++ b/arch/arm/kernel/psci.c
-@@ -26,7 +26,7 @@
- #include <asm/psci.h>
- #include <asm/system_misc.h>
-
--struct psci_operations psci_ops;
-+struct psci_operations psci_ops __read_only;
-
- static int (*invoke_psci_fn)(u32, u32, u32, u32);
- typedef int (*psci_initcall_t)(const struct device_node *);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ef9119f..31995a3 100644
--- a/arch/arm/kernel/ptrace.c
@@ -3149,10 +3043,10 @@ index 3826935..8ed63ed 100644
/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index 36c18b7..0d78292 100644
+index 20edd34..e18ac81 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
-@@ -108,21 +108,23 @@ EXPORT_SYMBOL(elf_hwcap);
+@@ -110,21 +110,23 @@ EXPORT_SYMBOL(elf_hwcap);
unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
@@ -3181,7 +3075,7 @@ index 36c18b7..0d78292 100644
EXPORT_SYMBOL(outer_cache);
#endif
-@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
+@@ -255,9 +257,13 @@ static int __get_cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
@@ -3198,7 +3092,7 @@ index 36c18b7..0d78292 100644
cpu_arch = CPU_ARCH_ARMv6;
else
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index 586eef2..61aabd4 100644
+index 7b8f214..ece8e28 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -24,8 +24,6 @@
@@ -3210,7 +3104,7 @@ index 586eef2..61aabd4 100644
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
-@@ -390,8 +388,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+@@ -388,8 +386,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
* except when the MPU has protected the vectors
* page from PL0
*/
@@ -3220,7 +3114,7 @@ index 586eef2..61aabd4 100644
} else
#endif
{
-@@ -597,33 +594,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+@@ -601,33 +598,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
@@ -3255,10 +3149,10 @@ index 586eef2..61aabd4 100644
- return page;
-}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 3d6b782..8b3baeb 100644
+index 48185a7..426ae3a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -76,7 +76,7 @@ enum ipi_msg_type {
+@@ -78,7 +78,7 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
@@ -3291,7 +3185,7 @@ index b10e136..cb5edf9 100644
start, end);
itcm_present = true;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index d358226..bfd4019 100644
+index 969f9d9..8a96d0d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3322,19 +3216,6 @@ index d358226..bfd4019 100644
if (signr)
do_exit(signr);
}
-@@ -870,7 +875,11 @@ void __init early_trap_init(void *vectors_base)
- kuser_init(vectors_base);
-
- flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
-+
-+#ifndef CONFIG_PAX_MEMORY_UDEREF
-+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
-+#endif
-+
- #else /* ifndef CONFIG_CPU_V7M */
- /*
- * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8b60fde..8d986dd 100644
--- a/arch/arm/kernel/vmlinux.lds.S
@@ -3367,7 +3248,7 @@ index 8b60fde..8d986dd 100644
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index f9c341c..c9cead1 100644
+index 78b2869..9255093 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
@@ -3379,7 +3260,7 @@ index f9c341c..c9cead1 100644
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
+@@ -369,7 +369,7 @@ void force_vm_exit(const cpumask_t *mask)
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
@@ -3388,7 +3269,7 @@ index f9c341c..c9cead1 100644
}
/**
-@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -402,7 +402,7 @@ static void update_vttbr(struct kvm *kvm)
/* First user of a new VMID generation? */
if (unlikely(kvm_next_vmid == 0)) {
@@ -3397,7 +3278,7 @@ index f9c341c..c9cead1 100644
kvm_next_vmid = 1;
/*
-@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -419,7 +419,7 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
@@ -3406,63 +3287,6 @@ index f9c341c..c9cead1 100644
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
-diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
-index 1710fd7..ec3e014 100644
---- a/arch/arm/lib/clear_user.S
-+++ b/arch/arm/lib/clear_user.S
-@@ -12,14 +12,14 @@
-
- .text
-
--/* Prototype: int __clear_user(void *addr, size_t sz)
-+/* Prototype: int ___clear_user(void *addr, size_t sz)
- * Purpose : clear some user memory
- * Params : addr - user memory address to clear
- * : sz - number of bytes to clear
- * Returns : number of bytes NOT cleared
- */
- ENTRY(__clear_user_std)
--WEAK(__clear_user)
-+WEAK(___clear_user)
- stmfd sp!, {r1, lr}
- mov r2, #0
- cmp r1, #4
-@@ -44,7 +44,7 @@ WEAK(__clear_user)
- USER( strnebt r2, [r0])
- mov r0, #0
- ldmfd sp!, {r1, pc}
--ENDPROC(__clear_user)
-+ENDPROC(___clear_user)
- ENDPROC(__clear_user_std)
-
- .pushsection .text.fixup,"ax"
-diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
-index 7a235b9..73a0556 100644
---- a/arch/arm/lib/copy_from_user.S
-+++ b/arch/arm/lib/copy_from_user.S
-@@ -17,7 +17,7 @@
- /*
- * Prototype:
- *
-- * size_t __copy_from_user(void *to, const void *from, size_t n)
-+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
- *
- * Purpose:
- *
-@@ -89,11 +89,11 @@
-
- .text
-
--ENTRY(__copy_from_user)
-+ENTRY(___copy_from_user)
-
- #include "copy_template.S"
-
--ENDPROC(__copy_from_user)
-+ENDPROC(___copy_from_user)
-
- .pushsection .fixup,"ax"
- .align 0
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ee2f67..d1cce76 100644
--- a/arch/arm/lib/copy_page.S
@@ -3475,38 +3299,11 @@ index 6ee2f67..d1cce76 100644
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
-diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
-index 9648b06..19c333c 100644
---- a/arch/arm/lib/copy_to_user.S
-+++ b/arch/arm/lib/copy_to_user.S
-@@ -17,7 +17,7 @@
- /*
- * Prototype:
- *
-- * size_t __copy_to_user(void *to, const void *from, size_t n)
-+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
- *
- * Purpose:
- *
-@@ -93,11 +93,11 @@
- .text
-
- ENTRY(__copy_to_user_std)
--WEAK(__copy_to_user)
-+WEAK(___copy_to_user)
-
- #include "copy_template.S"
-
--ENDPROC(__copy_to_user)
-+ENDPROC(___copy_to_user)
- ENDPROC(__copy_to_user_std)
-
- .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
-index 1d0957e..f708846 100644
+index 1712f13..a3165dc 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
-@@ -57,8 +57,8 @@
+@@ -71,8 +71,8 @@
* Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
*/
@@ -3531,7 +3328,7 @@ index 8044591..c9b2609 100644
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
-index 4b39af2..9ae747d 100644
+index d72b909..0521929 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -85,7 +85,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
@@ -3543,16 +3340,7 @@ index 4b39af2..9ae747d 100644
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
int atomic;
-@@ -136,7 +136,7 @@ out:
- }
-
- unsigned long
--__copy_to_user(void __user *to, const void *from, unsigned long n)
-+___copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- /*
- * This test is stubbed out of the main function above to keep
-@@ -150,7 +150,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+@@ -150,7 +150,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_to_user_memcpy(to, from, n);
}
@@ -3561,20 +3349,11 @@ index 4b39af2..9ae747d 100644
__clear_user_memset(void __user *addr, unsigned long n)
{
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
-@@ -190,7 +190,7 @@ out:
- return n;
- }
-
--unsigned long __clear_user(void __user *addr, unsigned long n)
-+unsigned long ___clear_user(void __user *addr, unsigned long n)
- {
- /* See rational for this in __copy_to_user() above. */
- if (n < 64)
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
-index f572219..2cf36d5 100644
+index e00eb39..0f95491 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
-@@ -732,8 +732,10 @@ void __init exynos_pm_init(void)
+@@ -730,8 +730,10 @@ void __init exynos_pm_init(void)
tmp |= pm_data->wake_disable_mask;
pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
@@ -3588,10 +3367,10 @@ index f572219..2cf36d5 100644
register_syscore_ops(&exynos_pm_syscore_ops);
suspend_set_ops(&exynos_suspend_ops);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
-index e46e9ea..9141c83 100644
+index 44eedf3..13a0528 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
-@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
+@@ -105,7 +105,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
/*
* This ioremap hook is used on Armada 375/38x to ensure that PCIe
@@ -3600,7 +3379,7 @@ index e46e9ea..9141c83 100644
* is needed as a workaround for a deadlock issue between the PCIe
* interface and the cache controller.
*/
-@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+@@ -118,7 +118,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
@@ -3623,7 +3402,7 @@ index b6443a4..20a0b74 100644
};
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
-index 79f49d9..70bf184 100644
+index 65024af..70bf184 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -86,7 +86,7 @@ struct cpu_pm_ops {
@@ -3639,7 +3418,7 @@ index 79f49d9..70bf184 100644
static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}
--struct cpu_pm_ops omap_pm_ops = {
+-static struct cpu_pm_ops omap_pm_ops = {
+static struct cpu_pm_ops omap_pm_ops __read_only = {
.finish_suspend = default_finish_suspend,
.resume = dummy_cpu_resume,
@@ -3670,7 +3449,7 @@ index e1d2e99..d9b3177 100644
};
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
-index 4cb8fd9..5ce65bc 100644
+index 72ebc4c..18c4406 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
@@ -3711,10 +3490,10 @@ index 78c02b3..c94109a 100644
struct omap_device *omap_device_alloc(struct platform_device *pdev,
struct omap_hwmod **ohs, int oh_cnt);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 486cc4d..8d1a0b7 100644
+index cc8a987..dab541b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
-@@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
+@@ -200,10 +200,10 @@ struct omap_hwmod_soc_ops {
int (*init_clkdm)(struct omap_hwmod *oh);
void (*update_context_lost)(struct omap_hwmod *oh);
int (*get_context_lost)(struct omap_hwmod *oh);
@@ -3775,7 +3554,7 @@ index ff0a68c..b312aa0 100644
sizeof(struct omap_wd_timer_platform_data));
WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
-index b0790fc..71eb21f 100644
+index 4e54512..ed7c349 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -22,6 +22,7 @@
@@ -3796,52 +3575,6 @@ index b0790fc..71eb21f 100644
+ pax_close_kernel();
}
#endif
-diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
-index 34608fc..344d7c0 100644
---- a/arch/arm/mach-shmobile/pm-r8a7740.c
-+++ b/arch/arm/mach-shmobile/pm-r8a7740.c
-@@ -11,6 +11,7 @@
- #include <linux/console.h>
- #include <linux/io.h>
- #include <linux/suspend.h>
-+#include <asm/pgtable.h>
-
- #include "common.h"
- #include "pm-rmobile.h"
-@@ -117,7 +118,9 @@ static int r8a7740_enter_suspend(suspend_state_t suspend_state)
-
- static void r8a7740_suspend_init(void)
- {
-- shmobile_suspend_ops.enter = r8a7740_enter_suspend;
-+ pax_open_kernel();
-+ *(void **)&shmobile_suspend_ops.enter = r8a7740_enter_suspend;
-+ pax_close_kernel();
- }
- #else
- static void r8a7740_suspend_init(void) {}
-diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c
-index a7e4668..83334f33 100644
---- a/arch/arm/mach-shmobile/pm-sh73a0.c
-+++ b/arch/arm/mach-shmobile/pm-sh73a0.c
-@@ -9,6 +9,7 @@
- */
-
- #include <linux/suspend.h>
-+#include <asm/pgtable.h>
- #include "common.h"
-
- #ifdef CONFIG_SUSPEND
-@@ -20,7 +21,9 @@ static int sh73a0_enter_suspend(suspend_state_t suspend_state)
-
- static void sh73a0_suspend_init(void)
- {
-- shmobile_suspend_ops.enter = sh73a0_enter_suspend;
-+ pax_open_kernel();
-+ *(void **)&shmobile_suspend_ops.enter = sh73a0_enter_suspend;
-+ pax_close_kernel();
- }
- #else
- static void sh73a0_suspend_init(void) {}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 7469347..1ecc350 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -3892,7 +3625,7 @@ index f66816c..228b951 100644
#include "common.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
-index 7c6b976..055db09 100644
+index df7537f..b931a5f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -446,6 +446,7 @@ config CPU_32v5
@@ -3930,7 +3663,7 @@ index 7c6b976..055db09 100644
If all of the binaries and libraries which run on your platform
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
-index 9769f1e..16aaa55 100644
+index 00b7f7d..6fc28bc 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -216,10 +216,12 @@ union offset_union {
@@ -3995,7 +3728,7 @@ index 9769f1e..16aaa55 100644
goto fault; \
} while (0)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
-index 71b3d33..8af9ade 100644
+index 493692d..42a4504 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -44,7 +44,7 @@ struct l2c_init_data {
@@ -4482,33 +4215,10 @@ index 407dc78..047ce9d 100644
}
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
-index 870838a..070df1d 100644
+index 7cd1514..0307305 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
-@@ -41,6 +41,22 @@
- #include "mm.h"
- #include "tcm.h"
-
-+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+void modify_domain(unsigned int dom, unsigned int type)
-+{
-+ struct thread_info *thread = current_thread_info();
-+ unsigned int domain = thread->cpu_domain;
-+ /*
-+ * DOMAIN_MANAGER might be defined to some other value,
-+ * use the arch-defined constant
-+ */
-+ domain &= ~domain_val(dom, 3);
-+ thread->cpu_domain = domain | domain_val(dom, type);
-+ set_domain(thread->cpu_domain);
-+}
-+EXPORT_SYMBOL(modify_domain);
-+#endif
-+
- /*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
-@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
+@@ -242,7 +242,15 @@ __setup("noalign", noalign_setup);
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
@@ -4525,7 +4235,7 @@ index 870838a..070df1d 100644
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
-@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
+@@ -271,19 +279,19 @@ static struct mem_type mem_types[] = {
.prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
@@ -4550,26 +4260,16 @@ index 870838a..070df1d 100644
.domain = DOMAIN_KERNEL,
},
#endif
-@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_RDONLY,
+@@ -299,7 +307,7 @@ static struct mem_type mem_types[] = {
.prot_l1 = PMD_TYPE_TABLE,
-- .domain = DOMAIN_USER,
-+ .domain = DOMAIN_VECTORS,
- },
- [MT_HIGH_VECTORS] = {
- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_USER | L_PTE_RDONLY,
- .prot_l1 = PMD_TYPE_TABLE,
-- .domain = DOMAIN_USER,
-+ .domain = DOMAIN_VECTORS,
+ .domain = DOMAIN_VECTORS,
},
- [MT_MEMORY_RWX] = {
+ [__MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
-@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
+@@ -312,17 +320,30 @@ static struct mem_type mem_types[] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -4603,7 +4303,7 @@ index 870838a..070df1d 100644
[MT_MEMORY_RW_DTCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN,
-@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
+@@ -330,9 +351,10 @@ static struct mem_type mem_types[] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
@@ -4616,7 +4316,7 @@ index 870838a..070df1d 100644
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_RW_SO] = {
-@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
+@@ -585,9 +607,14 @@ static void __init build_mem_type_table(void)
* Mark cache clean areas and XIP ROM read only
* from SVC mode and no access from userspace.
*/
@@ -4634,7 +4334,7 @@ index 870838a..070df1d 100644
#endif
/*
-@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
+@@ -604,13 +631,17 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
@@ -4656,7 +4356,7 @@ index 870838a..070df1d 100644
}
}
-@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
+@@ -621,15 +652,20 @@ static void __init build_mem_type_table(void)
if (cpu_arch >= CPU_ARCH_ARMv6) {
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/* Non-cacheable Normal is XCB = 001 */
@@ -4680,7 +4380,7 @@ index 870838a..070df1d 100644
}
#ifdef CONFIG_ARM_LPAE
-@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
+@@ -650,6 +686,8 @@ static void __init build_mem_type_table(void)
user_pgprot |= PTE_EXT_PXN;
#endif
@@ -4689,7 +4389,7 @@ index 870838a..070df1d 100644
for (i = 0; i < 16; i++) {
pteval_t v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
-@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
+@@ -667,21 +705,24 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -4720,18 +4420,18 @@ index 870838a..070df1d 100644
break;
}
pr_info("Memory policy: %sData cache %s\n",
-@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
+@@ -895,7 +936,7 @@ static void __init create_mapping(struct map_desc *md)
return;
}
- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
- md->virtual >= PAGE_OFFSET &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
-@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+@@ -1265,18 +1306,15 @@ void __init arm_mm_memblock_reserve(void)
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
*/
+
+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
@@ -4750,9 +4450,9 @@ index 870838a..070df1d 100644
- early_trap_init(vectors);
+ early_trap_init(&vectors);
- for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
- pmd_clear(pmd_off_k(addr));
-@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ /*
+ * Clear page table except top pmd used by early fixmaps
+@@ -1292,7 +1330,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
map.virtual = MODULES_VADDR;
map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
@@ -4761,7 +4461,7 @@ index 870838a..070df1d 100644
create_mapping(&map);
#endif
-@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+@@ -1303,14 +1341,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
map.virtual = FLUSH_BASE;
map.length = SZ_1M;
@@ -4778,7 +4478,7 @@ index 870838a..070df1d 100644
create_mapping(&map);
#endif
-@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+@@ -1319,7 +1357,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
@@ -4787,7 +4487,7 @@ index 870838a..070df1d 100644
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
-@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
+@@ -1379,8 +1417,10 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
@@ -4798,7 +4498,7 @@ index 870838a..070df1d 100644
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
-@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
+@@ -1393,11 +1433,48 @@ static void __init map_lowmem(void)
if (start >= end)
break;
@@ -4848,7 +4548,7 @@ index 870838a..070df1d 100644
create_mapping(&map);
} else if (start >= kernel_x_end) {
-@@ -1377,7 +1470,7 @@ static void __init map_lowmem(void)
+@@ -1421,7 +1498,7 @@ static void __init map_lowmem(void)
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
@@ -4857,7 +4557,7 @@ index 870838a..070df1d 100644
create_mapping(&map);
-@@ -1390,6 +1483,7 @@ static void __init map_lowmem(void)
+@@ -1434,6 +1511,7 @@ static void __init map_lowmem(void)
create_mapping(&map);
}
}
@@ -4866,7 +4566,7 @@ index 870838a..070df1d 100644
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index c011e22..92a0260 100644
+index b8efb8c..88fa837 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -20,6 +20,7 @@
@@ -5021,12 +4721,12 @@ index d6285ef..b684dac 100644
Say Y here if you want to show the kernel pagetable layout in a
debugfs file. This information is only useful for kernel developers
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
-index 7047051..44e8675 100644
+index 35a6778..caabbd36 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
-@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
- #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+@@ -91,5 +91,15 @@
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
@@ -5040,24 +4740,11 @@ index 7047051..44e8675 100644
+
#endif
#endif
-diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
-index 0fa47c4..b167938 100644
---- a/arch/arm64/include/asm/barrier.h
-+++ b/arch/arm64/include/asm/barrier.h
-@@ -44,7 +44,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
-index 4fde8c1..441f84f 100644
+index 0a456be..7799ff5 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
-@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+@@ -127,16 +127,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
@@ -5095,10 +4782,10 @@ index 7642056..bffc904 100644
#if CONFIG_PGTABLE_LEVELS > 3
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
-index 07e1ba44..ec8cbbb 100644
+index b2ede967..865eed5 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
-@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
+@@ -102,6 +102,7 @@ static inline void set_fs(mm_segment_t fs)
flag; \
})
@@ -5107,7 +4794,7 @@ index 07e1ba44..ec8cbbb 100644
#define user_addr_max get_fs
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
-index d16a1ce..a5acc60 100644
+index 99224dc..148dfb7 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
@@ -5283,10 +4970,10 @@ index 7caf25d..ee65ac5 100644
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
-index 102190a..5334cea 100644
+index 0da689d..3aad5fb 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
-@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
+@@ -166,6 +166,16 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
@@ -5381,13 +5068,13 @@ index 69952c18..4fa2908 100644
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index 42a91a7..29d446e 100644
+index eb0249e..388ff32 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
-@@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
- config KEXEC
+@@ -519,6 +519,7 @@ config KEXEC
bool "kexec system call"
depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ select KEXEC_CORE
+ depends on !GRKERNSEC_KMEM
help
kexec is a system call that implements the ability to shutdown your
@@ -5404,10 +5091,10 @@ index 970d0bd..e750b9b 100644
make_nr_irqs_h: FORCE
$(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
-index 0bf0350..2ad1957 100644
+index be4beeb..c0ec564 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
-@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
@@ -5422,19 +5109,6 @@ index 0bf0350..2ad1957 100644
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
#endif /* _ASM_IA64_ATOMIC_H */
-diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
-index 843ba43..fa118fb 100644
---- a/arch/ia64/include/asm/barrier.h
-+++ b/arch/ia64/include/asm/barrier.h
-@@ -66,7 +66,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index 988254a..e1ee885 100644
--- a/arch/ia64/include/asm/cache.h
@@ -5832,7 +5506,7 @@ index f50d4b3..c7975ee 100644
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
-index 97e48b0..fc59c36 100644
+index 1841ef6..74d8330 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
@@ -5921,19 +5595,6 @@ index 0395c51..5f26031 100644
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
-index 5a696e5..070490d 100644
---- a/arch/metag/include/asm/barrier.h
-+++ b/arch/metag/include/asm/barrier.h
-@@ -90,7 +90,7 @@ static inline void fence(void)
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 53f0f6c..2dc07fd 100644
--- a/arch/metag/mm/hugetlbpage.c
@@ -5965,32 +5626,32 @@ index 4efe96a..60e8699 100644
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index 199a835..822b487 100644
+index e3aa5b0..2ed7912 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2591,6 +2591,7 @@ source "kernel/Kconfig.preempt"
-
+@@ -2598,6 +2598,7 @@ source "kernel/Kconfig.preempt"
config KEXEC
bool "Kexec system call"
+ select KEXEC_CORE
+ depends on !GRKERNSEC_KMEM
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
-index d8960d4..77dbd31 100644
+index 2cd45f5..d0f4900 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
-@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
- if (dma_release_from_coherent(dev, order, vaddr))
- return;
-
+@@ -191,7 +191,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
+ static void octeon_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
}
static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
-index 26d4363..3c9a82e 100644
+index 4c42fd9..fc7a48e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -22,15 +22,39 @@
@@ -6199,7 +5860,7 @@ index 26d4363..3c9a82e 100644
raw_local_irq_restore(flags); \
} \
\
-@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+@@ -130,20 +194,25 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
return result; \
}
@@ -6214,9 +5875,16 @@ index 26d4363..3c9a82e 100644
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op)
-+
+
+-ATOMIC_OP(and, &=, and)
+-ATOMIC_OP(or, |=, or)
+-ATOMIC_OP(xor, ^=, xor)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
++
++ATOMIC_OP(and, and)
++ATOMIC_OP(or, or)
++ATOMIC_OP(xor, xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -6226,7 +5894,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
-@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
+@@ -153,7 +222,7 @@ ATOMIC_OP(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
@@ -6235,7 +5903,7 @@ index 26d4363..3c9a82e 100644
{
int result;
-@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+@@ -163,7 +232,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
@@ -6244,7 +5912,7 @@ index 26d4363..3c9a82e 100644
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
-@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+@@ -212,8 +281,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
return result;
}
@@ -6273,7 +5941,7 @@ index 26d4363..3c9a82e 100644
/**
* __atomic_add_unless - add unless the number is a given value
-@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -241,6 +328,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
@@ -6284,7 +5952,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic_sub_and_test - subtract value from variable and test result
-@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -262,6 +353,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -6295,7 +5963,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic_dec_and_test - decrement by 1 and test
-@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -286,6 +381,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1.
*/
#define atomic_inc(v) atomic_add(1, (v))
@@ -6306,7 +5974,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic_dec - decrement and test
-@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -294,6 +393,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* Atomically decrements @v by 1.
*/
#define atomic_dec(v) atomic_sub(1, (v))
@@ -6317,7 +5985,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic_add_negative - add and test if negative
-@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -315,54 +418,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer of type atomic64_t
*
*/
@@ -6415,7 +6083,7 @@ index 26d4363..3c9a82e 100644
{ \
long result; \
\
-@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+@@ -372,12 +498,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
@@ -6434,7 +6102,7 @@ index 26d4363..3c9a82e 100644
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
-@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+@@ -385,27 +514,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
} else if (kernel_uses_llsc) { \
long temp; \
\
@@ -6485,7 +6153,7 @@ index 26d4363..3c9a82e 100644
raw_local_irq_restore(flags); \
} \
\
-@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+@@ -414,19 +551,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
return result; \
}
@@ -6497,12 +6165,19 @@ index 26d4363..3c9a82e 100644
-ATOMIC64_OPS(add, +=, daddu)
-ATOMIC64_OPS(sub, -=, dsubu)
+-ATOMIC64_OP(and, &=, and)
+-ATOMIC64_OP(or, |=, or)
+-ATOMIC64_OP(xor, ^=, xor)
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, dadd)
+ATOMIC64_OPS(sub, dsub)
++
++ATOMIC64_OP(and, and)
++ATOMIC64_OP(or, or)
++ATOMIC64_OP(xor, xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -6514,7 +6189,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic64_sub_if_positive - conditionally subtract integer from atomic
-@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
+@@ -437,7 +582,7 @@ ATOMIC64_OP(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
@@ -6523,7 +6198,7 @@ index 26d4363..3c9a82e 100644
{
long result;
-@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+@@ -447,7 +592,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
@@ -6532,7 +6207,7 @@ index 26d4363..3c9a82e 100644
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
-@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+@@ -496,9 +641,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
return result;
}
@@ -6562,7 +6237,7 @@ index 26d4363..3c9a82e 100644
/**
* atomic64_add_unless - add unless the number is a given value
-@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+@@ -528,6 +690,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
@@ -6570,7 +6245,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic64_sub_and_test - subtract value from variable and test result
-@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+@@ -549,6 +712,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
* other cases.
*/
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
@@ -6578,7 +6253,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic64_dec_and_test - decrement by 1 and test
-@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+@@ -573,6 +737,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
* Atomically increments @v by 1.
*/
#define atomic64_inc(v) atomic64_add(1, (v))
@@ -6586,7 +6261,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic64_dec - decrement and test
-@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+@@ -581,6 +746,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
* Atomically decrements @v by 1.
*/
#define atomic64_dec(v) atomic64_sub(1, (v))
@@ -6594,19 +6269,6 @@ index 26d4363..3c9a82e 100644
/*
* atomic64_add_negative - add and test if negative
-diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
-index 7ecba84..21774af 100644
---- a/arch/mips/include/asm/barrier.h
-+++ b/arch/mips/include/asm/barrier.h
-@@ -133,7 +133,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index b4db69f..8f3b093 100644
--- a/arch/mips/include/asm/cache.h
@@ -6625,10 +6287,10 @@ index b4db69f..8f3b093 100644
#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
-index f19e890..a4f8177 100644
+index 53b2693..13803b9 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
-@@ -417,6 +417,13 @@ extern const char *__elf_platform;
+@@ -419,6 +419,13 @@ extern const char *__elf_platform;
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
@@ -6788,7 +6450,7 @@ index b336037..5b874cc 100644
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index ae85694..4cdbba8 100644
+index 8957f15..c5b802e 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,9 @@
@@ -6802,12 +6464,12 @@ index ae85694..4cdbba8 100644
struct vm_area_struct;
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
-index 9c0014e..5101ef5 100644
+index e309d8f..20eefec 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
-@@ -100,6 +100,9 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_SECCOMP 4 /* secure computing */
+@@ -101,6 +101,9 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
+ #define TIF_UPROBE 6 /* breakpointed or singlestepping */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
+/* li takes a 32bit immediate */
+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
@@ -6815,7 +6477,7 @@ index 9c0014e..5101ef5 100644
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_NOHZ 19 /* in adaptive nohz mode */
-@@ -135,14 +138,16 @@ static inline struct thread_info *current_thread_info(void)
+@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_USEDMSA (1<<TIF_USEDMSA)
#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
@@ -6834,7 +6496,7 @@ index 9c0014e..5101ef5 100644
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
-@@ -150,7 +155,7 @@ static inline struct thread_info *current_thread_info(void)
+@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
_TIF_WORK_SYSCALL_EXIT | \
@@ -6891,19 +6553,6 @@ index 9287678..f870e47 100644
#include <asm/processor.h>
#include <linux/module.h>
-diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
-index 74f6752..f3d7a47 100644
---- a/arch/mips/kernel/i8259.c
-+++ b/arch/mips/kernel/i8259.c
-@@ -205,7 +205,7 @@ spurious_8259A_irq:
- printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
-- atomic_inc(&irq_err_count);
-+ atomic_inc_unchecked(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 44a1f79..2bd6aa3 100644
--- a/arch/mips/kernel/irq-gt641xx.c
@@ -6960,7 +6609,7 @@ index 8eb5af8..2baf465 100644
}
#else
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
-index 0614717..002fa43 100644
+index f63a289..53037c22 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
@@ -6996,10 +6645,10 @@ index f2975d4..f61d355 100644
{
struct pt_regs *regs;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
-index e933a30..0d02625 100644
+index 4f0ac78..491124a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
-@@ -785,6 +785,10 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -873,6 +873,10 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
@@ -7010,7 +6659,7 @@ index e933a30..0d02625 100644
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
-@@ -803,6 +807,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+@@ -891,6 +895,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
tracehook_report_syscall_entry(regs))
ret = -1;
@@ -7092,10 +6741,10 @@ index 2242bdd..b284048 100644
}
/* Arrange for an interrupt in a short while */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index 8ea28e6..c8873d5 100644
+index fdb392b..c5cf284 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
-@@ -697,7 +697,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+@@ -692,7 +692,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
siginfo_t info;
prev_state = exception_enter();
@@ -7116,7 +6765,7 @@ index 8ea28e6..c8873d5 100644
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index 852a41c..75b9d38 100644
+index 4b88fa0..b16bc17 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -31,6 +31,23 @@
@@ -7143,7 +6792,7 @@ index 852a41c..75b9d38 100644
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
-@@ -207,6 +224,14 @@ bad_area:
+@@ -205,6 +222,14 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
@@ -7261,30 +6910,6 @@ index 5c81fdd..db158d3 100644
int __virt_addr_valid(const volatile void *kaddr)
{
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
-diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
-index dabf417..0be1d6d 100644
---- a/arch/mips/net/bpf_jit_asm.S
-+++ b/arch/mips/net/bpf_jit_asm.S
-@@ -62,7 +62,9 @@ sk_load_word_positive:
- is_offset_in_header(4, word)
- /* Offset within header boundaries */
- PTR_ADDU t1, $r_skb_data, offset
-+ .set reorder
- lw $r_A, 0(t1)
-+ .set noreorder
- #ifdef CONFIG_CPU_LITTLE_ENDIAN
- # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- wsbh t0, $r_A
-@@ -90,7 +92,9 @@ sk_load_half_positive:
- is_offset_in_header(2, half)
- /* Offset within header boundaries */
- PTR_ADDU t1, $r_skb_data, offset
-+ .set reorder
- lh $r_A, 0(t1)
-+ .set noreorder
- #ifdef CONFIG_CPU_LITTLE_ENDIAN
- # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- wsbh t0, $r_A
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index a2358b4..7cead4f 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
@@ -7419,10 +7044,10 @@ index 4ce7a01..449202a 100644
#endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
-index 226f8ca9..9d9b87d 100644
+index 2536965..5d3e884 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
-@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+@@ -280,6 +280,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
@@ -7739,7 +7364,7 @@ index b99b39f..e3915ae 100644
fault_space = regs->iasq[0];
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index 15503ad..4b1b8b6 100644
+index a762864..664087f 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -16,6 +16,7 @@
@@ -7912,19 +7537,19 @@ index 15503ad..4b1b8b6 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 5ef2711..21be2c3 100644
+index 9a7057e..5691c0b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -415,6 +415,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
- config KEXEC
+@@ -421,6 +421,7 @@ config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+ select KEXEC_CORE
+ depends on !GRKERNSEC_KMEM
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
-index 512d278..d31fadd 100644
+index 55f106e..70cc82a 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -12,6 +12,11 @@
@@ -8036,7 +7661,7 @@ index 512d278..d31fadd 100644
#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
ATOMIC_OPS(add, add)
-@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
+@@ -73,42 +122,29 @@ ATOMIC_OP(xor, xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -8094,7 +7719,7 @@ index 512d278..d31fadd 100644
}
/*
-@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
+@@ -121,43 +157,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -8161,7 +7786,7 @@ index 512d278..d31fadd 100644
/**
* __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
-@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -175,11 +206,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
@@ -8191,7 +7816,7 @@ index 512d278..d31fadd 100644
PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
-@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
+@@ -252,6 +299,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
}
#define atomic_dec_if_positive atomic_dec_if_positive
@@ -8203,7 +7828,7 @@ index 512d278..d31fadd 100644
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
-@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
+@@ -265,37 +317,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
return t;
}
@@ -8268,7 +7893,7 @@ index 512d278..d31fadd 100644
PPC_ATOMIC_EXIT_BARRIER \
: "=&r" (t) \
: "r" (a), "r" (&v->counter) \
-@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
+@@ -304,6 +379,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
return t; \
}
@@ -8278,7 +7903,7 @@ index 512d278..d31fadd 100644
#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
ATOMIC64_OPS(add, add)
-@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
+@@ -314,40 +392,33 @@ ATOMIC64_OP(xor, xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -8338,7 +7963,7 @@ index 512d278..d31fadd 100644
}
/*
-@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
+@@ -360,36 +431,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
*/
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
@@ -8386,7 +8011,7 @@ index 512d278..d31fadd 100644
}
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+@@ -422,6 +475,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -8403,7 +8028,7 @@ index 512d278..d31fadd 100644
/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
-@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+@@ -437,13 +500,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
@@ -8436,32 +8061,20 @@ index 512d278..d31fadd 100644
" subf %0,%2,%0 \n\
2:"
: "=&r" (t)
-diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
-index 51ccc72..35de789 100644
---- a/arch/powerpc/include/asm/barrier.h
-+++ b/arch/powerpc/include/asm/barrier.h
-@@ -76,7 +76,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_lwsync(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
-index 0dc42c5..b80a3a1 100644
+index 5f8229e..385d90b 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
-@@ -4,6 +4,7 @@
+@@ -3,6 +3,8 @@
+
#ifdef __KERNEL__
- #include <asm/reg.h>
++#include <asm/reg.h>
+#include <linux/const.h>
/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-@@ -23,7 +24,7 @@
+@@ -22,7 +24,7 @@
#define L1_CACHE_SHIFT 7
#endif
@@ -8694,7 +8307,7 @@ index 4b0be20..c15a27d 100644
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
-index 11a3863..108f194 100644
+index 0717693..6a1f488 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
@@ -8718,10 +8331,10 @@ index 62cfb0c..50c6402 100644
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index af56b5c..f86f3f6 100644
+index a908ada..f3c8966 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
-@@ -253,6 +253,7 @@
+@@ -252,6 +252,7 @@
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
@@ -8743,10 +8356,10 @@ index 825663c..f9e9134 100644
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
-index 4dbe072..b803275 100644
+index 523673d..4aeef3b 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
-@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
+@@ -202,13 +202,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
__asm__ __volatile__(
"1: " PPC_LWARX(%0,0,%1,1) "\n"
__DO_SIGN_EXTEND
@@ -8779,7 +8392,7 @@ index 4dbe072..b803275 100644
: "r" (&rw->lock)
: "cr0", "xer", "memory");
-@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+@@ -284,11 +300,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
__asm__ __volatile__(
"# read_unlock\n\t"
PPC_RELEASE_BARRIER
@@ -9015,7 +8628,7 @@ index 2a8ebae..5643c6f 100644
static inline unsigned long clear_user(void __user *addr, unsigned long size)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
-index 12868b1..5155667 100644
+index ba33693..d8db875 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
@@ -9040,7 +8653,7 @@ index 12868b1..5155667 100644
irq.o align.o signal_32.o pmc.o vdso.o \
process.o systbl.o idle.o \
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
-index 3e68d1c..72a5ee6 100644
+index f3bd5e7..50040455 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1010,6 +1010,7 @@ storage_fault_common:
@@ -9078,7 +8691,7 @@ index 0a0399c2..262a2e6 100644
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 4509603..cdb491f 100644
+index 290559d..0094ddb 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -460,6 +460,8 @@ void migrate_irqs(void)
@@ -9141,7 +8754,7 @@ index c94d2e0..992a9ce 100644
sechdrs, module);
#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index 64e6e9d..cf90ed5 100644
+index 75b6676..41c72b5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1033,8 +1033,8 @@ void show_regs(struct pt_regs * regs)
@@ -9228,33 +8841,33 @@ index 64e6e9d..cf90ed5 100644
-}
-
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
-index f21897b..28c0428 100644
+index 737c0d0..59c7417 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
-@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
+@@ -1800,6 +1800,10 @@ static int do_seccomp(struct pt_regs *regs)
+ static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+ #endif /* CONFIG_SECCOMP */
+#ifdef CONFIG_GRKERNSEC_SETXID
+extern void gr_delayed_cred_worker(void);
+#endif
+
- /*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
-@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
-
- secure_computing_strict(regs->gpr[0]);
+ /**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+@@ -1828,6 +1832,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ if (do_seccomp(regs))
+ return -1;
+#ifdef CONFIG_GRKERNSEC_SETXID
+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
+ gr_delayed_cred_worker();
+#endif
+
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
/*
-@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ * The tracer may decide to abort the syscall, if so tracehook
+@@ -1870,6 +1879,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
@@ -9267,10 +8880,10 @@ index f21897b..28c0428 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index da50e0c..5ff6307 100644
+index 0dbee46..97b77b9 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
-@@ -1009,7 +1009,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+@@ -1014,7 +1014,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
@@ -9280,10 +8893,10 @@ index da50e0c..5ff6307 100644
tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
-index c7c24d2..1bf7039 100644
+index 20756df..300e2a4 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
-@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
+@@ -765,7 +765,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
current->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
@@ -9591,10 +9204,10 @@ index 0f432a7..abfe841 100644
if (!fixed && addr) {
addr = _ALIGN_UP(addr, 1ul << pshift);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
-index d966bbe..372124a 100644
+index 5038fd5..87a2033 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
-@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -263,9 +263,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
@@ -9619,10 +9232,10 @@ index c56878e..073d04e 100644
Say Y here if you want to show the kernel pagetable layout in a
debugfs file. This information is only useful for kernel developers
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
-index adbe380..adb7516 100644
+index 117fa5c..e2f6e51 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
-@@ -317,4 +317,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+@@ -324,4 +324,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -9637,19 +9250,6 @@ index adbe380..adb7516 100644
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
#endif /* __ARCH_S390_ATOMIC__ */
-diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
-index e6f8615..4a66339 100644
---- a/arch/s390/include/asm/barrier.h
-+++ b/arch/s390/include/asm/barrier.h
-@@ -42,7 +42,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 4d7ccac..d03d0ad 100644
--- a/arch/s390/include/asm/cache.h
@@ -9812,10 +9412,10 @@ index 0c1a679..e1df357 100644
if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index 8f587d8..0642516b 100644
+index f2dac9f..936c8c2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
-@@ -200,27 +200,3 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -232,27 +232,3 @@ unsigned long get_wchan(struct task_struct *p)
}
return 0;
}
@@ -10020,7 +9620,7 @@ index 6777177..cb5e44f 100644
addr = vm_unmapped_area(&info);
}
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
-index 4082749..fd97781 100644
+index 917084a..4ff965d 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -15,18 +15,38 @@
@@ -10068,7 +9668,7 @@ index 4082749..fd97781 100644
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
+@@ -39,13 +59,23 @@ ATOMIC_OP(xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -10092,7 +9692,7 @@ index 4082749..fd97781 100644
/*
* atomic_inc_and_test - increment and test
-@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
+@@ -56,6 +86,10 @@ ATOMIC_OP(xor)
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
@@ -10103,7 +9703,7 @@ index 4082749..fd97781 100644
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
+@@ -65,25 +99,60 @@ ATOMIC_OP(xor)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
@@ -10167,7 +9767,7 @@ index 4082749..fd97781 100644
if (likely(old == c))
break;
c = old;
-@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -94,20 +163,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -10207,19 +9807,6 @@ index 4082749..fd97781 100644
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
-index 809941e..b443309 100644
---- a/arch/sparc/include/asm/barrier_64.h
-+++ b/arch/sparc/include/asm/barrier_64.h
-@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 5bb6991..5c2132e 100644
--- a/arch/sparc/include/asm/cache.h
@@ -10320,7 +9907,7 @@ index 59ba6f6..4518128 100644
+
#endif
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
-index f06b36a..bca3189 100644
+index 91b963a..9a806c1 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
@@ -10694,7 +10281,7 @@ index 7cf9c6e..6206648 100644
extra-y := head_$(BITS).o
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
-index 50e7b62..79fae35 100644
+index c5113c7..52322e4 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
@@ -11331,7 +10918,7 @@ index 3269b02..64f5231 100644
lib-$(CONFIG_SPARC32) += ashrdi3.o
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
-index 05dac43..76f8ed4 100644
+index d6b0363..552bcbb6 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -15,11 +15,22 @@
@@ -11389,7 +10976,7 @@ index 05dac43..76f8ed4 100644
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
-@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
+@@ -53,13 +71,16 @@ ATOMIC_OP(xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
@@ -11409,7 +10996,7 @@ index 05dac43..76f8ed4 100644
casx [%o1], %g1, %g7; \
cmp %g1, %g7; \
bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
-@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -69,11 +90,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op); \
@@ -11428,7 +11015,7 @@ index 05dac43..76f8ed4 100644
casx [%o1], %g1, %g7; \
cmp %g1, %g7; \
bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
-@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -83,6 +108,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op##_return);
@@ -11438,7 +11025,7 @@ index 05dac43..76f8ed4 100644
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
ATOMIC64_OPS(add)
-@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
+@@ -93,7 +121,12 @@ ATOMIC64_OP(xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
@@ -11452,7 +11039,7 @@ index 05dac43..76f8ed4 100644
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index 8069ce1..c2e23c4 100644
+index 8eb454c..9f95c5b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
@@ -11473,8 +11060,8 @@ index 8069ce1..c2e23c4 100644
+EXPORT_SYMBOL(atomic_add_ret_unchecked);
+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
ATOMIC_OPS(sub)
-
- #undef ATOMIC_OPS
+ ATOMIC_OP(and)
+ ATOMIC_OP(or)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 30c3ecc..736f015 100644
--- a/arch/sparc/mm/Makefile
@@ -12470,22 +12057,22 @@ index 4ac88b7..bac6cb2 100644
#endif /* CONFIG_DEBUG_DCFLUSH */
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
-index 9def1f5..cf0cabc 100644
+index 106c21b..185bf0f 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
-@@ -204,6 +204,7 @@ source "kernel/Kconfig.hz"
-
+@@ -206,6 +206,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call"
+ select KEXEC_CORE
+ depends on !GRKERNSEC_KMEM
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
-index 0496970..1a57e5f 100644
+index 096a56d..bffafc0 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
-@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+@@ -145,6 +145,16 @@ static inline void atomic64_xor(long i, atomic64_t *v)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -12557,12 +12144,12 @@ index c034dc3..cf1cc96 100644
/*
diff --git a/arch/um/Makefile b/arch/um/Makefile
-index 098ab33..fc54a33 100644
+index e3abe6f..ae224ef 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -73,6 +73,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
- -D_FILE_OFFSET_BITS=64 -idirafter include \
- -D__KERNEL__ -D__UM_HOST__
+ -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+ -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
+ifdef CONSTIFY_PLUGIN
+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
@@ -12630,10 +12217,10 @@ index 2b4274e..754fe06 100644
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
-index 68b9119..f72353c 100644
+index a6d9226..d243e1e 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
-@@ -345,22 +345,6 @@ int singlestepping(void * t)
+@@ -347,22 +347,6 @@ int singlestepping(void * t)
return 2;
}
@@ -12674,10 +12261,10 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index b3a1a5d..8dbc2d6 100644
+index 96d058a..b581500 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -35,13 +35,12 @@ config X86
+@@ -36,14 +36,13 @@ config X86
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
@@ -12688,11 +12275,12 @@ index b3a1a5d..8dbc2d6 100644
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select ARCH_WANT_OPTIONAL_GPIOLIB
-@@ -85,7 +84,7 @@ config X86
+@@ -87,7 +86,7 @@ config X86
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_BPF_JIT if X86_64
@@ -12701,7 +12289,7 @@ index b3a1a5d..8dbc2d6 100644
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING if X86_64
-@@ -274,7 +273,7 @@ config X86_64_SMP
+@@ -276,7 +275,7 @@ config X86_64_SMP
config X86_32_LAZY_GS
def_bool y
@@ -12710,7 +12298,7 @@ index b3a1a5d..8dbc2d6 100644
config ARCH_HWEIGHT_CFLAGS
string
-@@ -646,6 +645,7 @@ config SCHED_OMIT_FRAME_POINTER
+@@ -648,6 +647,7 @@ config SCHED_OMIT_FRAME_POINTER
menuconfig HYPERVISOR_GUEST
bool "Linux guest support"
@@ -12718,15 +12306,15 @@ index b3a1a5d..8dbc2d6 100644
---help---
Say Y here to enable options for running Linux under various hyper-
visors. This option enables basic hypervisor detection and platform
-@@ -1014,6 +1014,7 @@ config VM86
+@@ -1039,6 +1039,7 @@ config VM86
config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
+ depends on !GRKERNSEC
default y
+ depends on MODIFY_LDT_SYSCALL
---help---
- This option is required by programs like Wine to run 16-bit
-@@ -1182,6 +1183,7 @@ choice
+@@ -1208,6 +1209,7 @@ choice
config NOHIGHMEM
bool "off"
@@ -12734,7 +12322,7 @@ index b3a1a5d..8dbc2d6 100644
---help---
Linux can use up to 64 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
-@@ -1218,6 +1220,7 @@ config NOHIGHMEM
+@@ -1244,6 +1246,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
@@ -12742,7 +12330,7 @@ index b3a1a5d..8dbc2d6 100644
---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
-@@ -1270,7 +1273,7 @@ config PAGE_OFFSET
+@@ -1296,7 +1299,7 @@ config PAGE_OFFSET
hex
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
@@ -12751,7 +12339,7 @@ index b3a1a5d..8dbc2d6 100644
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1290,7 +1293,6 @@ config X86_PAE
+@@ -1317,7 +1320,6 @@ config X86_PAE
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
@@ -12759,15 +12347,15 @@ index b3a1a5d..8dbc2d6 100644
config ARCH_DMA_ADDR_T_64BIT
def_bool y
-@@ -1724,6 +1726,7 @@ source kernel/Kconfig.hz
-
+@@ -1757,6 +1759,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
+ select KEXEC_CORE
+ depends on !GRKERNSEC_KMEM
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1906,7 +1909,9 @@ config X86_NEED_RELOCS
+@@ -1939,7 +1942,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
@@ -12778,7 +12366,7 @@ index b3a1a5d..8dbc2d6 100644
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -1989,6 +1994,7 @@ config COMPAT_VDSO
+@@ -2022,6 +2027,7 @@ config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
depends on X86_32 || IA32_EMULATION
@@ -12786,9 +12374,9 @@ index b3a1a5d..8dbc2d6 100644
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
-@@ -2053,6 +2059,22 @@ config CMDLINE_OVERRIDE
- This is used to work around broken boot loaders. This should
- be set to 'N' under normal conditions.
+@@ -2102,6 +2108,22 @@ config MODIFY_LDT_SYSCALL
+
+ Saying 'N' here may make sense for embedded or server kernels.
+config DEFAULT_MODIFY_LDT_SYSCALL
+ bool "Allow userspace to modify the LDT by default"
@@ -12879,10 +12467,10 @@ index d8c0d32..28e3117 100644
---help---
This is a debug driver, which gets the power states
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 118e6de..e02efff 100644
+index 747860c..0374d1e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
-@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
+@@ -75,9 +75,6 @@ ifeq ($(CONFIG_X86_32),y)
# CPU-specific tuning. Anything which can be shared with UML should go here.
include arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
@@ -12892,7 +12480,7 @@ index 118e6de..e02efff 100644
else
BITS := 64
UTS_MACHINE := x86_64
-@@ -116,6 +113,9 @@ else
+@@ -126,6 +123,9 @@ else
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
endif
@@ -12902,7 +12490,7 @@ index 118e6de..e02efff 100644
# Make sure compiler does not have buggy stack-protector support.
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-@@ -184,6 +184,7 @@ archheaders:
+@@ -191,6 +191,7 @@ archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
archprepare:
@@ -12910,7 +12498,7 @@ index 118e6de..e02efff 100644
ifeq ($(CONFIG_KEXEC_FILE),y)
$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
endif
-@@ -267,3 +268,9 @@ define archhelp
+@@ -276,3 +277,9 @@ define archhelp
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
endef
@@ -12921,7 +12509,7 @@ index 118e6de..e02efff 100644
+*** Please upgrade your binutils to 2.18 or newer
+endef
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
-index 57bbf2f..b100fce 100644
+index 0d553e5..cecccf9 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -58,6 +58,9 @@ clean-files += cpustr.h
@@ -12957,7 +12545,7 @@ index 878e4b9..20537ab 100644
#endif /* BOOT_BITOPS_H */
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
-index bd49ec6..94c7f58 100644
+index 0033e96..b3179b9 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -84,7 +84,7 @@ static inline void io_delay(void)
@@ -13099,10 +12687,10 @@ index b0c0d16..3b44ff8 100644
.quad 0x0000000000000000 /* TS continued */
gdt_end:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
-index e28437e..6a17460 100644
+index 79dac17..1549446 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
-@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
+@@ -259,7 +259,7 @@ static void handle_relocations(void *output, unsigned long output_len)
* Calculate the delta between where vmlinux was linked to load
* and where it was actually loaded.
*/
@@ -13111,7 +12699,7 @@ index e28437e..6a17460 100644
if (!delta) {
debug_putstr("No relocation needed... ");
return;
-@@ -324,7 +324,7 @@ static void parse_elf(void *output)
+@@ -341,7 +341,7 @@ static void parse_elf(void *output)
Elf32_Ehdr ehdr;
Elf32_Phdr *phdrs, *phdr;
#endif
@@ -13120,7 +12708,7 @@ index e28437e..6a17460 100644
int i;
memcpy(&ehdr, output, sizeof(ehdr));
-@@ -351,13 +351,16 @@ static void parse_elf(void *output)
+@@ -368,13 +368,16 @@ static void parse_elf(void *output)
case PT_LOAD:
#ifdef CONFIG_RELOCATABLE
dest = output;
@@ -13138,7 +12726,7 @@ index e28437e..6a17460 100644
break;
default: /* Ignore other PT_* */ break;
}
-@@ -419,7 +422,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+@@ -443,7 +446,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
@@ -13193,7 +12781,7 @@ index 1fd7d57..0f7d096 100644
err = check_cpuflags();
} else if (err == 0x01 &&
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
-index 16ef025..91e033b 100644
+index 2d6b309..65defa1 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
@@ -14818,7 +14406,7 @@ index a350c99..c1bac24 100644
ret
ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
-index f4e6308..7ba29a1 100644
+index 3c71dd9..008b8db 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -93,23 +93,26 @@ For 32-bit we have the following conventions - kernel is built with
@@ -14857,7 +14445,7 @@ index f4e6308..7ba29a1 100644
.endm
.macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
-@@ -128,76 +131,87 @@ For 32-bit we have the following conventions - kernel is built with
+@@ -128,67 +131,78 @@ For 32-bit we have the following conventions - kernel is built with
.endm
.macro SAVE_EXTRA_REGS offset=0
@@ -14876,10 +14464,6 @@ index f4e6308..7ba29a1 100644
+ movq %rbp, RBP+\offset(%rsp)
+ movq %rbx, RBX+\offset(%rsp)
.endm
- .macro SAVE_EXTRA_REGS_RBP offset=0
-- movq %rbp, 4*8+\offset(%rsp)
-+ movq %rbp, RBP+\offset(%rsp)
- .endm
.macro RESTORE_EXTRA_REGS offset=0
- movq 0*8+\offset(%rsp), %r15
@@ -14965,21 +14549,93 @@ index f4e6308..7ba29a1 100644
- RESTORE_C_REGS_HELPER 1,0,0,1,1
+ RESTORE_C_REGS_HELPER 1,0,0,1,1,1
.endm
- .macro RESTORE_RSI_RDI
-- RESTORE_C_REGS_HELPER 0,0,0,0,0
-+ RESTORE_C_REGS_HELPER 0,0,0,0,0,1
- .endm
- .macro RESTORE_RSI_RDI_RDX
-- RESTORE_C_REGS_HELPER 0,0,0,0,1
-+ RESTORE_C_REGS_HELPER 0,0,0,0,1,1
- .endm
.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 80dcc92..da58bb6 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -150,6 +150,10 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
+ return 1; /* Something is enabled that we can't handle in phase 1 */
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /* Returns the syscall nr to run (which should match regs->orig_ax). */
+ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
+ unsigned long phase1_result)
+@@ -160,6 +164,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
+
+ BUG_ON(regs != task_pt_regs(current));
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ /*
+ * If we stepped into a sysenter/syscall insn, it trapped in
+ * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
+@@ -207,13 +216,6 @@ long syscall_trace_enter(struct pt_regs *regs)
+ return syscall_trace_enter_phase2(regs, arch, phase1_result);
+ }
+
+-static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
+-{
+- unsigned long top_of_stack =
+- (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
+- return (struct thread_info *)(top_of_stack - THREAD_SIZE);
+-}
+-
+ /* Called with IRQs disabled. */
+ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
+ {
+@@ -230,7 +232,7 @@ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
+ */
+ while (true) {
+ u32 cached_flags =
+- READ_ONCE(pt_regs_to_thread_info(regs)->flags);
++ READ_ONCE(current_thread_info()->flags);
+
+ if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
+ _TIF_UPROBE | _TIF_NEED_RESCHED |
+@@ -271,7 +273,7 @@ __visible void prepare_exit_to_usermode(struct pt_regs *regs)
+ */
+ __visible void syscall_return_slowpath(struct pt_regs *regs)
+ {
+- struct thread_info *ti = pt_regs_to_thread_info(regs);
++ struct thread_info *ti = current_thread_info();
+ u32 cached_flags = READ_ONCE(ti->flags);
+ bool step;
+
+@@ -281,6 +283,11 @@ __visible void syscall_return_slowpath(struct pt_regs *regs)
+ regs->orig_ax))
+ local_irq_enable();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ /*
+ * First do one-time work. If these work items are enabled, we
+ * want to run them exactly once per syscall exit with IRQs on.
+@@ -301,7 +308,7 @@ __visible void syscall_return_slowpath(struct pt_regs *regs)
+ step = unlikely(
+ (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
+ == _TIF_SINGLESTEP);
+- if (step || cached_flags & _TIF_SYSCALL_TRACE)
++ if (step || (cached_flags & _TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+ }
+
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
-index 21dc60a..844def1 100644
+index b2909bf..47ba402 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -157,13 +157,154 @@
+@@ -147,13 +147,154 @@
movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
@@ -15135,7 +14791,7 @@ index 21dc60a..844def1 100644
cld
PUSH_GS
pushl %fs
-@@ -176,7 +317,7 @@
+@@ -166,7 +307,7 @@
pushl %edx
pushl %ecx
pushl %ebx
@@ -15144,7 +14800,7 @@ index 21dc60a..844def1 100644
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
-@@ -184,6 +325,15 @@
+@@ -174,6 +315,15 @@
SET_KERNEL_GS %edx
.endm
@@ -15160,7 +14816,7 @@ index 21dc60a..844def1 100644
.macro RESTORE_INT_REGS
popl %ebx
popl %ecx
-@@ -222,7 +372,7 @@ ENTRY(ret_from_fork)
+@@ -212,7 +362,7 @@ ENTRY(ret_from_fork)
pushl $0x0202 # Reset kernel eflags
popfl
jmp syscall_exit
@@ -15169,7 +14825,7 @@ index 21dc60a..844def1 100644
ENTRY(ret_from_kernel_thread)
pushl %eax
-@@ -262,7 +412,15 @@ ret_from_intr:
+@@ -252,7 +402,15 @@ ret_from_intr:
andl $SEGMENT_RPL_MASK, %eax
#endif
cmpl $USER_RPL, %eax
@@ -15185,10 +14841,10 @@ index 21dc60a..844def1 100644
ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT
-@@ -274,8 +432,8 @@ ENTRY(resume_userspace)
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
- # int/exception return?
- jne work_pending
+@@ -260,8 +418,8 @@ ENTRY(resume_userspace)
+ TRACE_IRQS_OFF
+ movl %esp, %eax
+ call prepare_exit_to_usermode
- jmp restore_all
-END(ret_from_exception)
+ jmp restore_all_pax
@@ -15196,7 +14852,7 @@ index 21dc60a..844def1 100644
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
-@@ -287,7 +445,7 @@ need_resched:
+@@ -273,7 +431,7 @@ need_resched:
jz restore_all
call preempt_schedule_irq
jmp need_resched
@@ -15205,7 +14861,7 @@ index 21dc60a..844def1 100644
#endif
/*
-@@ -312,32 +470,44 @@ sysenter_past_esp:
+@@ -298,32 +456,44 @@ sysenter_past_esp:
pushl $__USER_CS
/*
* Push current_thread_info()->sysenter_return to the stack.
@@ -15254,11 +14910,11 @@ index 21dc60a..844def1 100644
+#endif
+
testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
- jnz sysenter_audit
+ jnz syscall_trace_entry
sysenter_do_call:
-@@ -353,12 +523,24 @@ sysenter_after_call:
+@@ -339,20 +509,38 @@ sysenter_after_call:
testl $_TIF_ALLWORK_MASK, %ecx
- jnz sysexit_audit
+ jnz syscall_exit_work_irqs_off
sysenter_exit:
+
+#ifdef CONFIG_PAX_RANDKSTACK
@@ -15281,19 +14937,6 @@ index 21dc60a..844def1 100644
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
-@@ -372,6 +554,9 @@ sysenter_audit:
- pushl PT_ESI(%esp) /* a3: 5th arg */
- pushl PT_EDX+4(%esp) /* a2: 4th arg */
- call __audit_syscall_entry
-+
-+ pax_erase_kstack
-+
- popl %ecx /* get that remapped edx off the stack */
- popl %ecx /* get that remapped esi off the stack */
- movl PT_EAX(%esp), %eax /* reload syscall number */
-@@ -397,10 +582,16 @@ sysexit_audit:
- #endif
-
.pushsection .fixup, "ax"
-2: movl $0, PT_FS(%esp)
+4: movl $0, PT_FS(%esp)
@@ -15310,7 +14953,7 @@ index 21dc60a..844def1 100644
PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)
-@@ -410,6 +601,11 @@ ENTRY(entry_INT80_32)
+@@ -362,6 +550,11 @@ ENTRY(entry_INT80_32)
pushl %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
@@ -15322,9 +14965,9 @@ index 21dc60a..844def1 100644
# system call tracing in operation / emulation
testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
jnz syscall_trace_entry
-@@ -429,6 +625,15 @@ syscall_exit:
- testl $_TIF_ALLWORK_MASK, %ecx # current->work
- jnz syscall_exit_work
+@@ -375,6 +568,15 @@ syscall_exit:
+ LOCKDEP_SYS_EXIT
+ jmp syscall_exit_work
+restore_all_pax:
+
@@ -15338,7 +14981,7 @@ index 21dc60a..844def1 100644
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
-@@ -483,14 +688,34 @@ ldt_ss:
+@@ -429,14 +631,34 @@ ldt_ss:
* compensating for the offset by changing to the ESPFIX segment with
* a base address that matches for the difference.
*/
@@ -15376,39 +15019,7 @@ index 21dc60a..844def1 100644
pushl $__ESPFIX_SS
pushl %eax /* new kernel esp */
/*
-@@ -519,20 +744,18 @@ work_resched:
- movl TI_flags(%ebp), %ecx
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
- # than syscall tracing?
-- jz restore_all
-+ jz restore_all_pax
- testb $_TIF_NEED_RESCHED, %cl
- jnz work_resched
-
- work_notifysig: # deal with pending signals and
- # notify-resume requests
-+ movl %esp, %eax
- #ifdef CONFIG_VM86
- testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
-- movl %esp, %eax
- jnz work_notifysig_v86 # returning to kernel-space or
- # vm86-space
- 1:
--#else
-- movl %esp, %eax
- #endif
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -553,7 +776,7 @@ work_notifysig_v86:
- movl %eax, %esp
- jmp 1b
- #endif
--END(work_pending)
-+ENDPROC(work_pending)
-
- # perform syscall exit tracing
- ALIGN
-@@ -561,11 +784,14 @@ syscall_trace_entry:
+@@ -456,11 +678,14 @@ syscall_trace_entry:
movl $-ENOSYS, PT_EAX(%esp)
movl %esp, %eax
call syscall_trace_enter
@@ -15424,11 +15035,13 @@ index 21dc60a..844def1 100644
# perform syscall exit tracing
ALIGN
-@@ -578,24 +804,28 @@ syscall_exit_work:
+@@ -471,25 +696,29 @@ syscall_exit_work_irqs_off:
+ syscall_exit_work:
movl %esp, %eax
- call syscall_trace_leave
- jmp resume_userspace
+ call syscall_return_slowpath
+- jmp restore_all
-END(syscall_exit_work)
++ jmp restore_all_pax
+ENDPROC(syscall_exit_work)
syscall_fault:
@@ -15457,7 +15070,7 @@ index 21dc60a..844def1 100644
.macro FIXUP_ESPFIX_STACK
/*
-@@ -607,8 +837,15 @@ END(sysenter_badsys)
+@@ -501,8 +730,15 @@ END(sysenter_badsys)
*/
#ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */
@@ -15475,7 +15088,7 @@ index 21dc60a..844def1 100644
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS
-@@ -644,7 +881,7 @@ ENTRY(irq_entries_start)
+@@ -538,7 +774,7 @@ ENTRY(irq_entries_start)
jmp common_interrupt
.align 8
.endr
@@ -15484,7 +15097,7 @@ index 21dc60a..844def1 100644
/*
* the CPU automatically disables interrupts when executing an IRQ vector,
-@@ -691,7 +928,7 @@ ENTRY(coprocessor_error)
+@@ -585,7 +821,7 @@ ENTRY(coprocessor_error)
pushl $0
pushl $do_coprocessor_error
jmp error_code
@@ -15493,7 +15106,7 @@ index 21dc60a..844def1 100644
ENTRY(simd_coprocessor_error)
ASM_CLAC
-@@ -705,25 +942,25 @@ ENTRY(simd_coprocessor_error)
+@@ -599,25 +835,25 @@ ENTRY(simd_coprocessor_error)
pushl $do_simd_coprocessor_error
#endif
jmp error_code
@@ -15523,7 +15136,7 @@ index 21dc60a..844def1 100644
#endif
ENTRY(overflow)
-@@ -731,59 +968,59 @@ ENTRY(overflow)
+@@ -625,59 +861,59 @@ ENTRY(overflow)
pushl $0
pushl $do_overflow
jmp error_code
@@ -15592,7 +15205,7 @@ index 21dc60a..844def1 100644
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
-@@ -791,7 +1028,7 @@ ENTRY(machine_check)
+@@ -685,7 +921,7 @@ ENTRY(machine_check)
pushl $0
pushl machine_check_vector
jmp error_code
@@ -15601,7 +15214,7 @@ index 21dc60a..844def1 100644
#endif
ENTRY(spurious_interrupt_bug)
-@@ -799,7 +1036,7 @@ ENTRY(spurious_interrupt_bug)
+@@ -693,7 +929,7 @@ ENTRY(spurious_interrupt_bug)
pushl $0
pushl $do_spurious_interrupt_bug
jmp error_code
@@ -15610,7 +15223,7 @@ index 21dc60a..844def1 100644
#ifdef CONFIG_XEN
/*
-@@ -906,7 +1143,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+@@ -800,7 +1036,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
ENTRY(mcount)
ret
@@ -15619,7 +15232,7 @@ index 21dc60a..844def1 100644
ENTRY(ftrace_caller)
pushl %eax
-@@ -936,7 +1173,7 @@ ftrace_graph_call:
+@@ -830,7 +1066,7 @@ ftrace_graph_call:
.globl ftrace_stub
ftrace_stub:
ret
@@ -15628,7 +15241,7 @@ index 21dc60a..844def1 100644
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
-@@ -1034,7 +1271,7 @@ trace:
+@@ -928,7 +1164,7 @@ trace:
popl %ecx
popl %eax
jmp ftrace_stub
@@ -15637,7 +15250,7 @@ index 21dc60a..844def1 100644
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
-@@ -1052,7 +1289,7 @@ ENTRY(ftrace_graph_caller)
+@@ -946,7 +1182,7 @@ ENTRY(ftrace_graph_caller)
popl %ecx
popl %eax
ret
@@ -15646,7 +15259,7 @@ index 21dc60a..844def1 100644
.globl return_to_handler
return_to_handler:
-@@ -1100,14 +1337,17 @@ error_code:
+@@ -994,14 +1230,17 @@ error_code:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -15666,7 +15279,7 @@ index 21dc60a..844def1 100644
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1145,7 +1385,7 @@ debug_stack_correct:
+@@ -1039,7 +1278,7 @@ debug_stack_correct:
movl %esp, %eax # pt_regs pointer
call do_debug
jmp ret_from_exception
@@ -15675,7 +15288,7 @@ index 21dc60a..844def1 100644
/*
* NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1184,6 +1424,9 @@ nmi_stack_correct:
+@@ -1078,6 +1317,9 @@ nmi_stack_correct:
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
call do_nmi
@@ -15685,7 +15298,7 @@ index 21dc60a..844def1 100644
jmp restore_all_notrace
nmi_stack_fixup:
-@@ -1217,11 +1460,14 @@ nmi_espfix_stack:
+@@ -1111,11 +1353,14 @@ nmi_espfix_stack:
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
call do_nmi
@@ -15701,7 +15314,7 @@ index 21dc60a..844def1 100644
ENTRY(int3)
ASM_CLAC
-@@ -1232,17 +1478,17 @@ ENTRY(int3)
+@@ -1126,17 +1371,17 @@ ENTRY(int3)
movl %esp, %eax # pt_regs pointer
call do_int3
jmp ret_from_exception
@@ -15723,10 +15336,10 @@ index 21dc60a..844def1 100644
+ENDPROC(async_page_fault)
#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index d330840..4f1925e 100644
+index 055a01d..8dddafe 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -37,6 +37,8 @@
+@@ -36,6 +36,8 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>
@@ -15735,7 +15348,7 @@ index d330840..4f1925e 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
-@@ -54,6 +56,402 @@ ENTRY(native_usergs_sysret64)
+@@ -53,6 +55,402 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -16138,7 +15751,7 @@ index d330840..4f1925e 100644
.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
bt $9, EFLAGS(%rsp) /* interrupts off? */
-@@ -89,7 +487,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -88,7 +486,7 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
@@ -16147,7 +15760,7 @@ index d330840..4f1925e 100644
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
-@@ -149,14 +547,6 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+@@ -148,14 +546,6 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
@@ -16162,7 +15775,7 @@ index d330840..4f1925e 100644
pushq %r11 /* pt_regs->flags */
pushq $__USER_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
-@@ -172,7 +562,27 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+@@ -171,7 +561,27 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r11 /* pt_regs->r11 */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
@@ -16191,7 +15804,7 @@ index d330840..4f1925e 100644
jnz tracesys
entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0
-@@ -205,9 +615,13 @@ entry_SYSCALL_64_fastpath:
+@@ -204,9 +614,13 @@ entry_SYSCALL_64_fastpath:
* flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
* very bad.
*/
@@ -16206,7 +15819,7 @@ index d330840..4f1925e 100644
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RIP(%rsp), %rcx
movq EFLAGS(%rsp), %r11
-@@ -236,6 +650,9 @@ tracesys:
+@@ -240,6 +654,9 @@ tracesys:
call syscall_trace_enter_phase1
test %rax, %rax
jnz tracesys_phase2 /* if needed, run the slow path */
@@ -16216,7 +15829,7 @@ index d330840..4f1925e 100644
RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
movq ORIG_RAX(%rsp), %rax
jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
-@@ -247,6 +664,8 @@ tracesys_phase2:
+@@ -251,6 +668,8 @@ tracesys_phase2:
movq %rax, %rdx
call syscall_trace_enter_phase2
@@ -16225,16 +15838,16 @@ index d330840..4f1925e 100644
/*
* Reload registers from stack in case ptrace changed them.
* We don't reload %rax because syscall_trace_entry_phase2() returned
-@@ -284,6 +703,8 @@ GLOBAL(int_with_check)
- andl %edi, %edx
- jnz int_careful
- andl $~TS_COMPAT, TI_status(%rcx)
+@@ -279,6 +698,8 @@ GLOBAL(int_ret_from_sys_call)
+ SAVE_EXTRA_REGS
+ movq %rsp, %rdi
+ call syscall_return_slowpath /* returns with IRQs disabled */
+ pax_exit_kernel_user
+ pax_erase_kstack
- jmp syscall_return
+ RESTORE_EXTRA_REGS
+ TRACE_IRQS_IRETQ /* we're about to change IF */
- /*
-@@ -407,14 +828,14 @@ syscall_return_via_sysret:
+@@ -353,14 +774,14 @@ syscall_return_via_sysret:
opportunistic_sysret_failed:
SWAPGS
jmp restore_c_regs_and_iret
@@ -16251,7 +15864,7 @@ index d330840..4f1925e 100644
.endm
FORK_LIKE clone
-@@ -434,7 +855,7 @@ return_from_execve:
+@@ -380,7 +801,7 @@ return_from_execve:
ZERO_EXTRA_REGS
movq %rax, RAX(%rsp)
jmp int_ret_from_sys_call
@@ -16260,7 +15873,7 @@ index d330840..4f1925e 100644
/*
* Remaining execve stubs are only 7 bytes long.
* ENTRY() often aligns to 16 bytes, which in this case has no benefits.
-@@ -443,7 +864,7 @@ END(stub_execve)
+@@ -389,7 +810,7 @@ END(stub_execve)
GLOBAL(stub_execveat)
call sys_execveat
jmp return_from_execve
@@ -16269,7 +15882,7 @@ index d330840..4f1925e 100644
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
.align 8
-@@ -451,15 +872,15 @@ GLOBAL(stub_x32_execve)
+@@ -397,15 +818,15 @@ GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve)
call compat_sys_execve
jmp return_from_execve
@@ -16289,7 +15902,7 @@ index d330840..4f1925e 100644
#endif
/*
-@@ -488,7 +909,7 @@ ENTRY(stub_x32_rt_sigreturn)
+@@ -434,7 +855,7 @@ ENTRY(stub_x32_rt_sigreturn)
SAVE_EXTRA_REGS 8
call sys32_x32_rt_sigreturn
jmp return_from_stub
@@ -16298,7 +15911,7 @@ index d330840..4f1925e 100644
#endif
/*
-@@ -527,7 +948,7 @@ ENTRY(ret_from_fork)
+@@ -473,7 +894,7 @@ ENTRY(ret_from_fork)
movl $0, RAX(%rsp)
RESTORE_EXTRA_REGS
jmp int_ret_from_sys_call
@@ -16307,7 +15920,7 @@ index d330840..4f1925e 100644
/*
* Build the entry stubs with some assembler magic.
-@@ -542,7 +963,7 @@ ENTRY(irq_entries_start)
+@@ -488,7 +909,7 @@ ENTRY(irq_entries_start)
jmp common_interrupt
.align 8
.endr
@@ -16316,37 +15929,10 @@ index d330840..4f1925e 100644
/*
* Interrupt entry/exit.
-@@ -555,21 +976,13 @@ END(irq_entries_start)
- /* 0(%rsp): ~(interrupt number) */
- .macro interrupt func
- cld
-- /*
-- * Since nothing in interrupt handling code touches r12...r15 members
-- * of "struct pt_regs", and since interrupts can nest, we can save
-- * four stack slots and simultaneously provide
-- * an unwind-friendly stack layout by saving "truncated" pt_regs
-- * exactly up to rbp slot, without these members.
-- */
-- ALLOC_PT_GPREGS_ON_STACK -RBP
-- SAVE_C_REGS -RBP
-- /* this goes to 0(%rsp) for unwinder, not for saving the value: */
-- SAVE_EXTRA_REGS_RBP -RBP
-+ ALLOC_PT_GPREGS_ON_STACK
-+ SAVE_C_REGS
-+ SAVE_EXTRA_REGS
-
-- leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */
-+ movq %rsp, %rdi /* arg1 for \func (pointer to pt_regs) */
-
-- testb $3, CS-RBP(%rsp)
-+ testb $3, CS(%rsp)
- jz 1f
- SWAPGS
- 1:
-@@ -584,6 +997,18 @@ END(irq_entries_start)
+@@ -529,6 +950,18 @@ END(irq_entries_start)
incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
- pushq %rsi
+ pushq %rdi
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ testb $3, CS(%rdi)
@@ -16362,25 +15948,16 @@ index d330840..4f1925e 100644
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
-@@ -608,7 +1033,7 @@ ret_from_intr:
- /* Restore saved previous stack */
- popq %rsi
- /* return code expects complete pt_regs - adjust rsp accordingly: */
-- leaq -RBP(%rsi), %rsp
-+ movq %rsi, %rsp
-
- testb $3, CS(%rsp)
- jz retint_kernel
-@@ -630,6 +1055,8 @@ retint_swapgs: /* return to user-space */
- * The iretq could re-enable interrupts:
- */
- DISABLE_INTERRUPTS(CLBR_ANY)
+@@ -561,6 +994,8 @@ ret_from_intr:
+ GLOBAL(retint_user)
+ mov %rsp,%rdi
+ call prepare_exit_to_usermode
+ pax_exit_kernel_user
+# pax_erase_kstack
TRACE_IRQS_IRETQ
-
SWAPGS
-@@ -648,6 +1075,21 @@ retint_kernel:
+ jmp restore_regs_and_iret
+@@ -578,6 +1013,21 @@ retint_kernel:
jmp 0b
1:
#endif
@@ -16402,7 +15979,7 @@ index d330840..4f1925e 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -689,15 +1131,15 @@ native_irq_return_ldt:
+@@ -621,15 +1071,15 @@ native_irq_return_ldt:
SWAPGS
movq PER_CPU_VAR(espfix_waddr), %rdi
movq %rax, (0*8)(%rdi) /* RAX */
@@ -16423,16 +16000,16 @@ index d330840..4f1925e 100644
movq %rax, (4*8)(%rdi)
andl $0xffff0000, %eax
popq %rdi
-@@ -738,7 +1180,7 @@ retint_signal:
- GET_THREAD_INFO(%rcx)
- jmp retint_with_reschedule
-
+@@ -639,7 +1089,7 @@ native_irq_return_ldt:
+ popq %rax
+ jmp native_irq_return_iret
+ #endif
-END(common_interrupt)
+ENDPROC(common_interrupt)
/*
* APIC interrupts.
-@@ -750,7 +1192,7 @@ ENTRY(\sym)
+@@ -651,7 +1101,7 @@ ENTRY(\sym)
.Lcommon_\sym:
interrupt \do_sym
jmp ret_from_intr
@@ -16441,7 +16018,7 @@ index d330840..4f1925e 100644
.endm
#ifdef CONFIG_TRACING
-@@ -815,7 +1257,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+@@ -716,7 +1166,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
/*
* Exception entry points.
*/
@@ -16450,7 +16027,7 @@ index d330840..4f1925e 100644
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
-@@ -862,6 +1304,12 @@ ENTRY(\sym)
+@@ -763,6 +1213,12 @@ ENTRY(\sym)
.endif
.if \shift_ist != -1
@@ -16463,7 +16040,7 @@ index d330840..4f1925e 100644
subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
.endif
-@@ -905,7 +1353,7 @@ ENTRY(\sym)
+@@ -806,7 +1262,7 @@ ENTRY(\sym)
jmp error_exit /* %ebx: no swapgs flag */
.endif
@@ -16472,7 +16049,7 @@ index d330840..4f1925e 100644
.endm
#ifdef CONFIG_TRACING
-@@ -947,8 +1395,9 @@ gs_change:
+@@ -848,8 +1304,9 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq
@@ -16483,7 +16060,7 @@ index d330840..4f1925e 100644
_ASM_EXTABLE(gs_change, bad_gs)
.section .fixup, "ax"
-@@ -970,8 +1419,9 @@ ENTRY(do_softirq_own_stack)
+@@ -871,8 +1328,9 @@ ENTRY(do_softirq_own_stack)
call __do_softirq
leaveq
decl PER_CPU_VAR(irq_count)
@@ -16494,7 +16071,7 @@ index d330840..4f1925e 100644
#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-@@ -1007,7 +1457,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
+@@ -908,7 +1366,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
call xen_maybe_preempt_hcall
#endif
jmp error_exit
@@ -16503,7 +16080,7 @@ index d330840..4f1925e 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1052,7 +1502,7 @@ ENTRY(xen_failsafe_callback)
+@@ -953,7 +1411,7 @@ ENTRY(xen_failsafe_callback)
SAVE_C_REGS
SAVE_EXTRA_REGS
jmp error_exit
@@ -16512,7 +16089,7 @@ index d330840..4f1925e 100644
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1101,8 +1551,36 @@ ENTRY(paranoid_entry)
+@@ -1002,8 +1460,36 @@ ENTRY(paranoid_entry)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx, %ebx
@@ -16551,7 +16128,7 @@ index d330840..4f1925e 100644
/*
* "Paranoid" exit path from exception stack. This is invoked
-@@ -1119,19 +1597,26 @@ END(paranoid_entry)
+@@ -1020,19 +1506,26 @@ END(paranoid_entry)
ENTRY(paranoid_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
@@ -16580,10 +16157,11 @@ index d330840..4f1925e 100644
/*
* Save all registers in pt_regs, and switch gs if needed.
-@@ -1149,7 +1634,18 @@ ENTRY(error_entry)
- SWAPGS
+@@ -1059,8 +1552,18 @@ ENTRY(error_entry)
+ #endif
- error_entry_done:
+ .Lerror_entry_done:
+-
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ testb $3, CS+8(%rsp)
+ jnz 1f
@@ -16599,17 +16177,22 @@ index d330840..4f1925e 100644
ret
/*
-@@ -1199,7 +1695,7 @@ error_bad_iret:
+@@ -1109,11 +1612,11 @@ ENTRY(error_entry)
mov %rax, %rsp
decl %ebx
- jmp error_entry_done
+ jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+ENDPROC(error_entry)
/*
-@@ -1212,10 +1708,10 @@ ENTRY(error_exit)
- RESTORE_EXTRA_REGS
+- * On entry, EBS is a "return to kernel mode" flag:
++ * On entry, EBX is a "return to kernel mode" flag:
+ * 1: already in kernel mode, don't need SWAPGS
+ * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
+ */
+@@ -1121,10 +1624,10 @@ ENTRY(error_exit)
+ movl %ebx, %eax
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl %eax, %eax
@@ -16621,7 +16204,7 @@ index d330840..4f1925e 100644
/* Runs on exception stack */
ENTRY(nmi)
-@@ -1269,6 +1765,8 @@ ENTRY(nmi)
+@@ -1178,6 +1681,8 @@ ENTRY(nmi)
* other IST entries.
*/
@@ -16630,7 +16213,7 @@ index d330840..4f1925e 100644
/* Use %rdx as our temp variable throughout */
pushq %rdx
-@@ -1312,6 +1810,12 @@ ENTRY(nmi)
+@@ -1221,6 +1726,12 @@ ENTRY(nmi)
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
@@ -16643,7 +16226,7 @@ index d330840..4f1925e 100644
/*
* At this point we no longer need to worry about stack damage
* due to nesting -- we're on the normal thread stack and we're
-@@ -1322,12 +1826,19 @@ ENTRY(nmi)
+@@ -1231,12 +1742,19 @@ ENTRY(nmi)
movq $-1, %rsi
call do_nmi
@@ -16663,7 +16246,7 @@ index d330840..4f1925e 100644
jmp restore_c_regs_and_iret
.Lnmi_from_kernel:
-@@ -1449,6 +1960,7 @@ nested_nmi_out:
+@@ -1358,6 +1876,7 @@ nested_nmi_out:
popq %rdx
/* We are returning to kernel mode, so this cannot result in a fault. */
@@ -16671,7 +16254,7 @@ index d330840..4f1925e 100644
INTERRUPT_RETURN
first_nmi:
-@@ -1522,20 +2034,22 @@ end_repeat_nmi:
+@@ -1431,20 +1950,22 @@ end_repeat_nmi:
ALLOC_PT_GPREGS_ON_STACK
/*
@@ -16697,7 +16280,7 @@ index d330840..4f1925e 100644
jnz nmi_restore
nmi_swapgs:
SWAPGS_UNSAFE_STACK
-@@ -1546,6 +2060,8 @@ nmi_restore:
+@@ -1455,6 +1976,8 @@ nmi_restore:
/* Point RSP at the "iret" frame. */
REMOVE_PT_GPREGS_FROM_STACK 6*8
@@ -16706,7 +16289,7 @@ index d330840..4f1925e 100644
/*
* Clear "NMI executing". Set DF first so that we can easily
* distinguish the remaining code between here and IRET from
-@@ -1563,9 +2079,9 @@ nmi_restore:
+@@ -1472,9 +1995,9 @@ nmi_restore:
* mode, so this cannot result in a fault.
*/
INTERRUPT_RETURN
@@ -16719,7 +16302,7 @@ index d330840..4f1925e 100644
-END(ignore_sysret)
+ENDPROC(ignore_sysret)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
-index a7e257d..3a6ad23 100644
+index a9360d4..e87da3e 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -13,8 +13,10 @@
@@ -16853,7 +16436,7 @@ index a7e257d..3a6ad23 100644
jnz sysexit_audit
sysexit_from_sys_call:
/*
-@@ -138,7 +191,9 @@ sysexit_from_sys_call:
+@@ -138,11 +191,18 @@ sysexit_from_sys_call:
* This code path is still called 'sysexit' because it pairs
* with 'sysenter' and it uses the SYSENTER calling convention.
*/
@@ -16863,8 +16446,17 @@ index a7e257d..3a6ad23 100644
+ andl $~TS_COMPAT, TI_status(%r11)
movl RIP(%rsp), %ecx /* User %eip */
movq RAX(%rsp), %rax
- RESTORE_RSI_RDI
-@@ -194,6 +249,8 @@ sysexit_from_sys_call:
+ movl RSI(%rsp), %esi
+ movl RDI(%rsp), %edi
++
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq R12(%rsp), %r12
++#endif
++
+ xorl %edx, %edx /* Do not leak kernel information */
+ xorq %r8, %r8
+ xorq %r9, %r9
+@@ -195,6 +255,8 @@ sysexit_from_sys_call:
movl %eax, %edi /* arg1 (RDI) <= syscall number (EAX) */
call __audit_syscall_entry
@@ -16873,16 +16465,16 @@ index a7e257d..3a6ad23 100644
/*
* We are going to jump back to the syscall dispatch code.
* Prepare syscall args as required by the 64-bit C ABI.
-@@ -209,7 +266,7 @@ sysexit_from_sys_call:
- .endm
-
+@@ -212,7 +274,7 @@ sysexit_from_sys_call:
.macro auditsys_exit exit
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
jnz ia32_ret_from_sys_call
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -220,10 +277,11 @@ sysexit_from_sys_call:
+ movl %eax, %esi /* second arg, syscall return value */
+ cmpl $-MAX_ERRNO, %eax /* is it an error ? */
+@@ -221,10 +283,11 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
@@ -16895,7 +16487,7 @@ index a7e257d..3a6ad23 100644
jz \exit
xorl %eax, %eax /* Do not leak kernel information */
movq %rax, R11(%rsp)
-@@ -249,7 +307,7 @@ sysenter_fix_flags:
+@@ -250,7 +313,7 @@ sysenter_fix_flags:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -16904,7 +16496,7 @@ index a7e257d..3a6ad23 100644
jz sysenter_auditsys
#endif
SAVE_EXTRA_REGS
-@@ -269,6 +327,9 @@ sysenter_tracesys:
+@@ -270,6 +333,9 @@ sysenter_tracesys:
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS
@@ -16914,7 +16506,7 @@ index a7e257d..3a6ad23 100644
jmp sysenter_do_call
ENDPROC(entry_SYSENTER_compat)
-@@ -311,7 +372,6 @@ ENTRY(entry_SYSCALL_compat)
+@@ -312,7 +378,6 @@ ENTRY(entry_SYSCALL_compat)
SWAPGS_UNSAFE_STACK
movl %esp, %r8d
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -16922,7 +16514,7 @@ index a7e257d..3a6ad23 100644
/* Zero-extending 32-bit regs, do not remove */
movl %eax, %eax
-@@ -331,16 +391,41 @@ ENTRY(entry_SYSCALL_compat)
+@@ -332,16 +397,40 @@ ENTRY(entry_SYSCALL_compat)
pushq $-ENOSYS /* pt_regs->ax */
sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
@@ -16944,9 +16536,8 @@ index a7e257d..3a6ad23 100644
*/
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
++ addq pax_user_shadow_base, %r8
+ ASM_PAX_OPEN_USERLAND
-+ movq pax_user_shadow_base, %r8
-+ addq RSP(%rsp), %r8
+#endif
+
ASM_STAC
@@ -16966,7 +16557,7 @@ index a7e257d..3a6ad23 100644
jnz cstar_tracesys
cstar_do_call:
-@@ -358,13 +443,16 @@ cstar_dispatch:
+@@ -359,19 +448,27 @@ cstar_dispatch:
call *ia32_sys_call_table(, %rax, 8)
movq %rax, RAX(%rsp)
1:
@@ -16982,10 +16573,22 @@ index a7e257d..3a6ad23 100644
+ pax_exit_kernel_user
+ pax_erase_kstack
+ andl $~TS_COMPAT, TI_status(%r11)
- RESTORE_RSI_RDI_RDX
+ movl RDX(%rsp), %edx
+ movl RSI(%rsp), %esi
+ movl RDI(%rsp), %edi
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
-@@ -403,7 +491,7 @@ sysretl_audit:
+- movq RAX(%rsp), %rax
++ movq RAX(%rsp), %rax
++
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq R12(%rsp), %r12
++#endif
++
+ xorq %r10, %r10
+ xorq %r9, %r9
+ xorq %r8, %r8
+@@ -406,7 +503,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -16994,7 +16597,7 @@ index a7e257d..3a6ad23 100644
jz cstar_auditsys
#endif
xchgl %r9d, %ebp
-@@ -426,11 +514,19 @@ cstar_tracesys:
+@@ -429,6 +526,9 @@ cstar_tracesys:
RESTORE_EXTRA_REGS
xchgl %ebp, %r9d
@@ -17004,17 +16607,19 @@ index a7e257d..3a6ad23 100644
jmp cstar_do_call
END(entry_SYSCALL_compat)
- ia32_badarg:
- ASM_CLAC
+@@ -449,6 +549,11 @@ ia32_badarg:
+ */
+
+ ASM_CLAC /* undo STAC */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_CLOSE_USERLAND
+#endif
+
- movq $-EFAULT, RAX(%rsp)
- ia32_ret_from_sys_call:
- xorl %eax, %eax /* Do not leak kernel information */
-@@ -462,14 +558,8 @@ ia32_ret_from_sys_call:
+ movq $-EFAULT, RAX(%rsp) /* return -EFAULT if possible */
+
+ /* Fill in the rest of pt_regs */
+@@ -505,14 +610,8 @@ ia32_ret_from_sys_call:
*/
ENTRY(entry_INT80_compat)
@@ -17029,7 +16634,7 @@ index a7e257d..3a6ad23 100644
/* Zero-extending 32-bit regs, do not remove */
movl %eax, %eax
-@@ -488,8 +578,26 @@ ENTRY(entry_INT80_compat)
+@@ -531,8 +630,26 @@ ENTRY(entry_INT80_compat)
cld
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
@@ -17058,7 +16663,7 @@ index a7e257d..3a6ad23 100644
jnz ia32_tracesys
ia32_do_call:
-@@ -524,6 +632,9 @@ ia32_tracesys:
+@@ -567,6 +684,9 @@ ia32_tracesys:
movl RDI(%rsp), %edi
movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS
@@ -17089,15 +16694,15 @@ index efb2b93..8a9cb8e 100644
_ASM_NOKPROBE(restore)
#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
-index e970320..c006fea 100644
+index a3d0767..36d66c9 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
--VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
GCOV_PROFILE := n
@@ -17142,7 +16747,7 @@ index 0224987..0359810 100644
fprintf(outfile, "const struct vdso_image %s = {\n", name);
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
-index 1c9f750..cfddb1a 100644
+index 4345431..50ae49a 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -19,10 +19,7 @@
@@ -17236,9 +16841,9 @@ index 1c9f750..cfddb1a 100644
return map_vdso(&vdso_image_x32, true);
- }
#endif
-
+ #ifdef CONFIG_IA32_EMULATION
return load_vdso32();
-@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+@@ -234,15 +226,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
#endif
#ifdef CONFIG_X86_64
@@ -17255,7 +16860,7 @@ index 1c9f750..cfddb1a 100644
{
int cpu = smp_processor_id();
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
-index 2dcc6ff..082dc7a 100644
+index b160c0c..a00ee2f 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -38,15 +38,13 @@
@@ -17285,7 +16890,7 @@ index 2dcc6ff..082dc7a 100644
}
/*
-@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
+@@ -283,8 +280,8 @@ static const struct vm_operations_struct gate_vma_ops = {
static struct vm_area_struct gate_vma = {
.vm_start = VSYSCALL_ADDR,
.vm_end = VSYSCALL_ADDR + PAGE_SIZE,
@@ -17335,10 +16940,10 @@ index ae6aad1..719d6d9 100644
set_fs(KERNEL_DS);
has_dumped = 1;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index ae3a29a..cea65e9 100644
+index a0a19b7..10b0289 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
-@@ -216,7 +216,7 @@ asmlinkage long sys32_sigreturn(void)
+@@ -123,7 +123,7 @@ asmlinkage long sys32_sigreturn(void)
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_COMPAT_NSIG_WORDS > 1
&& __copy_from_user((((char *) &set.sig) + 4),
@@ -17347,7 +16952,7 @@ index ae3a29a..cea65e9 100644
sizeof(frame->extramask))))
goto badframe;
-@@ -336,7 +336,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+@@ -243,7 +243,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
sp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0. */
@@ -17356,7 +16961,7 @@ index ae3a29a..cea65e9 100644
return (void __user *) sp;
}
-@@ -381,10 +381,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+@@ -288,10 +288,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
} else {
/* Return stub is in 32bit vsyscall page */
if (current->mm->context.vdso)
@@ -17370,7 +16975,7 @@ index ae3a29a..cea65e9 100644
}
put_user_try {
-@@ -394,7 +394,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+@@ -301,7 +301,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
* These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
@@ -17379,7 +16984,7 @@ index ae3a29a..cea65e9 100644
} put_user_catch(err);
if (err)
-@@ -436,7 +436,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -343,7 +343,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
0xb8,
__NR_ia32_rt_sigreturn,
0x80cd,
@@ -17388,7 +16993,7 @@ index ae3a29a..cea65e9 100644
};
frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
-@@ -459,16 +459,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -366,16 +366,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
@@ -17596,7 +17201,7 @@ index 7bfc85b..65d1ec4 100644
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index c839363..b9a8c43 100644
+index ebf6d5e..91c1cf2 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
@@ -17631,7 +17236,7 @@ index 20370c6..a2eb9b0 100644
"popl %%ebp\n\t"
"popl %%edi\n\t"
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index e916895..42d729d 100644
+index fb52aa6..527487e 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -28,6 +28,17 @@ static __always_inline int atomic_read(const atomic_t *v)
@@ -17897,10 +17502,10 @@ index e916895..42d729d 100644
+ return xchg(&v->counter, new);
+}
+
- /**
- * __atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
-@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ #define ATOMIC_OP(op) \
+ static inline void atomic_##op(int i, atomic_t *v) \
+ { \
+@@ -208,12 +354,25 @@ ATOMIC_OP(xor)
*/
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
@@ -17929,7 +17534,7 @@ index e916895..42d729d 100644
if (likely(old == c))
break;
c = old;
-@@ -207,6 +366,49 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -222,6 +381,49 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
/**
@@ -17979,53 +17584,8 @@ index e916895..42d729d 100644
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
*
-@@ -220,14 +422,37 @@ static __always_inline short int atomic_inc_short(short int *v)
- }
-
- /* These are x86-specific, used by some header files */
--#define atomic_clear_mask(mask, addr) \
-- asm volatile(LOCK_PREFIX "andl %0,%1" \
-- : : "r" (~(mask)), "m" (*(addr)) : "memory")
-+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "andl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (~(mask))
-+ : "memory");
-+}
-
--#define atomic_set_mask(mask, addr) \
-- asm volatile(LOCK_PREFIX "orl %0,%1" \
-- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
-- : "memory")
-+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "andl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (~(mask))
-+ : "memory");
-+}
-+
-+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "orl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (mask)
-+ : "memory");
-+}
-+
-+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
-+{
-+ asm volatile(LOCK_PREFIX "orl %1,%0"
-+ : "+m" (v->counter)
-+ : "r" (mask)
-+ : "memory");
-+}
-
- #ifdef CONFIG_X86_32
- # include <asm/atomic64_32.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index b154de7..3dc335d 100644
+index a11c30b..66fd8a0 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -12,6 +12,14 @@ typedef struct {
@@ -18200,7 +17760,7 @@ index b154de7..3dc335d 100644
* @i: integer value to subtract
* @v: pointer to type atomic64_t
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index b965f9e..8e22dd3 100644
+index 50e33ef..d36dd50 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
@@ -18472,28 +18032,6 @@ index b965f9e..8e22dd3 100644
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
-index e51a8f8..ee075df 100644
---- a/arch/x86/include/asm/barrier.h
-+++ b/arch/x86/include/asm/barrier.h
-@@ -57,7 +57,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-@@ -74,7 +74,7 @@ do { \
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index cfe3b95..d01b118 100644
--- a/arch/x86/include/asm/bitops.h
@@ -18612,19 +18150,6 @@ index 48f99f1..d78ebf9 100644
#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_SMP
-diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
-index 9bf3ea1..4693729 100644
---- a/arch/x86/include/asm/cacheflush.h
-+++ b/arch/x86/include/asm/cacheflush.h
-@@ -133,7 +133,7 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
- * before the WARN+BUG.
- */
- unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-- (void __user *) src, n);
-+ (void __force_user *) src, n);
- if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
- __func__, dst, src, unwritten))
- BUG();
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index f50de69..2b0a458 100644
--- a/arch/x86/include/asm/checksum_32.h
@@ -18745,10 +18270,10 @@ index acdee09..e5c31cd 100644
struct compat_timespec {
compat_time_t tv_sec;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 3d6606f..300641d 100644
+index 9727b3b..183784a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
-@@ -214,7 +214,8 @@
+@@ -216,7 +216,8 @@
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
@@ -18758,7 +18283,7 @@ index 3d6606f..300641d 100644
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-@@ -222,7 +223,7 @@
+@@ -224,7 +225,7 @@
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
@@ -18767,7 +18292,7 @@ index 3d6606f..300641d 100644
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-@@ -401,6 +402,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+@@ -404,6 +405,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
@@ -18775,7 +18300,7 @@ index 3d6606f..300641d 100644
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
-@@ -454,7 +456,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -457,7 +459,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
@@ -18785,7 +18310,7 @@ index 3d6606f..300641d 100644
return false;
#endif
-@@ -475,7 +478,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -478,7 +481,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -18794,7 +18319,7 @@ index 3d6606f..300641d 100644
"3: movb $1,%0\n"
"4:\n"
".previous\n"
-@@ -510,7 +513,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -513,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .byte 5f - 4f\n" /* repl len */
" .byte 3b - 2b\n" /* pad len */
".previous\n"
@@ -18803,7 +18328,7 @@ index 3d6606f..300641d 100644
"4: jmp %l[t_no]\n"
"5:\n"
".previous\n"
-@@ -545,7 +548,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -548,7 +551,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -18812,7 +18337,7 @@ index 3d6606f..300641d 100644
"3: movb $0,%0\n"
"4:\n"
".previous\n"
-@@ -560,7 +563,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -563,7 +566,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
".previous\n"
@@ -19105,7 +18630,7 @@ index fe884e1..46149ae 100644
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index f161c18..97d43e8 100644
+index 141c561..120c5386 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
@@ -19115,7 +18640,7 @@ index f161c18..97d43e8 100644
-#ifdef CONFIG_X86_64
-extern unsigned int vdso64_enabled;
-#endif
- #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
extern unsigned int vdso32_enabled;
#endif
@@ -250,7 +247,25 @@ extern int force_personality32;
@@ -19535,7 +19060,7 @@ index b4c1f54..e290c08 100644
pagefault_enable();
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
-index 6615032..9c233be 100644
+index 1e3408e..67c5ba1 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -158,8 +158,8 @@ static inline void unlock_vector_lock(void) {}
@@ -19563,7 +19088,7 @@ index 39bcefc..272d904 100644
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index cc9c61b..7b17f40 100644
+index de25aad..dc04476 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -42,6 +42,7 @@
@@ -19600,7 +19125,7 @@ index cc9c61b..7b17f40 100644
{
return __pa(address);
}
-@@ -192,7 +193,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+@@ -194,7 +195,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
return ioremap_nocache(offset, size);
}
@@ -19609,7 +19134,7 @@ index cc9c61b..7b17f40 100644
extern void set_iounmap_nonlazy(void);
-@@ -200,6 +201,17 @@ extern void set_iounmap_nonlazy(void);
+@@ -202,6 +203,17 @@ extern void set_iounmap_nonlazy(void);
#include <asm-generic/iomap.h>
@@ -19867,10 +19392,10 @@ index 0000000..2bfd3ba
+
+#endif /* X86_MMAN_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
-index 364d274..e51b4bc 100644
+index 55234d5..7e3c4bf 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
-@@ -17,7 +17,19 @@ typedef struct {
+@@ -19,7 +19,19 @@ typedef struct {
#endif
struct mutex lock;
@@ -19892,10 +19417,10 @@ index 364d274..e51b4bc 100644
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index 984abfe..9996c62 100644
+index 379cd36..25f4ba2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
-@@ -45,7 +45,7 @@ struct ldt_struct {
+@@ -46,7 +46,7 @@ struct ldt_struct {
* allocations, but it's not worth trying to optimize.
*/
struct desc_struct *entries;
@@ -19903,8 +19428,8 @@ index 984abfe..9996c62 100644
+ unsigned int size;
};
- static inline void load_mm_ldt(struct mm_struct *mm)
-@@ -86,26 +86,95 @@ void destroy_context(struct mm_struct *mm);
+ /*
+@@ -98,26 +98,95 @@ static inline void load_mm_ldt(struct mm_struct *mm)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -20000,10 +19525,10 @@ index 984abfe..9996c62 100644
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
-@@ -128,9 +197,31 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- */
+@@ -142,9 +211,31 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (unlikely(prev->context.ldt != next->context.ldt))
load_mm_ldt(next);
+ #endif
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+ if (!(__supported_pte_mask & _PAGE_NX)) {
@@ -20033,7 +19558,7 @@ index 984abfe..9996c62 100644
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-@@ -147,13 +238,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -161,13 +252,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
@@ -20241,10 +19766,10 @@ index b3bebf9..cb419e7 100644
#define __phys_reloc_hide(x) (x)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index d143bfa..30d1f41 100644
+index 10d0596..16a2a7c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
-@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
+@@ -530,7 +530,7 @@ static inline pmd_t __pmd(pmdval_t val)
return (pmd_t) { ret };
}
@@ -20253,7 +19778,7 @@ index d143bfa..30d1f41 100644
{
pmdval_t ret;
-@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+@@ -596,6 +596,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
val);
}
@@ -20272,7 +19797,7 @@ index d143bfa..30d1f41 100644
static inline void pgd_clear(pgd_t *pgdp)
{
set_pgd(pgdp, __pgd(0));
-@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+@@ -680,6 +692,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
@@ -20294,7 +19819,7 @@ index d143bfa..30d1f41 100644
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#ifdef CONFIG_QUEUED_SPINLOCKS
-@@ -933,7 +960,7 @@ extern void default_banner(void);
+@@ -903,7 +930,7 @@ extern void default_banner(void);
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
@@ -20303,7 +19828,7 @@ index d143bfa..30d1f41 100644
#endif
#define INTERRUPT_RETURN \
-@@ -1003,6 +1030,21 @@ extern void default_banner(void);
+@@ -973,6 +1000,21 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
@@ -20326,7 +19851,7 @@ index d143bfa..30d1f41 100644
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index a6b8f9f..fd61ef7 100644
+index 31247b5..6b5b8ef 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
@@ -20338,7 +19863,7 @@ index a6b8f9f..fd61ef7 100644
struct pv_lazy_ops {
-@@ -92,13 +92,13 @@ struct pv_lazy_ops {
+@@ -92,12 +92,12 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
@@ -20348,13 +19873,12 @@ index a6b8f9f..fd61ef7 100644
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
- unsigned long (*get_tsc_khz)(void);
-};
+} __no_const __no_randomize_layout;
struct pv_cpu_ops {
/* hooks for various privileged instructions */
-@@ -193,7 +193,7 @@ struct pv_cpu_ops {
+@@ -190,7 +190,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
@@ -20363,7 +19887,7 @@ index a6b8f9f..fd61ef7 100644
struct pv_irq_ops {
/*
-@@ -216,7 +216,7 @@ struct pv_irq_ops {
+@@ -213,7 +213,7 @@ struct pv_irq_ops {
#ifdef CONFIG_X86_64
void (*adjust_exception_frame)(void);
#endif
@@ -20372,7 +19896,7 @@ index a6b8f9f..fd61ef7 100644
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
-@@ -224,7 +224,7 @@ struct pv_apic_ops {
+@@ -221,7 +221,7 @@ struct pv_apic_ops {
unsigned long start_eip,
unsigned long start_esp);
#endif
@@ -20381,7 +19905,7 @@ index a6b8f9f..fd61ef7 100644
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
-@@ -314,6 +314,7 @@ struct pv_mmu_ops {
+@@ -311,6 +311,7 @@ struct pv_mmu_ops {
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
@@ -20389,7 +19913,7 @@ index a6b8f9f..fd61ef7 100644
#endif /* CONFIG_PGTABLE_LEVELS == 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-@@ -325,7 +326,13 @@ struct pv_mmu_ops {
+@@ -322,7 +323,13 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
@@ -20404,7 +19928,7 @@ index a6b8f9f..fd61ef7 100644
struct arch_spinlock;
#ifdef CONFIG_SMP
-@@ -347,11 +354,14 @@ struct pv_lock_ops {
+@@ -344,11 +351,14 @@ struct pv_lock_ops {
struct paravirt_callee_save lock_spinning;
void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
#endif /* !CONFIG_QUEUED_SPINLOCKS */
@@ -20421,7 +19945,7 @@ index a6b8f9f..fd61ef7 100644
struct paravirt_patch_template {
struct pv_init_ops pv_init_ops;
struct pv_time_ops pv_time_ops;
-@@ -360,7 +370,7 @@ struct paravirt_patch_template {
+@@ -357,7 +367,7 @@ struct paravirt_patch_template {
struct pv_apic_ops pv_apic_ops;
struct pv_mmu_ops pv_mmu_ops;
struct pv_lock_ops pv_lock_ops;
@@ -21007,6 +20531,19 @@ index 13f310b..f0ef42e 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
+index d8ce3ec..96b12e2 100644
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -41,7 +41,7 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+ * before the WARN+BUG.
+ */
+ unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
+- (void __user *) src, n);
++ (void __force_user *) src, n);
+ if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
+ __func__, dst, src, unwritten))
+ BUG();
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index b12f810..aedcc13 100644
--- a/arch/x86/include/asm/preempt.h
@@ -21021,7 +20558,7 @@ index b12f810..aedcc13 100644
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 944f178..f2269de 100644
+index 19577dd..f4acc54 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -136,7 +136,7 @@ struct cpuinfo_x86 {
@@ -21087,7 +20624,7 @@ index 944f178..f2269de 100644
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
-@@ -415,13 +429,6 @@ struct thread_struct {
+@@ -409,13 +423,6 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
@@ -21101,7 +20638,7 @@ index 944f178..f2269de 100644
};
/*
-@@ -463,10 +470,10 @@ static inline void native_swapgs(void)
+@@ -457,10 +464,10 @@ static inline void native_swapgs(void)
#endif
}
@@ -21114,7 +20651,7 @@ index 944f178..f2269de 100644
#else
/* sp0 on x86_32 is special in and around vm86 mode. */
return this_cpu_read_stable(cpu_current_top_of_stack);
-@@ -709,20 +716,30 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -695,19 +702,29 @@ static inline void spin_lock_prefetch(const void *x)
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)
@@ -21139,14 +20676,13 @@ index 944f178..f2269de 100644
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
- .vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
+ .fpu.state = &init_fpregs_state, \
}
extern unsigned long thread_saved_pc(struct task_struct *tsk);
-@@ -737,12 +754,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -722,12 +739,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
* "struct pt_regs" is possible, but they may contain the
* completely wrong values.
*/
@@ -21160,7 +20696,7 @@ index 944f178..f2269de 100644
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
-@@ -756,13 +768,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -741,13 +753,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
* particular problem by preventing anything from being mapped
* at the maximum canonical address.
*/
@@ -21176,7 +20712,7 @@ index 944f178..f2269de 100644
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -773,7 +785,8 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -758,7 +770,8 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
@@ -21186,7 +20722,7 @@ index 944f178..f2269de 100644
}
/*
-@@ -796,6 +809,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -781,6 +794,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
@@ -21197,7 +20733,7 @@ index 944f178..f2269de 100644
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
/* Get/set a process' ability to use the timestamp counter instruction */
-@@ -841,7 +858,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+@@ -826,7 +843,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
return 0;
}
@@ -21206,7 +20742,7 @@ index 944f178..f2269de 100644
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-@@ -851,6 +868,6 @@ bool xen_set_default_idle(void);
+@@ -836,6 +853,6 @@ bool xen_set_default_idle(void);
#define xen_set_default_idle 0
#endif
@@ -21215,10 +20751,10 @@ index 944f178..f2269de 100644
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
-index 5fabf13..7388158 100644
+index 6271281..e2ba226 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
-@@ -125,15 +125,16 @@ static inline int v8086_mode(struct pt_regs *regs)
+@@ -124,15 +124,16 @@ static inline int v8086_mode(struct pt_regs *regs)
#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
@@ -21237,7 +20773,7 @@ index 5fabf13..7388158 100644
#endif
}
-@@ -180,9 +181,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
+@@ -179,9 +180,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
* Traps from the kernel do not save sp and ss.
* Use the helper function to retrieve sp.
*/
@@ -21252,21 +20788,6 @@ index 5fabf13..7388158 100644
#endif
return *(unsigned long *)((unsigned long)regs + offset);
}
-diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
-index ae0e241..e80b10b 100644
---- a/arch/x86/include/asm/qrwlock.h
-+++ b/arch/x86/include/asm/qrwlock.h
-@@ -7,8 +7,8 @@
- #define queue_write_unlock queue_write_unlock
- static inline void queue_write_unlock(struct qrwlock *lock)
- {
-- barrier();
-- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
-+ barrier();
-+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
- }
- #endif
-
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 9c6b890..5305f53 100644
--- a/arch/x86/include/asm/realmode.h
@@ -21737,7 +21258,7 @@ index 222a6a3..839da8d 100644
#endif
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
-index c2e00bb..a10266e 100644
+index 58505f0..bff3b5b 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -49,7 +49,7 @@
@@ -21858,19 +21379,19 @@ index 82c34ee..940fa40 100644
unsigned, unsigned, unsigned);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index 225ee54..fae4566 100644
+index 8afdc3e..ca2f1b4 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -36,7 +36,7 @@
- #ifdef CONFIG_X86_32
- # define TOP_OF_KERNEL_STACK_PADDING 8
+@@ -39,7 +39,7 @@
+ # define TOP_OF_KERNEL_STACK_PADDING 8
+ # endif
#else
-# define TOP_OF_KERNEL_STACK_PADDING 0
+# define TOP_OF_KERNEL_STACK_PADDING 16
#endif
/*
-@@ -50,27 +50,26 @@ struct task_struct;
+@@ -53,27 +53,26 @@ struct task_struct;
#include <linux/atomic.h>
struct thread_info {
@@ -21901,7 +21422,7 @@ index 225ee54..fae4566 100644
#define init_stack (init_thread_union.stack)
#else /* !__ASSEMBLY__ */
-@@ -110,6 +109,7 @@ struct thread_info {
+@@ -113,6 +112,7 @@ struct thread_info {
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
#define TIF_X32 30 /* 32-bit native x86-64 binary */
@@ -21909,7 +21430,7 @@ index 225ee54..fae4566 100644
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-@@ -133,17 +133,18 @@ struct thread_info {
+@@ -136,17 +136,18 @@ struct thread_info {
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32 (1 << TIF_ADDR32)
#define _TIF_X32 (1 << TIF_X32)
@@ -21922,24 +21443,15 @@ index 225ee54..fae4566 100644
- _TIF_NOHZ)
+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
- /* work to do in syscall_trace_leave() */
- #define _TIF_WORK_SYSCALL_EXIT \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
-- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
-
- /* work to do on interrupt/exception return */
- #define _TIF_WORK_MASK \
-@@ -154,7 +155,7 @@ struct thread_info {
/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK \
((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ)
+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
- /* Only used for 64 bit */
- #define _TIF_DO_NOTIFY_MASK \
-@@ -177,9 +178,11 @@ struct thread_info {
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW \
+@@ -164,9 +165,11 @@ struct thread_info {
*/
#ifndef __ASSEMBLY__
@@ -21952,7 +21464,7 @@ index 225ee54..fae4566 100644
}
static inline unsigned long current_stack_pointer(void)
-@@ -195,14 +198,9 @@ static inline unsigned long current_stack_pointer(void)
+@@ -182,14 +185,9 @@ static inline unsigned long current_stack_pointer(void)
#else /* !__ASSEMBLY__ */
@@ -21968,7 +21480,7 @@ index 225ee54..fae4566 100644
/*
* ASM operand which evaluates to a 'thread_info' address of
-@@ -295,5 +293,12 @@ static inline bool is_ia32_task(void)
+@@ -282,5 +280,12 @@ static inline bool is_ia32_task(void)
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
@@ -21982,7 +21494,7 @@ index 225ee54..fae4566 100644
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
-index cd79194..6a9956f 100644
+index 6df2029..a359a58 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
@@ -23057,10 +22569,10 @@ index 48d34d2..90671c7 100644
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
-index c44a5d5..7f83cfc 100644
+index 0679e11..10ba732 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
-@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+@@ -80,7 +80,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
* - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
* cases needing an extended handling.
*/
@@ -23070,7 +22582,7 @@ index c44a5d5..7f83cfc 100644
unsigned long mfn;
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
-index 0f457e6..5970c0a 100644
+index 9dafe59..0293c1d 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -69,7 +69,7 @@ struct e820map {
@@ -23083,10 +22595,10 @@ index 0f457e6..5970c0a 100644
#define BIOS_ROM_BASE 0xffe00000
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
-index 0f15af4..501a76a 100644
+index b1b78ff..92eb188 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
-@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
+@@ -30,7 +30,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
@@ -23096,10 +22608,10 @@ index 0f15af4..501a76a 100644
obj-$(CONFIG_X86_64) += mcount_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index 9393896..adbaa90 100644
+index ded848c..b7a508ed 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
-@@ -1333,7 +1333,7 @@ static void __init acpi_reduced_hw_init(void)
+@@ -1327,7 +1327,7 @@ static void __init acpi_reduced_hw_init(void)
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
*/
@@ -23108,7 +22620,7 @@ index 9393896..adbaa90 100644
/*
* Boxes that need ACPI disabled
*/
-@@ -1408,7 +1408,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+@@ -1402,7 +1402,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
};
/* second table for DMI checks that should run after early-quirks */
@@ -23413,7 +22925,7 @@ index 25f9093..21d2827 100644
bp_int3_handler = handler;
bp_int3_addr = (u8 *)addr + sizeof(int3);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index 307a498..a7ffc39 100644
+index 24e94ce..7bd6977 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
@@ -23425,7 +22937,7 @@ index 307a498..a7ffc39 100644
int pic_mode;
-@@ -1864,7 +1864,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
+@@ -1868,7 +1868,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
apic_write(APIC_ESR, 0);
v = apic_read(APIC_ESR);
ack_APIC_irq();
@@ -23434,21 +22946,19 @@ index 307a498..a7ffc39 100644
apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
smp_processor_id(), v);
-@@ -2143,7 +2143,9 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
- for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
- /* Should happen once for each apic */
- WARN_ON((*drv)->eoi_write == eoi_write);
-- (*drv)->eoi_write = eoi_write;
-+ pax_open_kernel();
-+ *(void **)&(*drv)->eoi_write = eoi_write;
-+ pax_close_kernel();
- }
- }
-
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
-index de918c4..32eed23 100644
+index f92ab36..1884323 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
+@@ -25,7 +25,7 @@
+ static struct apic apic_physflat;
+ static struct apic apic_flat;
+
+-struct apic __read_mostly *apic = &apic_flat;
++struct apic *apic __read_only = &apic_flat;
+ EXPORT_SYMBOL_GPL(apic);
+
+ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
@@ -154,7 +154,7 @@ static int flat_probe(void)
return 1;
}
@@ -23458,7 +22968,7 @@ index de918c4..32eed23 100644
.name = "flat",
.probe = flat_probe,
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
-@@ -260,7 +260,7 @@ static int physflat_probe(void)
+@@ -259,7 +259,7 @@ static int physflat_probe(void)
return 0;
}
@@ -23468,7 +22978,7 @@ index de918c4..32eed23 100644
.name = "physical flat",
.probe = physflat_probe,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
-index b205cdb..d8503ff 100644
+index 0d96749..ce6b722 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
@@ -23481,7 +22991,7 @@ index b205cdb..d8503ff 100644
.probe = noop_probe,
.acpi_madt_oem_check = NULL,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
-index c4a8d63..fe893ac 100644
+index 971cf88..a8e01ae 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
@@ -23494,7 +23004,7 @@ index c4a8d63..fe893ac 100644
.name = "bigsmp",
.probe = probe_bigsmp,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index 11b46d9..e2bc827 100644
+index 4f28215..e0b9a5d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1682,7 +1682,7 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
@@ -23534,11 +23044,11 @@ index 11b46d9..e2bc827 100644
.irq_mask = mask_lapic_irq,
.irq_unmask = unmask_lapic_irq,
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
-index 1a9d735..c58b5c5 100644
+index 5f1feb6..199d454 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -267,7 +267,7 @@ static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
- hpet_msi_write(data->handler_data, msg);
+ hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
}
-static struct irq_chip hpet_msi_controller = {
@@ -23547,7 +23057,7 @@ index 1a9d735..c58b5c5 100644
.irq_unmask = hpet_msi_unmask,
.irq_mask = hpet_msi_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
-index bda4886..f9c7195 100644
+index 7694ae6..5abb08e 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -72,7 +72,7 @@ static int probe_default(void)
@@ -23559,8 +23069,17 @@ index bda4886..f9c7195 100644
.name = "default",
.probe = probe_default,
+@@ -126,7 +126,7 @@ static struct apic apic_default = {
+
+ apic_driver(apic_default);
+
+-struct apic *apic = &apic_default;
++struct apic *apic __read_only = &apic_default;
+ EXPORT_SYMBOL_GPL(apic);
+
+ static int cmdline_apic __initdata;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
-index ea4ba83..13a7b74 100644
+index 861bc59..a721835 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -36,6 +36,7 @@ static struct irq_chip lapic_controller;
@@ -23580,18 +23099,9 @@ index ea4ba83..13a7b74 100644
{
raw_spin_unlock(&vector_lock);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
-index ab3219b..e8033eb 100644
+index cc8311c..d72f027 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
-@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __refdata x2apic_cpu_notifier = {
-+static struct notifier_block x2apic_cpu_notifier = {
- .notifier_call = update_clusterinfo,
- };
-
@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}
@@ -23602,7 +23112,7 @@ index ab3219b..e8033eb 100644
.name = "cluster x2apic",
.probe = x2apic_cluster_probe,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
-index 3ffd925..8c0f5a8 100644
+index 662e915..e721634 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -90,7 +90,7 @@ static int x2apic_phys_probe(void)
@@ -23615,10 +23125,10 @@ index 3ffd925..8c0f5a8 100644
.name = "physical x2apic",
.probe = x2apic_phys_probe,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
-index c8d9295..9af2d03 100644
+index 4a13946..067ed8c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -375,7 +375,7 @@ static int uv_probe(void)
+@@ -374,7 +374,7 @@ static int uv_probe(void)
return apic == &apic_x2apic_uv_x;
}
@@ -23628,7 +23138,7 @@ index c8d9295..9af2d03 100644
.name = "UV large system",
.probe = uv_probe,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
-index 927ec92..de68f32 100644
+index 052c9c3..bc22ccdba 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
@@ -23764,7 +23274,7 @@ index d8f42f9..a46f1fc 100644
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
BLANK();
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
-index 9bff687..5b899fb 100644
+index 4eb065c..40dd012 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
@@ -23779,10 +23289,10 @@ index 9bff687..5b899fb 100644
obj-y += common.o
obj-y += rdrand.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index dd3a4ba..06672af 100644
+index 4a70fc6..7720ca5 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
-@@ -750,7 +750,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+@@ -754,7 +754,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
@@ -23812,10 +23322,10 @@ index 04f0fe5..3c0598c 100644
/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index e4f929d..0849dd8 100644
+index 1a292573..f917fff 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
-@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
+@@ -92,60 +92,6 @@ static const struct cpu_dev default_cpu = {
static const struct cpu_dev *this_cpu = &default_cpu;
@@ -23876,7 +23386,7 @@ index e4f929d..0849dd8 100644
static int __init x86_mpx_setup(char *s)
{
/* require an exact match without trailing characters */
-@@ -286,6 +232,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -287,6 +233,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}
@@ -23986,7 +23496,7 @@ index e4f929d..0849dd8 100644
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -386,7 +435,7 @@ void switch_to_new_gdt(int cpu)
+@@ -387,7 +436,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -23995,7 +23505,7 @@ index e4f929d..0849dd8 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -917,6 +966,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -918,6 +967,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
@@ -24016,7 +23526,7 @@ index e4f929d..0849dd8 100644
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -991,7 +1054,7 @@ void enable_sep_cpu(void)
+@@ -992,7 +1055,7 @@ void enable_sep_cpu(void)
int cpu;
cpu = get_cpu();
@@ -24025,7 +23535,7 @@ index e4f929d..0849dd8 100644
if (!boot_cpu_has(X86_FEATURE_SEP))
goto out;
-@@ -1137,10 +1200,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1138,10 +1201,12 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
@@ -24041,7 +23551,7 @@ index e4f929d..0849dd8 100644
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
-@@ -1252,21 +1317,21 @@ EXPORT_PER_CPU_SYMBOL(current_task);
+@@ -1253,21 +1318,21 @@ EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
@@ -24070,7 +23580,7 @@ index e4f929d..0849dd8 100644
/*
* Clear all 6 debug registers:
*/
-@@ -1342,7 +1407,7 @@ void cpu_init(void)
+@@ -1343,7 +1408,7 @@ void cpu_init(void)
*/
load_ucode_ap();
@@ -24079,7 +23589,7 @@ index e4f929d..0849dd8 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1374,7 +1439,6 @@ void cpu_init(void)
+@@ -1375,7 +1440,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -24087,7 +23597,7 @@ index e4f929d..0849dd8 100644
x2apic_setup();
/*
-@@ -1426,7 +1490,7 @@ void cpu_init(void)
+@@ -1427,7 +1491,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -24149,7 +23659,7 @@ index be4febc..f7af533 100644
return &cache_private_group;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index df919ff..3332bf7 100644
+index 9d014b82..8186c29 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -47,6 +47,7 @@
@@ -24160,7 +23670,17 @@ index df919ff..3332bf7 100644
#include "mce-internal.h"
-@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
+@@ -212,8 +213,7 @@ static struct notifier_block mce_srao_nb;
+ void mce_register_decode_chain(struct notifier_block *nb)
+ {
+ /* Ensure SRAO notifier has the highest priority in the decode chain. */
+- if (nb != &mce_srao_nb && nb->priority == INT_MAX)
+- nb->priority -= 1;
++ BUG_ON(nb != &mce_srao_nb && nb->priority == INT_MAX);
+
+ atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
+ }
+@@ -237,7 +237,7 @@ static void print_mce(struct mce *m)
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
m->cs, m->ip);
@@ -24169,7 +23689,7 @@ index df919ff..3332bf7 100644
print_symbol("{%s}", m->ip);
pr_cont("\n");
}
-@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
+@@ -270,10 +270,10 @@ static void print_mce(struct mce *m)
#define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -24182,7 +23702,7 @@ index df919ff..3332bf7 100644
/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
-@@ -319,7 +320,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+@@ -297,7 +297,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
/*
* Make sure only one CPU runs in machine check panic
*/
@@ -24191,7 +23711,7 @@ index df919ff..3332bf7 100644
wait_for_panic();
barrier();
-@@ -327,7 +328,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+@@ -305,7 +305,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
console_verbose();
} else {
/* Don't log too much for fake panic */
@@ -24200,7 +23720,7 @@ index df919ff..3332bf7 100644
return;
}
/* First print corrected ones that are still unlogged */
-@@ -366,7 +367,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+@@ -344,7 +344,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
if (!fake_panic) {
if (panic_timeout == 0)
panic_timeout = mca_cfg.panic_timeout;
@@ -24209,7 +23729,7 @@ index df919ff..3332bf7 100644
} else
pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}
-@@ -752,7 +753,7 @@ static int mce_timed_out(u64 *t, const char *msg)
+@@ -697,7 +697,7 @@ static int mce_timed_out(u64 *t, const char *msg)
* might have been modified by someone else.
*/
rmb();
@@ -24218,7 +23738,7 @@ index df919ff..3332bf7 100644
wait_for_panic();
if (!mca_cfg.monarch_timeout)
goto out;
-@@ -1708,7 +1709,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+@@ -1654,7 +1654,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
}
/* Call the installed machine check handler for this CPU setup. */
@@ -24227,7 +23747,7 @@ index df919ff..3332bf7 100644
unexpected_machine_check;
/*
-@@ -1731,7 +1732,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
+@@ -1683,7 +1683,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
return;
}
@@ -24237,7 +23757,7 @@ index df919ff..3332bf7 100644
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
-@@ -1745,7 +1748,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
+@@ -1714,7 +1716,7 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
*/
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
@@ -24246,7 +23766,7 @@ index df919ff..3332bf7 100644
static int mce_chrdev_open_exclu; /* already open exclusive? */
static int mce_chrdev_open(struct inode *inode, struct file *file)
-@@ -1753,7 +1756,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1722,7 +1724,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
spin_lock(&mce_chrdev_state_lock);
if (mce_chrdev_open_exclu ||
@@ -24255,7 +23775,7 @@ index df919ff..3332bf7 100644
spin_unlock(&mce_chrdev_state_lock);
return -EBUSY;
-@@ -1761,7 +1764,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1730,7 +1732,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
if (file->f_flags & O_EXCL)
mce_chrdev_open_exclu = 1;
@@ -24264,7 +23784,7 @@ index df919ff..3332bf7 100644
spin_unlock(&mce_chrdev_state_lock);
-@@ -1772,7 +1775,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+@@ -1741,7 +1743,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
{
spin_lock(&mce_chrdev_state_lock);
@@ -24273,7 +23793,7 @@ index df919ff..3332bf7 100644
mce_chrdev_open_exclu = 0;
spin_unlock(&mce_chrdev_state_lock);
-@@ -2448,7 +2451,7 @@ static __init void mce_init_banks(void)
+@@ -2421,7 +2423,7 @@ static __init void mce_init_banks(void)
for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
@@ -24282,7 +23802,7 @@ index df919ff..3332bf7 100644
sysfs_attr_init(&a->attr);
a->attr.name = b->attrname;
-@@ -2555,7 +2558,7 @@ struct dentry *mce_get_debugfs_dir(void)
+@@ -2528,7 +2530,7 @@ struct dentry *mce_get_debugfs_dir(void)
static void mce_reset(void)
{
cpu_missing = 0;
@@ -24292,7 +23812,7 @@ index df919ff..3332bf7 100644
atomic_set(&mce_callin, 0);
atomic_set(&global_nwo, 0);
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
-index 737b0ad..09ec66e 100644
+index 12402e1..9e0f230 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -12,6 +12,7 @@
@@ -24303,7 +23823,7 @@ index 737b0ad..09ec66e 100644
/* By default disabled */
int mce_p5_enabled __read_mostly;
-@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+@@ -54,7 +55,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
if (!cpu_has(c, X86_FEATURE_MCE))
return;
@@ -24314,7 +23834,7 @@ index 737b0ad..09ec66e 100644
wmb();
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
-index 44f1382..315b292 100644
+index 01dd870..6fd1c59 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -11,6 +11,7 @@
@@ -24335,19 +23855,6 @@ index 44f1382..315b292 100644
/* Make sure the vector pointer is visible before we enable MCEs: */
wmb();
-diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
-index 6236a54..532026d 100644
---- a/arch/x86/kernel/cpu/microcode/core.c
-+++ b/arch/x86/kernel/cpu/microcode/core.c
-@@ -460,7 +460,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata mc_cpu_notifier = {
-+static struct notifier_block mc_cpu_notifier = {
- .notifier_call = mc_cpu_callback,
- };
-
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 969dc17..a9c3fdd 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
@@ -24393,7 +23900,7 @@ index 3b533cf..b40d426 100644
/* Flush TLBs (no need to flush caches - they are disabled) */
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index e7ed0d8..57a2ab9 100644
+index f891b47..9dff300 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -72,7 +72,7 @@ static DEFINE_MUTEX(mtrr_mutex);
@@ -24419,7 +23926,7 @@ index 951884d..4796b75 100644
extern int generic_get_free_region(unsigned long base, unsigned long size,
int replace_reg);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 9469dfa..2b026bc 100644
+index 66dd3fe9..c9bfa35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1518,7 +1518,7 @@ static void __init pmu_check_apic(void)
@@ -24448,8 +23955,8 @@ index 9469dfa..2b026bc 100644
+ unsigned int idx = segment >> 3;
if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- struct ldt_struct *ldt;
-@@ -2194,7 +2194,7 @@ static unsigned long get_segment_base(unsigned int segment)
+ #ifdef CONFIG_MODIFY_LDT_SYSCALL
+@@ -2198,7 +2198,7 @@ static unsigned long get_segment_base(unsigned int segment)
if (idx > GDT_ENTRIES)
return 0;
@@ -24458,7 +23965,7 @@ index 9469dfa..2b026bc 100644
}
return get_desc_base(desc);
-@@ -2284,7 +2284,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+@@ -2288,7 +2288,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
break;
perf_callchain_store(entry, frame.return_address);
@@ -24481,10 +23988,10 @@ index 97242a9..cf9c30e 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index 1b09c42..9627a7b 100644
+index f63360b..5bf835d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -1900,6 +1900,8 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+@@ -2119,6 +2119,8 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
static void
@@ -24493,7 +24000,7 @@ index 1b09c42..9627a7b 100644
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-@@ -1909,14 +1911,18 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
+@@ -2128,14 +2130,18 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
/*
* nothing needed if in group validation mode
*/
@@ -24514,7 +24021,7 @@ index 1b09c42..9627a7b 100644
xl = &excl_cntrs->states[tid];
-@@ -1956,6 +1962,8 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
+@@ -2175,6 +2181,8 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
}
static void
@@ -24523,7 +24030,7 @@ index 1b09c42..9627a7b 100644
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-@@ -1965,13 +1973,18 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+@@ -2184,13 +2192,18 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
/*
* nothing needed if in group validation mode
*/
@@ -24544,7 +24051,7 @@ index 1b09c42..9627a7b 100644
xl = &excl_cntrs->states[tid];
-@@ -2154,19 +2167,22 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
+@@ -2373,19 +2386,22 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
* unused now.
*/
if (hwc->idx >= 0) {
@@ -24569,7 +24076,7 @@ index 1b09c42..9627a7b 100644
raw_spin_unlock(&excl_cntrs->lock);
}
}
-@@ -3019,10 +3035,10 @@ __init int intel_pmu_init(void)
+@@ -3258,10 +3274,10 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
if (boot_cpu_has(X86_FEATURE_PDCM)) {
@@ -24584,10 +24091,10 @@ index 1b09c42..9627a7b 100644
intel_ds_init();
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
-index 43dd672..78c0562 100644
+index d1c0f25..9d7332c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
-@@ -252,7 +252,7 @@ static void bts_event_start(struct perf_event *event, int flags)
+@@ -250,7 +250,7 @@ static void bts_event_start(struct perf_event *event, int flags)
__bts_event_start(event);
/* PMI handler: this counter is running and likely generating PMIs */
@@ -24596,7 +24103,7 @@ index 43dd672..78c0562 100644
}
static void __bts_event_stop(struct perf_event *event)
-@@ -266,7 +266,7 @@ static void __bts_event_stop(struct perf_event *event)
+@@ -264,7 +264,7 @@ static void __bts_event_stop(struct perf_event *event)
if (event->hw.state & PERF_HES_STOPPED)
return;
@@ -24605,7 +24112,7 @@ index 43dd672..78c0562 100644
}
static void bts_event_stop(struct perf_event *event, int flags)
-@@ -274,7 +274,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
+@@ -272,7 +272,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
/* PMI handler: don't restart this counter */
@@ -24630,10 +24137,10 @@ index 377e8f8..2982f48 100644
ret = intel_cqm_setup_rmid_cache();
if (ret)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
-index 183de71..bd34d52 100644
+index 4216928..cdae603 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
-@@ -116,16 +116,12 @@ static const struct attribute_group *pt_attr_groups[] = {
+@@ -132,16 +132,12 @@ static const struct attribute_group *pt_attr_groups[] = {
static int __init pt_pmu_hw_init(void)
{
@@ -24653,8 +24160,8 @@ index 183de71..bd34d52 100644
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
-@@ -135,39 +131,25 @@ static int __init pt_pmu_hw_init(void)
- &pt_pmu.caps[CR_EDX + i*4]);
+@@ -151,39 +147,25 @@ static int __init pt_pmu_hw_init(void)
+ &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
}
- ret = -ENOMEM;
@@ -24700,8 +24207,8 @@ index 183de71..bd34d52 100644
- return ret;
}
- #define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
-@@ -929,7 +911,7 @@ static void pt_event_start(struct perf_event *event, int mode)
+ #define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \
+@@ -999,7 +981,7 @@ static void pt_event_start(struct perf_event *event, int mode)
return;
}
@@ -24710,7 +24217,7 @@ index 183de71..bd34d52 100644
event->hw.state = 0;
pt_config_buffer(buf->cur->table, buf->cur_idx,
-@@ -946,7 +928,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
+@@ -1015,7 +997,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
* Protect against the PMI racing with disabling wrmsr,
* see comment in intel_pt_interrupt().
*/
@@ -24720,10 +24227,10 @@ index 183de71..bd34d52 100644
if (event->hw.state == PERF_HES_STOPPED)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-index 5cbd4e6..ee9388a 100644
+index 81431c0..bc7524c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-@@ -486,7 +486,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
+@@ -502,7 +502,7 @@ static struct attribute *rapl_events_knl_attr[] = {
NULL,
};
@@ -24733,7 +24240,7 @@ index 5cbd4e6..ee9388a 100644
.attrs = NULL, /* patched at runtime */
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-index 21b5e38..84f1f82 100644
+index 560e525..f5d28a9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -731,7 +731,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
@@ -24746,7 +24253,7 @@ index 21b5e38..84f1f82 100644
int i, j;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
-index 0f77f0a..d3c6b7d 100644
+index 72c54c2..c4170a4b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -115,7 +115,7 @@ struct intel_uncore_box {
@@ -24758,19 +24265,6 @@ index 0f77f0a..d3c6b7d 100644
ssize_t uncore_event_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf);
-diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
-index 83741a7..bd3507d 100644
---- a/arch/x86/kernel/cpuid.c
-+++ b/arch/x86/kernel/cpuid.c
-@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
- return notifier_from_errno(err);
- }
-
--static struct notifier_block __refdata cpuid_class_cpu_notifier =
-+static struct notifier_block cpuid_class_cpu_notifier =
- {
- .notifier_call = cpuid_class_cpu_callback,
- };
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index afa64ad..dce67dd 100644
--- a/arch/x86/kernel/crash_dump_64.c
@@ -25178,7 +24672,7 @@ index eec40f5..4fee808 100644
#include <asm/processor.h>
#include <asm/fcntl.h>
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
-index ce95676..af5c012 100644
+index 4d38416..ec7cc4e 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -41,6 +41,7 @@
@@ -26229,7 +25723,7 @@ index 0e2d96f..5889003 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index ffdc0e8..2dbedb3 100644
+index ffdc0e8..f429d4f 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,8 @@
@@ -26290,12 +25784,11 @@ index ffdc0e8..2dbedb3 100644
/*
* Set up the identity mapping for the switchover. These
-@@ -180,11 +210,13 @@ ENTRY(secondary_startup_64)
+@@ -180,11 +210,12 @@ ENTRY(secondary_startup_64)
/* Sanitize CPU configuration */
call verify_cpu
+ orq $-1, %rbp
-+
movq $(init_level4_pgt - __START_KERNEL_map), %rax
1:
@@ -26306,7 +25799,7 @@ index ffdc0e8..2dbedb3 100644
movq %rcx, %cr4
/* Setup early boot stage 4 level pagetables. */
-@@ -205,10 +237,21 @@ ENTRY(secondary_startup_64)
+@@ -205,10 +236,21 @@ ENTRY(secondary_startup_64)
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_SCE, %eax /* Enable System Call */
@@ -26329,7 +25822,7 @@ index ffdc0e8..2dbedb3 100644
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -288,6 +331,7 @@ ENTRY(secondary_startup_64)
+@@ -288,6 +330,7 @@ ENTRY(secondary_startup_64)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
@@ -26337,7 +25830,7 @@ index ffdc0e8..2dbedb3 100644
movq initial_code(%rip),%rax
pushq $0 # fake return address to stop unwinder
pushq $__KERNEL_CS # set correct cs
-@@ -321,7 +365,7 @@ ENDPROC(start_cpu0)
+@@ -321,7 +364,7 @@ ENDPROC(start_cpu0)
.quad INIT_PER_CPU_VAR(irq_stack_union)
GLOBAL(stack_start)
@@ -26346,7 +25839,7 @@ index ffdc0e8..2dbedb3 100644
.word 0
__FINITDATA
-@@ -401,7 +445,7 @@ early_idt_handler_common:
+@@ -401,7 +444,7 @@ early_idt_handler_common:
call dump_stack
#ifdef CONFIG_KALLSYMS
leaq early_idt_ripmsg(%rip),%rdi
@@ -26355,7 +25848,7 @@ index ffdc0e8..2dbedb3 100644
call __print_symbol
#endif
#endif /* EARLY_PRINTK */
-@@ -430,6 +474,7 @@ ENDPROC(early_idt_handler_common)
+@@ -430,6 +473,7 @@ ENDPROC(early_idt_handler_common)
early_recursion_flag:
.long 0
@@ -26363,7 +25856,7 @@ index ffdc0e8..2dbedb3 100644
#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
-@@ -452,40 +497,67 @@ GLOBAL(name)
+@@ -452,40 +496,67 @@ GLOBAL(name)
__INITDATA
NEXT_PAGE(early_level4_pgt)
.fill 511,8,0
@@ -26443,7 +25936,7 @@ index ffdc0e8..2dbedb3 100644
NEXT_PAGE(level2_kernel_pgt)
/*
-@@ -502,31 +574,79 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -502,31 +573,79 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
@@ -26662,7 +26155,7 @@ index 37dae79..620dd84 100644
regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
t->iopl = level << 12;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
-index c7dfe1b..146f63c 100644
+index f8062aa..c37b60f 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -28,7 +28,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -26674,9 +26167,9 @@ index c7dfe1b..146f63c 100644
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
-@@ -144,9 +144,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
- seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
- seq_puts(p, " Hypervisor callback interrupts\n");
+@@ -147,9 +147,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+ seq_puts(p, " Hypervisor callback interrupts\n");
+ }
#endif
- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
@@ -26686,7 +26179,7 @@ index c7dfe1b..146f63c 100644
#endif
#ifdef CONFIG_HAVE_KVM
seq_printf(p, "%*s: ", prec, "PIN");
-@@ -198,7 +198,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+@@ -201,7 +201,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
u64 arch_irq_stat(void)
{
@@ -26696,7 +26189,7 @@ index c7dfe1b..146f63c 100644
}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index cd74f59..588af0b 100644
+index 38da8f2..5653e36 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -23,6 +23,8 @@
@@ -26724,19 +26217,19 @@ index cd74f59..588af0b 100644
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
}
-@@ -71,10 +74,9 @@ static inline void *current_stack(void)
- static inline int
- execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+@@ -70,10 +73,9 @@ static inline void *current_stack(void)
+
+ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
- struct irq_stack *curstk, *irqstk;
+ struct irq_stack *irqstk;
- u32 *isp, *prev_esp, arg1, arg2;
+ u32 *isp, *prev_esp, arg1;
- curstk = (struct irq_stack *) current_stack();
irqstk = __this_cpu_read(hardirq_stack);
/*
-@@ -83,15 +85,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+@@ -82,15 +84,19 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
* handler) we can't do that and just have to keep using the
* current stack (which is the irq stack already after all)
*/
@@ -26758,8 +26251,8 @@ index cd74f59..588af0b 100644
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
-@@ -102,6 +108,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
- : "0" (irq), "1" (desc), "2" (isp),
+@@ -101,6 +107,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+ : "0" (desc), "1" (isp),
"D" (desc->handle_irq)
: "memory", "cc", "ecx");
+
@@ -26770,7 +26263,7 @@ index cd74f59..588af0b 100644
return 1;
}
-@@ -110,32 +121,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+@@ -109,32 +120,18 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
*/
void irq_ctx_init(int cpu)
{
@@ -26805,7 +26298,7 @@ index cd74f59..588af0b 100644
irqstk = __this_cpu_read(softirq_stack);
/* build the stack frame on the softirq stack */
-@@ -145,7 +142,16 @@ void do_softirq_own_stack(void)
+@@ -144,7 +141,16 @@ void do_softirq_own_stack(void)
prev_esp = (u32 *)irqstk;
*prev_esp = current_stack_pointer();
@@ -26821,9 +26314,9 @@ index cd74f59..588af0b 100644
+
}
- bool handle_irq(unsigned irq, struct pt_regs *regs)
+ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
-index bc4604e..0be227d 100644
+index c767cf2..425a7ec 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -20,6 +20,8 @@
@@ -26845,7 +26338,7 @@ index bc4604e..0be227d 100644
panic("low stack detected by irq handler - check messages\n");
#endif
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
-index 26d5a55..063fef8 100644
+index e565e0e..fdfeb45 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -31,6 +31,8 @@ static void bug_at(unsigned char *ip, int line)
@@ -27164,7 +26657,7 @@ index c2bedae..25e7ab60 100644
.name = "data",
.mode = S_IRUGO,
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
-index 49487b4..a94a0d3 100644
+index 2c7aafa..7ac2af2 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -29,7 +29,7 @@
@@ -27238,7 +26731,7 @@ index 49487b4..a94a0d3 100644
size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
-index 2bcc052..864eb84 100644
+index 6acc9dd..f72931d 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -11,6 +11,7 @@
@@ -27660,7 +27153,7 @@ index 113e707..0a690e1 100644
};
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
-index d05bd2e..f690edd 100644
+index 697f90d..8b1c639 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
@@ -27682,8 +27175,8 @@ index d05bd2e..f690edd 100644
+ n->action->handler, whole_msecs, decimal_msecs);
}
- static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
-@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
+ static int nmi_handle(unsigned int type, struct pt_regs *regs)
+@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs)
delta = sched_clock() - delta;
trace_nmi_handler(a->handler, (int)delta, thishandled);
@@ -27698,7 +27191,7 @@ index d05bd2e..f690edd 100644
}
rcu_read_unlock();
-@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
+@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(nmi_handle);
@@ -27792,7 +27285,7 @@ index 33ee3e0..da3519a 100644
#ifdef CONFIG_QUEUED_SPINLOCKS
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index ebb5657..dde2f45 100644
+index c2130ae..d52ca15 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -64,6 +64,9 @@ u64 _paravirt_ident_64(u64 x)
@@ -27877,7 +27370,7 @@ index ebb5657..dde2f45 100644
.cpuid = native_cpuid,
.get_debugreg = native_get_debugreg,
.set_debugreg = native_set_debugreg,
-@@ -405,21 +412,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
+@@ -403,21 +410,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
@@ -27907,7 +27400,7 @@ index ebb5657..dde2f45 100644
.read_cr2 = native_read_cr2,
.write_cr2 = native_write_cr2,
-@@ -469,6 +481,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -467,6 +479,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.make_pud = PTE_IDENT,
.set_pgd = native_set_pgd,
@@ -27915,7 +27408,7 @@ index ebb5657..dde2f45 100644
#endif
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-@@ -489,6 +502,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -487,6 +500,12 @@ struct pv_mmu_ops pv_mmu_ops = {
},
.set_fixmap = native_set_fixmap,
@@ -27996,7 +27489,7 @@ index adf0392..88a7576 100644
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index c27cad7..47c45ed 100644
+index 9f7c21c..854f412 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -15,6 +15,7 @@
@@ -28007,7 +27500,7 @@ index c27cad7..47c45ed 100644
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
-@@ -37,7 +38,8 @@
+@@ -39,7 +40,8 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
@@ -28017,7 +27510,7 @@ index c27cad7..47c45ed 100644
.x86_tss = {
.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
-@@ -55,6 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+@@ -57,6 +59,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
*/
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
@@ -28025,7 +27518,7 @@ index c27cad7..47c45ed 100644
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
-@@ -75,17 +78,36 @@ void idle_notifier_unregister(struct notifier_block *n)
+@@ -77,13 +80,26 @@ void idle_notifier_unregister(struct notifier_block *n)
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
@@ -28050,7 +27543,10 @@ index c27cad7..47c45ed 100644
+ *dst = *src;
+ dst->thread.fpu.state = kmem_cache_alloc_node(fpregs_state_cachep, GFP_KERNEL, tsk_fork_get_node(src));
+ memcpy(dst->thread.fpu.state, src->thread.fpu.state, xstate_size);
-
+ #ifdef CONFIG_VM86
+ dst->thread.vm86 = NULL;
+ #endif
+@@ -91,6 +107,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
@@ -28063,7 +27559,7 @@ index c27cad7..47c45ed 100644
/*
* Free current thread data structures etc..
*/
-@@ -97,7 +119,7 @@ void exit_thread(void)
+@@ -102,7 +124,7 @@ void exit_thread(void)
struct fpu *fpu = &t->fpu;
if (bp) {
@@ -28072,7 +27568,7 @@ index c27cad7..47c45ed 100644
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
-@@ -117,6 +139,9 @@ void flush_thread(void)
+@@ -124,6 +146,9 @@ void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -28082,7 +27578,7 @@ index c27cad7..47c45ed 100644
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-@@ -258,7 +283,7 @@ static void __exit_idle(void)
+@@ -265,7 +290,7 @@ static void __exit_idle(void)
void exit_idle(void)
{
/* idle loop has pid 0 */
@@ -28091,7 +27587,7 @@ index c27cad7..47c45ed 100644
return;
__exit_idle();
}
-@@ -311,7 +336,7 @@ bool xen_set_default_idle(void)
+@@ -318,7 +343,7 @@ bool xen_set_default_idle(void)
return ret;
}
#endif
@@ -28100,7 +27596,7 @@ index c27cad7..47c45ed 100644
{
local_irq_disable();
/*
-@@ -488,16 +513,40 @@ static int __init idle_setup(char *str)
+@@ -496,13 +521,6 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
@@ -28114,9 +27610,31 @@ index c27cad7..47c45ed 100644
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
unsigned long range_end = mm->brk + 0x02000000;
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
- }
+@@ -534,9 +552,7 @@ unsigned long get_wchan(struct task_struct *p)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+- * ----------- bottom = start + sizeof(thread_info)
+- * thread_info
+- * ----------- start
++ * ----------- bottom = start
+ *
+ * The tasks stack pointer points at the location where the
+ * framepointer is stored. The data on the stack is:
+@@ -547,7 +563,7 @@ unsigned long get_wchan(struct task_struct *p)
+ */
+ top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+ top -= 2 * sizeof(unsigned long);
+- bottom = start + sizeof(struct thread_info);
++ bottom = start;
+ sp = READ_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
+@@ -564,3 +580,35 @@ unsigned long get_wchan(struct task_struct *p)
+ } while (count++ < 16 && p->state != TASK_RUNNING);
+ return 0;
+ }
++
+#ifdef CONFIG_PAX_RANDKSTACK
+void pax_randomize_kstack(struct pt_regs *regs)
+{
@@ -28129,7 +27647,7 @@ index c27cad7..47c45ed 100644
+ if (v8086_mode(regs))
+ return;
+
-+ rdtscl(time);
++ time = rdtsc();
+
+ /* P4 seems to return a 0 LSB, ignore it */
+#ifdef CONFIG_MPENTIUM4
@@ -28149,10 +27667,10 @@ index c27cad7..47c45ed 100644
+}
+#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index f73c962..6589332 100644
+index 737527b..ebf7a85 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
-@@ -63,6 +63,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
+@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return ((unsigned long *)tsk->thread.sp)[3];
@@ -28160,7 +27678,7 @@ index f73c962..6589332 100644
}
void __show_regs(struct pt_regs *regs, int all)
-@@ -75,16 +76,15 @@ void __show_regs(struct pt_regs *regs, int all)
+@@ -76,16 +77,15 @@ void __show_regs(struct pt_regs *regs, int all)
if (user_mode(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
@@ -28179,7 +27697,7 @@ index f73c962..6589332 100644
print_symbol("EIP is at %s\n", regs->ip);
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-@@ -131,21 +131,22 @@ void release_thread(struct task_struct *dead_task)
+@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
@@ -28206,7 +27724,7 @@ index f73c962..6589332 100644
childregs->fs = __KERNEL_PERCPU;
childregs->bx = sp; /* function */
childregs->bp = arg;
-@@ -245,7 +246,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -246,7 +247,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
@@ -28215,7 +27733,7 @@ index f73c962..6589332 100644
fpu_switch_t fpu_switch;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -265,6 +266,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
lazy_save_gs(prev->gs);
@@ -28226,7 +27744,7 @@ index f73c962..6589332 100644
/*
* Load the per-thread Thread-Local Storage descriptor.
*/
-@@ -307,9 +312,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -308,9 +313,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* current_thread_info().
*/
load_sp0(tss, next);
@@ -28239,7 +27757,7 @@ index f73c962..6589332 100644
/*
* Restore %gs if needed (which is common)
-@@ -319,8 +324,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -320,7 +325,5 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
switch_fpu_finish(next_fpu, fpu_switch);
@@ -28247,17 +27765,11 @@ index f73c962..6589332 100644
-
return prev_p;
}
-
-@@ -350,4 +353,3 @@ unsigned long get_wchan(struct task_struct *p)
- } while (count++ < 16);
- return 0;
- }
--
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index a90ac95..c285bd5 100644
+index b35921a..c995d0b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
-@@ -157,9 +157,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+@@ -159,9 +159,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct pt_regs *childregs;
struct task_struct *me = current;
@@ -28269,7 +27781,7 @@ index a90ac95..c285bd5 100644
set_tsk_thread_flag(p, TIF_FORK);
p->thread.io_bitmap_ptr = NULL;
-@@ -169,6 +170,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+@@ -171,6 +172,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
@@ -28278,7 +27790,7 @@ index a90ac95..c285bd5 100644
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(p->flags & PF_KTHREAD)) {
-@@ -276,7 +279,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -278,7 +281,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
@@ -28287,7 +27799,7 @@ index a90ac95..c285bd5 100644
unsigned fsindex, gsindex;
fpu_switch_t fpu_switch;
-@@ -327,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -329,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
@@ -28298,7 +27810,7 @@ index a90ac95..c285bd5 100644
/*
* Switch FS and GS.
*
-@@ -398,6 +405,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -400,6 +407,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* Switch the PDA and FPU contexts.
*/
this_cpu_write(current_task, next_p);
@@ -28306,7 +27818,7 @@ index a90ac95..c285bd5 100644
/*
* If it were not for PREEMPT_ACTIVE we could guarantee that the
-@@ -410,6 +418,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -412,6 +420,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Reload esp0 and ss1. This changes current_thread_info(). */
load_sp0(tss, next);
@@ -28315,31 +27827,11 @@ index a90ac95..c285bd5 100644
/*
* Now maybe reload the debug registers and handle I/O bitmaps
*/
-@@ -522,9 +532,7 @@ unsigned long get_wchan(struct task_struct *p)
- * PADDING
- * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
- * stack
-- * ----------- bottom = start + sizeof(thread_info)
-- * thread_info
-- * ----------- start
-+ * ----------- bottom = start
- *
- * The tasks stack pointer points at the location where the
- * framepointer is stored. The data on the stack is:
-@@ -535,7 +543,7 @@ unsigned long get_wchan(struct task_struct *p)
- */
- top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
- top -= 2 * sizeof(unsigned long);
-- bottom = start + sizeof(struct thread_info);
-+ bottom = start;
-
- sp = READ_ONCE(p->thread.sp);
- if (sp < bottom || sp > top)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
-index 9be72bc..f4329c5 100644
+index 558f50e..2312c52 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
-@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
+@@ -184,10 +184,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
unsigned long sp = (unsigned long)&regs->sp;
u32 *prev_esp;
@@ -28352,7 +27844,7 @@ index 9be72bc..f4329c5 100644
if (prev_esp)
return (unsigned long)prev_esp;
-@@ -446,6 +446,20 @@ static int putreg(struct task_struct *child,
+@@ -444,6 +444,20 @@ static int putreg(struct task_struct *child,
if (child->thread.gs != value)
return do_arch_prctl(child, ARCH_SET_GS, value);
return 0;
@@ -28373,7 +27865,7 @@ index 9be72bc..f4329c5 100644
#endif
}
-@@ -582,7 +596,7 @@ static void ptrace_triggered(struct perf_event *bp,
+@@ -580,7 +594,7 @@ static void ptrace_triggered(struct perf_event *bp,
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
int i;
@@ -28382,7 +27874,7 @@ index 9be72bc..f4329c5 100644
struct arch_hw_breakpoint *info;
for (i = 0; i < HBP_NUM; i++) {
-@@ -816,7 +830,7 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -814,7 +828,7 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
@@ -28391,7 +27883,7 @@ index 9be72bc..f4329c5 100644
switch (request) {
/* read the word at location addr in the USER area. */
-@@ -901,14 +915,14 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -899,14 +913,14 @@ long arch_ptrace(struct task_struct *child, long request,
if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
@@ -28408,7 +27900,7 @@ index 9be72bc..f4329c5 100644
break;
#endif
-@@ -1286,7 +1300,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+@@ -1294,7 +1308,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
#ifdef CONFIG_X86_64
@@ -28417,7 +27909,7 @@ index 9be72bc..f4329c5 100644
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct) / sizeof(long),
-@@ -1327,7 +1341,7 @@ static const struct user_regset_view user_x86_64_view = {
+@@ -1335,7 +1349,7 @@ static const struct user_regset_view user_x86_64_view = {
#endif /* CONFIG_X86_64 */
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -28426,7 +27918,7 @@ index 9be72bc..f4329c5 100644
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct32) / sizeof(u32),
-@@ -1380,7 +1394,7 @@ static const struct user_regset_view user_x86_32_view = {
+@@ -1388,7 +1402,7 @@ static const struct user_regset_view user_x86_32_view = {
*/
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
@@ -28435,7 +27927,7 @@ index 9be72bc..f4329c5 100644
{
#ifdef CONFIG_X86_64
x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
-@@ -1415,7 +1429,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
+@@ -1423,7 +1437,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
@@ -28444,41 +27936,6 @@ index 9be72bc..f4329c5 100644
}
void user_single_step_siginfo(struct task_struct *tsk,
-@@ -1449,6 +1463,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
- }
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- /*
- * We can return 0 to resume the syscall or anything else to go to phase
- * 2. If we resume the syscall, we need to put something appropriate in
-@@ -1556,6 +1574,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
-
- BUG_ON(regs != task_pt_regs(current));
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- /*
- * If we stepped into a sysenter/syscall insn, it trapped in
- * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
-@@ -1614,6 +1637,11 @@ void syscall_trace_leave(struct pt_regs *regs)
- */
- user_exit();
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d2..e75ed0a 100644
--- a/arch/x86/kernel/pvclock.c
@@ -28512,7 +27969,7 @@ index 2f355d2..e75ed0a 100644
return ret;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-index 86db4bc..a50a54a 100644
+index 02693dd..33a1546 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -28650,7 +28107,7 @@ index 98111b3..73ca125 100644
identity_mapped:
/* set return address to 0 if not preserving context */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 1e6f70f..a6b1c8a 100644
+index 37c8ea8..c0e8efa 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -111,6 +111,7 @@
@@ -28677,7 +28134,7 @@ index 1e6f70f..a6b1c8a 100644
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
-@@ -772,7 +775,7 @@ static void __init trim_bios_range(void)
+@@ -752,7 +755,7 @@ static void __init trim_bios_range(void)
* area (640->1Mb) as ram even though it is not.
* take them out.
*/
@@ -28686,7 +28143,7 @@ index 1e6f70f..a6b1c8a 100644
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
-@@ -780,7 +783,7 @@ static void __init trim_bios_range(void)
+@@ -760,7 +763,7 @@ static void __init trim_bios_range(void)
/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
@@ -28695,7 +28152,7 @@ index 1e6f70f..a6b1c8a 100644
u64 size = __pa_symbol(_end) - start;
/*
-@@ -861,8 +864,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+@@ -841,8 +844,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
void __init setup_arch(char **cmdline_p)
{
@@ -28706,7 +28163,7 @@ index 1e6f70f..a6b1c8a 100644
early_reserve_initrd();
-@@ -960,16 +963,16 @@ void __init setup_arch(char **cmdline_p)
+@@ -935,16 +938,16 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -28806,7 +28263,7 @@ index e4fcb87..9c06c55 100644
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 71820c4..ad16f6b 100644
+index da52e6b..8c18d64 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -189,7 +189,7 @@ static unsigned long align_sigframe(unsigned long sp)
@@ -28886,10 +28343,10 @@ index 71820c4..ad16f6b 100644
}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
-index 15aaa69..66103af 100644
+index 12c8286..aa65d13 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
-@@ -334,7 +334,7 @@ static int __init nonmi_ipi_setup(char *str)
+@@ -336,7 +336,7 @@ static int __init nonmi_ipi_setup(char *str)
__setup("nonmi_ipi", nonmi_ipi_setup);
@@ -28899,10 +28356,10 @@ index 15aaa69..66103af 100644
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index b1f3ed9c..b76221b 100644
+index 892ee2e5..be6b3f6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
-@@ -220,14 +220,17 @@ static void notrace start_secondary(void *unused)
+@@ -213,14 +213,17 @@ static void notrace start_secondary(void *unused)
enable_start_cpu0 = 0;
@@ -28924,7 +28381,7 @@ index b1f3ed9c..b76221b 100644
/*
* Check TSC synchronization with the BP:
*/
-@@ -808,16 +811,15 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+@@ -809,16 +812,15 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
alternatives_enable_smp();
per_cpu(current_task, cpu) = idle;
@@ -28943,7 +28400,7 @@ index b1f3ed9c..b76221b 100644
}
/*
-@@ -838,9 +840,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+@@ -839,9 +841,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
unsigned long timeout;
idle->thread.sp = (unsigned long) (((struct pt_regs *)
@@ -28956,7 +28413,7 @@ index b1f3ed9c..b76221b 100644
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
-@@ -992,6 +996,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+@@ -989,6 +993,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
common_cpu_up(cpu, tidle);
@@ -28973,20 +28430,20 @@ index b1f3ed9c..b76221b 100644
* We have to walk the irq descriptors to setup the vector
* space for the cpu which comes online. Prevent irq
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
-index 0ccb53a..fbc4759 100644
+index c9a0738..f0ab628 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
-@@ -44,7 +44,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+@@ -45,7 +45,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
addr += base;
}
mutex_unlock(&child->mm->context.lock);
- }
+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
+ addr = ktla_ktva(addr);
+ #endif
return addr;
- }
-@@ -55,6 +56,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+@@ -57,6 +58,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
unsigned char opcode[15];
unsigned long addr = convert_ip_to_linear(child, regs);
@@ -29422,10 +28879,10 @@ index 1c113db..287b42e 100644
static int trace_irq_vector_refcount;
static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index f579192..aed90b8 100644
+index 346eec7..d98e7a6 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
-@@ -69,7 +69,7 @@
+@@ -70,7 +70,7 @@
#include <asm/proto.h>
/* No need to be aligned, but done to keep all IDTs defined the same way. */
@@ -29434,7 +28891,7 @@ index f579192..aed90b8 100644
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
-@@ -77,7 +77,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
+@@ -78,7 +78,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#endif
/* Must be page-aligned because the real IDT is used in a fixmap. */
@@ -29443,7 +28900,7 @@ index f579192..aed90b8 100644
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
-@@ -174,7 +174,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
+@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
* will catch asm bugs and any attempt to use ist_preempt_enable
* from double_fault.
*/
@@ -29452,7 +28909,7 @@ index f579192..aed90b8 100644
current_stack_pointer()) >= THREAD_SIZE);
preempt_count_sub(HARDIRQ_OFFSET);
-@@ -191,7 +191,7 @@ void ist_end_non_atomic(void)
+@@ -182,7 +182,7 @@ void ist_end_non_atomic(void)
}
static nokprobe_inline int
@@ -29461,7 +28918,7 @@ index f579192..aed90b8 100644
struct pt_regs *regs, long error_code)
{
if (v8086_mode(regs)) {
-@@ -211,8 +211,20 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+@@ -202,8 +202,20 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
if (!fixup_exception(regs)) {
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
@@ -29482,7 +28939,7 @@ index f579192..aed90b8 100644
return 0;
}
-@@ -251,7 +263,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
+@@ -242,7 +254,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
}
static void
@@ -29491,7 +28948,7 @@ index f579192..aed90b8 100644
long error_code, siginfo_t *info)
{
struct task_struct *tsk = current;
-@@ -275,7 +287,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+@@ -266,7 +278,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
printk_ratelimit()) {
pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
@@ -29500,7 +28957,7 @@ index f579192..aed90b8 100644
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
pr_cont("\n");
-@@ -357,6 +369,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+@@ -347,6 +359,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_DF;
@@ -29512,7 +28969,7 @@ index f579192..aed90b8 100644
#ifdef CONFIG_DOUBLEFAULT
df_debug(regs, error_code);
#endif
-@@ -473,11 +490,35 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -459,11 +476,35 @@ do_general_protection(struct pt_regs *regs, long error_code)
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
@@ -29520,14 +28977,14 @@ index f579192..aed90b8 100644
+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
-+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
-+ die("PAX: suspicious general protection fault", regs, error_code);
-+ else
++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
+#endif
+
die("general protection fault", regs, error_code);
+ }
- goto exit;
+ return;
}
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
@@ -29549,7 +29006,7 @@ index f579192..aed90b8 100644
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
-@@ -576,6 +617,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+@@ -559,6 +600,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
container_of(task_pt_regs(current),
struct bad_iret_stack, regs);
@@ -29560,7 +29017,7 @@ index f579192..aed90b8 100644
memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index dc9af7a..1bc625e 100644
+index c3f7602..f6033e1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -151,7 +151,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
@@ -29573,7 +29030,7 @@ index dc9af7a..1bc625e 100644
/*
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
-index 6647624..2056791 100644
+index bf4db6e..d491400 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -978,7 +978,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
@@ -29598,44 +29055,21 @@ index 4cf401f..ae8c9cf 100644
* verify_cpu, returns the status of longmode and SSE in register %eax.
* 0: Success 1: Failure
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
-index fc9db6e..2c5865d 100644
+index 5246193..c6bed42 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
-@@ -44,6 +44,7 @@
- #include <linux/ptrace.h>
- #include <linux/audit.h>
- #include <linux/stddef.h>
-+#include <linux/grsecurity.h>
-
- #include <asm/uaccess.h>
- #include <asm/io.h>
-@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+@@ -144,7 +144,7 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
do_exit(SIGSEGV);
}
- tss = &per_cpu(cpu_tss, get_cpu());
+ tss = cpu_tss + get_cpu();
- current->thread.sp0 = current->thread.saved_sp0;
- current->thread.sysenter_cs = __KERNEL_CS;
- load_sp0(tss, &current->thread);
-@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
-
- if (tsk->thread.saved_sp0)
+ tsk->thread.sp0 = vm86->saved_sp0;
+ tsk->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, &tsk->thread);
+@@ -259,6 +259,13 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
return -EPERM;
-+
-+#ifdef CONFIG_GRKERNSEC_VM86
-+ if (!capable(CAP_SYS_RAWIO)) {
-+ gr_handle_vm86();
-+ return -EPERM;
-+ }
-+#endif
-+
- tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
- offsetof(struct kernel_vm86_struct, vm86plus) -
- sizeof(info.regs));
-@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
- int tmp;
- struct vm86plus_struct __user *v86;
+ }
+#ifdef CONFIG_GRKERNSEC_VM86
+ if (!capable(CAP_SYS_RAWIO)) {
@@ -29644,29 +29078,44 @@ index fc9db6e..2c5865d 100644
+ }
+#endif
+
- tsk = current;
- switch (cmd) {
- case VM86_REQUEST_IRQ:
-@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
- tsk->thread.saved_fs = info->regs32->fs;
- tsk->thread.saved_gs = get_user_gs(info->regs32);
+ if (!vm86) {
+ if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
+ return -ENOMEM;
+@@ -354,7 +361,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
+ vm86->saved_sp0 = tsk->thread.sp0;
+ lazy_save_gs(vm86->regs32.gs);
- tss = &per_cpu(cpu_tss, get_cpu());
+ tss = cpu_tss + get_cpu();
- tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ /* make room for real-mode segments */
+ tsk->thread.sp0 += 16;
if (cpu_has_sep)
- tsk->thread.sysenter_cs = 0;
-@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
+@@ -535,7 +542,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
goto cannot_handle;
- if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+ if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
goto cannot_handle;
- intr_ptr = (unsigned long __user *) (i << 2);
-+ intr_ptr = (__force unsigned long __user *) (i << 2);
++ intr_ptr = (unsigned long __force_user *) (i << 2);
if (get_user(segoffs, intr_ptr))
goto cannot_handle;
if ((segoffs >> 16) == BIOSSEG)
+@@ -828,6 +835,14 @@ static inline int get_and_reset_irq(int irqnumber)
+ static int do_vm86_irq_handling(int subfunction, int irqnumber)
+ {
+ int ret;
++
++#ifdef CONFIG_GRKERNSEC_VM86
++ if (!capable(CAP_SYS_RAWIO)) {
++ gr_handle_vm86();
++ return -EPERM;
++ }
++#endif
++
+ switch (subfunction) {
+ case VM86_GET_AND_RESET_IRQ: {
+ return get_and_reset_irq(irqnumber);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
-index 00bf300..03e1c3b 100644
+index 74e4bf1..a9a6168 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -26,6 +26,13 @@
@@ -29935,29 +29384,6 @@ index 00bf300..03e1c3b 100644
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
-index b034b1b..32462af 100644
---- a/arch/x86/kernel/vsmp_64.c
-+++ b/arch/x86/kernel/vsmp_64.c
-@@ -224,10 +224,15 @@ static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
- static void vsmp_apic_post_init(void)
- {
- /* need to update phys_pkg_id */
-- apic->phys_pkg_id = apicid_phys_pkg_id;
-+ pax_open_kernel();
-+ *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
-+ pax_close_kernel();
-
-- if (!irq_routing_comply)
-- apic->vector_allocation_domain = fill_vector_allocation_domain;
-+ if (!irq_routing_comply) {
-+ pax_open_kernel();
-+ *(void **)&apic->vector_allocation_domain = fill_vector_allocation_domain;
-+ pax_close_kernel();
-+ }
- }
-
- void __init vsmp_init(void)
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index a0695be..33e180c 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
@@ -30062,7 +29488,7 @@ index 2fbea25..9e0f8c7 100644
out:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index f17c342..d5d17bc 100644
+index 1505587..e7b669d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3870,7 +3870,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
@@ -30075,7 +29501,7 @@ index f17c342..d5d17bc 100644
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
-index fef922f..18f48a0 100644
+index 7cc2360..6ae1236 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -39,14 +39,14 @@
@@ -30157,7 +29583,7 @@ index 856f791..bfc7694 100644
{
int i;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 236e346..2b0f2be 100644
+index ae4483a..0eac8a1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -56,7 +56,7 @@
@@ -30170,10 +29596,10 @@ index 236e346..2b0f2be 100644
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
-index 0f67d7e..4b9fa11 100644
+index 736e6ab..b2e3094 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
-@@ -343,7 +343,7 @@ retry_walk:
+@@ -335,7 +335,7 @@ retry_walk:
if (unlikely(kvm_is_error_hva(host_addr)))
goto error;
@@ -30183,10 +29609,10 @@ index 0f67d7e..4b9fa11 100644
goto error;
walker->ptep_user[walker->level - 1] = ptep_user;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 00da6e8..7901046 100644
+index d7f8938..bc95a50 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
-@@ -1107,6 +1107,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+@@ -1108,6 +1108,7 @@ static void init_vmcb(struct vcpu_svm *svm)
set_exception_intercept(svm, UD_VECTOR);
set_exception_intercept(svm, MC_VECTOR);
set_exception_intercept(svm, AC_VECTOR);
@@ -30194,7 +29620,7 @@ index 00da6e8..7901046 100644
set_intercept(svm, INTERCEPT_INTR);
set_intercept(svm, INTERCEPT_NMI);
-@@ -1641,20 +1642,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+@@ -1642,20 +1643,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
mark_dirty(svm->vmcb, VMCB_SEG);
}
@@ -30216,7 +29642,7 @@ index 00da6e8..7901046 100644
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
set_exception_intercept(svm, BP_VECTOR);
} else
-@@ -1760,7 +1754,6 @@ static int db_interception(struct vcpu_svm *svm)
+@@ -1761,7 +1755,6 @@ static int db_interception(struct vcpu_svm *svm)
if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
svm->vmcb->save.rflags &=
~(X86_EFLAGS_TF | X86_EFLAGS_RF);
@@ -30224,7 +29650,7 @@ index 00da6e8..7901046 100644
}
if (svm->vcpu.guest_debug &
-@@ -3593,7 +3586,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+@@ -3595,7 +3588,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
@@ -30236,7 +29662,7 @@ index 00da6e8..7901046 100644
load_TR_desc();
}
-@@ -3759,7 +3756,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
+@@ -3761,7 +3758,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
*/
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
@@ -30244,7 +29670,7 @@ index 00da6e8..7901046 100644
}
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
-@@ -3989,6 +3985,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -3991,6 +3987,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
#endif
@@ -30255,7 +29681,7 @@ index 00da6e8..7901046 100644
reload_tss(vcpu);
local_irq_disable();
-@@ -4362,7 +4362,7 @@ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
+@@ -4364,7 +4364,7 @@ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}
@@ -30264,7 +29690,7 @@ index 00da6e8..7901046 100644
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
-@@ -4381,7 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
+@@ -4383,7 +4383,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,
@@ -30274,7 +29700,7 @@ index 00da6e8..7901046 100644
.set_msr = svm_set_msr,
.get_segment_base = svm_get_segment_base,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index e77d75b..0f056cd 100644
+index 343d369..95ade96 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
@@ -30324,7 +29750,7 @@ index e77d75b..0f056cd 100644
{
u64 host_tsc, tsc_offset;
-@@ -4459,7 +4467,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4450,7 +4458,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
unsigned long cr4;
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
@@ -30335,7 +29761,7 @@ index e77d75b..0f056cd 100644
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
-@@ -4486,7 +4497,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4477,7 +4488,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
@@ -30344,7 +29770,7 @@ index e77d75b..0f056cd 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6099,11 +6110,17 @@ static __init int hardware_setup(void)
+@@ -6015,11 +6026,17 @@ static __init int hardware_setup(void)
* page upon invalidation. No need to do anything if not
* using the APIC_ACCESS_ADDR VMCS field.
*/
@@ -30364,7 +29790,7 @@ index e77d75b..0f056cd 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -6114,6 +6131,7 @@ static __init int hardware_setup(void)
+@@ -6030,6 +6047,7 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
@@ -30372,7 +29798,7 @@ index e77d75b..0f056cd 100644
if (enable_apicv)
kvm_x86_ops->update_cr8_intercept = NULL;
else {
-@@ -6122,6 +6140,7 @@ static __init int hardware_setup(void)
+@@ -6038,6 +6056,7 @@ static __init int hardware_setup(void)
kvm_x86_ops->deliver_posted_interrupt = NULL;
kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
}
@@ -30380,7 +29806,7 @@ index e77d75b..0f056cd 100644
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
-@@ -6176,10 +6195,12 @@ static __init int hardware_setup(void)
+@@ -6092,10 +6111,12 @@ static __init int hardware_setup(void)
enable_pml = 0;
if (!enable_pml) {
@@ -30393,7 +29819,7 @@ index e77d75b..0f056cd 100644
}
return alloc_kvm_area();
-@@ -8382,6 +8403,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8351,6 +8372,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
@@ -30406,7 +29832,7 @@ index e77d75b..0f056cd 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
-@@ -8434,6 +8461,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8403,6 +8430,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -30418,7 +29844,7 @@ index e77d75b..0f056cd 100644
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
-@@ -8447,7 +8479,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8416,7 +8448,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
@@ -30427,7 +29853,7 @@ index e77d75b..0f056cd 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -8456,8 +8488,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8425,8 +8457,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -30448,7 +29874,7 @@ index e77d75b..0f056cd 100644
#endif
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-@@ -10311,7 +10353,7 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+@@ -10280,7 +10322,7 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
@@ -30458,10 +29884,10 @@ index e77d75b..0f056cd 100644
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 2781e2b..b7bff94 100644
+index 43609af..f8b7b2c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1844,8 +1844,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1828,8 +1828,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -30472,7 +29898,7 @@ index 2781e2b..b7bff94 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2735,6 +2735,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2530,6 +2530,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -30481,7 +29907,7 @@ index 2781e2b..b7bff94 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -3095,7 +3097,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+@@ -2890,7 +2892,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
@@ -30490,7 +29916,7 @@ index 2781e2b..b7bff94 100644
u64 xstate_bv = xsave->header.xfeatures;
u64 valid;
-@@ -3131,7 +3133,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+@@ -2926,7 +2928,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
@@ -30499,7 +29925,7 @@ index 2781e2b..b7bff94 100644
u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
u64 valid;
-@@ -3175,7 +3177,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+@@ -2970,7 +2972,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
fill_xsave((u8 *) guest_xsave->region, vcpu);
} else {
memcpy(guest_xsave->region,
@@ -30508,7 +29934,7 @@ index 2781e2b..b7bff94 100644
sizeof(struct fxregs_state));
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XSTATE_FPSSE;
-@@ -3200,7 +3202,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+@@ -2995,7 +2997,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
} else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
@@ -30517,7 +29943,7 @@ index 2781e2b..b7bff94 100644
guest_xsave->region, sizeof(struct fxregs_state));
}
return 0;
-@@ -6473,6 +6475,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+@@ -6214,6 +6216,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -30525,7 +29951,7 @@ index 2781e2b..b7bff94 100644
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
-@@ -6690,6 +6693,7 @@ out:
+@@ -6437,6 +6440,7 @@ out:
return r;
}
@@ -30533,7 +29959,7 @@ index 2781e2b..b7bff94 100644
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
-@@ -7229,7 +7233,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+@@ -6976,7 +6980,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct fxregs_state *fxsave =
@@ -30542,7 +29968,7 @@ index 2781e2b..b7bff94 100644
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
-@@ -7246,7 +7250,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -6993,7 +6997,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct fxregs_state *fxsave =
@@ -30551,7 +29977,7 @@ index 2781e2b..b7bff94 100644
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
-@@ -7262,9 +7266,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7009,9 +7013,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
static void fx_init(struct kvm_vcpu *vcpu)
{
@@ -30563,7 +29989,7 @@ index 2781e2b..b7bff94 100644
host_xcr0 | XSTATE_COMPACTION_ENABLED;
/*
-@@ -7288,7 +7292,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+@@ -7035,7 +7039,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
@@ -30572,7 +29998,7 @@ index 2781e2b..b7bff94 100644
trace_kvm_fpu(1);
}
-@@ -7566,6 +7570,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+@@ -7324,6 +7328,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
struct static_key kvm_no_apic_vcpu __read_mostly;
@@ -30581,7 +30007,7 @@ index 2781e2b..b7bff94 100644
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
-@@ -7582,11 +7588,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+@@ -7340,11 +7346,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -30600,7 +30026,7 @@ index 2781e2b..b7bff94 100644
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
-@@ -7640,6 +7649,9 @@ fail_mmu_destroy:
+@@ -7398,6 +7407,9 @@ fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
@@ -30610,7 +30036,7 @@ index 2781e2b..b7bff94 100644
fail:
return r;
}
-@@ -7657,6 +7669,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+@@ -7415,6 +7427,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
free_page((unsigned long)vcpu->arch.pio_data);
if (!irqchip_in_kernel(vcpu->kvm))
static_key_slow_dec(&kvm_no_apic_vcpu);
@@ -30620,10 +30046,10 @@ index 2781e2b..b7bff94 100644
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index f2dc08c..d85d906 100644
+index a0d09f6..92ede76 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
-@@ -1341,9 +1341,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+@@ -1336,9 +1336,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
* Rebooting also tells the Host we're finished, but the RESTART flag tells the
* Launcher to reboot us.
*/
@@ -33283,10 +32709,10 @@ index 903ec1e..41b4708 100644
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 9dc9098..938251a 100644
+index eef44d9..b0fb164 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
-@@ -14,12 +14,19 @@
+@@ -14,6 +14,8 @@
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */
@@ -33295,9 +32721,10 @@ index 9dc9098..938251a 100644
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
- #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
+@@ -21,6 +23,11 @@
#include <asm/fixmap.h> /* VSYSCALL_ADDR */
#include <asm/vsyscall.h> /* emulate_vsyscall */
+ #include <asm/vm86.h> /* struct vm86 */
+#include <asm/tlbflush.h>
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
@@ -33306,7 +32733,7 @@ index 9dc9098..938251a 100644
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
-@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+@@ -122,7 +129,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
return !instr_lo || (instr_lo>>1) == 1;
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
@@ -33318,7 +32745,7 @@ index 9dc9098..938251a 100644
return 0;
*prefetch = (instr_lo == 0xF) &&
-@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+@@ -156,7 +166,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
while (instr < max_instr) {
unsigned char opcode;
@@ -33330,7 +32757,7 @@ index 9dc9098..938251a 100644
break;
instr++;
-@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+@@ -187,6 +200,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
force_sig_info(si_signo, &info, tsk);
}
@@ -33365,7 +32792,7 @@ index 9dc9098..938251a 100644
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
-@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
+@@ -237,10 +278,27 @@ void vmalloc_sync_all(void)
for (address = VMALLOC_START & PMD_MASK;
address >= TASK_SIZE && address < FIXADDR_TOP;
address += PMD_SIZE) {
@@ -33393,7 +32820,7 @@ index 9dc9098..938251a 100644
spinlock_t *pgt_lock;
pmd_t *ret;
-@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
+@@ -248,8 +306,14 @@ void vmalloc_sync_all(void)
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
@@ -33409,7 +32836,7 @@ index 9dc9098..938251a 100644
if (!ret)
break;
-@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
+@@ -283,6 +347,12 @@ static noinline int vmalloc_fault(unsigned long address)
* an interrupt in the middle of a task switch..
*/
pgd_paddr = read_cr3();
@@ -33422,7 +32849,7 @@ index 9dc9098..938251a 100644
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
-@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
+@@ -381,11 +451,25 @@ static noinline int vmalloc_fault(unsigned long address)
* happen within a race in page table update. In the later
* case just flush:
*/
@@ -33449,7 +32876,7 @@ index 9dc9098..938251a 100644
if (pgd_none(*pgd)) {
set_pgd(pgd, *pgd_ref);
arch_flush_lazy_mmu_mode();
-@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+@@ -552,7 +636,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
@@ -33458,7 +32885,7 @@ index 9dc9098..938251a 100644
return 1;
#endif
return 0;
-@@ -576,9 +660,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+@@ -579,9 +663,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
}
static const char nx_warning[] = KERN_CRIT
@@ -33470,7 +32897,7 @@ index 9dc9098..938251a 100644
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-@@ -587,7 +671,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -590,7 +674,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
if (!oops_may_print())
return;
@@ -33479,7 +32906,7 @@ index 9dc9098..938251a 100644
unsigned int level;
pgd_t *pgd;
pte_t *pte;
-@@ -598,13 +682,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -601,13 +685,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
pte = lookup_address_in_pgd(pgd, address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
@@ -33507,7 +32934,7 @@ index 9dc9098..938251a 100644
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
-@@ -783,6 +879,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+@@ -786,6 +882,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
@@ -33530,7 +32957,7 @@ index 9dc9098..938251a 100644
/* Kernel addresses are always protection faults: */
if (address >= TASK_SIZE)
error_code |= PF_PROT;
-@@ -865,7 +977,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+@@ -868,7 +980,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
@@ -33539,7 +32966,7 @@ index 9dc9098..938251a 100644
code = BUS_MCEERR_AR;
}
#endif
-@@ -917,6 +1029,107 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+@@ -920,6 +1032,107 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
return 1;
}
@@ -33647,7 +33074,7 @@ index 9dc9098..938251a 100644
/*
* Handle a spurious fault caused by a stale TLB entry.
*
-@@ -1002,6 +1215,9 @@ int show_unhandled_signals = 1;
+@@ -1005,6 +1218,9 @@ int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
@@ -33657,7 +33084,7 @@ index 9dc9098..938251a 100644
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -1064,6 +1280,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+@@ -1067,6 +1283,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
tsk = current;
mm = tsk->mm;
@@ -33680,7 +33107,7 @@ index 9dc9098..938251a 100644
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
-@@ -1188,6 +1420,11 @@ retry:
+@@ -1191,6 +1423,11 @@ retry:
might_sleep();
}
@@ -33692,7 +33119,7 @@ index 9dc9098..938251a 100644
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
-@@ -1199,18 +1436,24 @@ retry:
+@@ -1202,18 +1439,24 @@ retry:
bad_area(regs, error_code, address);
return;
}
@@ -33728,7 +33155,7 @@ index 9dc9098..938251a 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1330,3 +1573,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1333,3 +1576,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */
@@ -34173,7 +33600,7 @@ index 42982b2..7168fc3 100644
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 8533b46..8c83176 100644
+index 1d8a83d..e435d63 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,7 @@
@@ -34193,7 +33620,7 @@ index 8533b46..8c83176 100644
/*
* We need to define the tracepoints somewhere, and tlb.c
-@@ -615,7 +618,18 @@ void __init init_mem_mapping(void)
+@@ -618,7 +621,18 @@ void __init init_mem_mapping(void)
early_ioremap_page_table_range_init();
#endif
@@ -34212,7 +33639,7 @@ index 8533b46..8c83176 100644
__flush_tlb_all();
early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
-@@ -631,10 +645,40 @@ void __init init_mem_mapping(void)
+@@ -634,10 +648,40 @@ void __init init_mem_mapping(void)
* Access has to be given to non-kernel-ram areas as well, these contain the PCI
* mmio resources as well as potential bios/acpi data regions.
*/
@@ -34254,7 +33681,7 @@ index 8533b46..8c83176 100644
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
if (!page_is_ram(pagenr))
-@@ -680,8 +724,127 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+@@ -683,8 +727,127 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
#endif
}
@@ -34383,7 +33810,7 @@ index 8533b46..8c83176 100644
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 68aec42..95ad5d3 100644
+index 7562f42..6859164 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
@@ -34636,7 +34063,7 @@ index 68aec42..95ad5d3 100644
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index f9977a7..21a5082 100644
+index df48430..0a2197b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -136,7 +136,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
@@ -35036,10 +34463,10 @@ index 0057a7acc..95c7edd 100644
might_sleep();
if (is_enabled()) /* recheck and proper locking in *_core() */
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
-index c28f618..73c7772 100644
+index 71fc79a..7388ad7 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
-@@ -329,11 +329,11 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
+@@ -292,11 +292,11 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
@@ -35054,10 +34481,10 @@ index c28f618..73c7772 100644
err_out:
/* info might be NULL, but kfree() handles that */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
-index 4053bb5..b1ad3dc 100644
+index c3b3f65..8919a28 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
-@@ -506,7 +506,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
+@@ -508,7 +508,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
}
}
@@ -35067,10 +34494,10 @@ index 4053bb5..b1ad3dc 100644
unsigned long uninitialized_var(pfn_align);
int i, nid;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 727158c..e278402 100644
+index 2c44c07..5c5e457 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
-@@ -260,7 +260,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+@@ -259,7 +259,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
*/
#ifdef CONFIG_PCI_BIOS
if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
@@ -35079,7 +34506,7 @@ index 727158c..e278402 100644
#endif
/*
-@@ -268,9 +268,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+@@ -267,9 +267,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
* Does not cover __inittext since that is gone later on. On
* 64bit we do not enforce !NX on the low mapping
*/
@@ -35092,7 +34519,7 @@ index 727158c..e278402 100644
/*
* The .rodata section needs to be read-only. Using the pfn
* catches all aliases.
-@@ -278,6 +279,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+@@ -277,6 +278,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
__pa_symbol(__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
@@ -35100,7 +34527,7 @@ index 727158c..e278402 100644
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
-@@ -316,6 +318,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+@@ -315,6 +317,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
}
#endif
@@ -35114,7 +34541,7 @@ index 727158c..e278402 100644
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
return prot;
-@@ -436,23 +445,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+@@ -435,23 +444,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
/* change init_mm */
@@ -35154,7 +34581,7 @@ index 727158c..e278402 100644
}
static int
-@@ -505,7 +528,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
+@@ -504,7 +527,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* up accordingly.
*/
old_pte = *kpte;
@@ -35164,7 +34591,7 @@ index 727158c..e278402 100644
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
-@@ -675,6 +699,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+@@ -674,6 +698,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
return 0;
}
@@ -35175,7 +34602,7 @@ index 727158c..e278402 100644
static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
unsigned long address)
{
-@@ -1118,6 +1146,9 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+@@ -1117,6 +1145,9 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
}
}
@@ -35185,7 +34612,7 @@ index 727158c..e278402 100644
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
unsigned long address;
-@@ -1176,7 +1207,9 @@ repeat:
+@@ -1175,7 +1206,9 @@ repeat:
* Do we really change anything ?
*/
if (pte_val(old_pte) != pte_val(new_pte)) {
@@ -35725,7 +35152,7 @@ index 90555bf..f5f1828 100644
}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
-index 90b924a..4197ac2 100644
+index 8ddb5d0..6f70318 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -45,7 +45,11 @@ void leave_mm(int cpu)
@@ -35873,7 +35300,7 @@ index 4093216..44b6b83 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index be2e7a2..e6960dd 100644
+index 70efcd0..0a689c9 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -14,7 +14,11 @@
@@ -35898,7 +35325,7 @@ index be2e7a2..e6960dd 100644
}
struct jit_context {
-@@ -1026,7 +1032,9 @@ common_load:
+@@ -1030,7 +1036,9 @@ common_load:
pr_err("bpf_jit_compile fatal error\n");
return -EFAULT;
}
@@ -35908,7 +35335,7 @@ index be2e7a2..e6960dd 100644
}
proglen += ilen;
addrs[i] = proglen;
-@@ -1103,7 +1111,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+@@ -1107,7 +1115,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (image) {
bpf_flush_icache(header, image + proglen);
@@ -35916,7 +35343,7 @@ index be2e7a2..e6960dd 100644
prog->bpf_func = (void *)image;
prog->jited = true;
}
-@@ -1116,12 +1123,8 @@ void bpf_jit_free(struct bpf_prog *fp)
+@@ -1120,12 +1127,8 @@ void bpf_jit_free(struct bpf_prog *fp)
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr;
@@ -36042,10 +35469,10 @@ index 71e8a67..6a313bb 100644
struct op_counter_config;
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
-index 7553921..d631bd4 100644
+index 0d24e7c..d937be3 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
-@@ -278,7 +278,7 @@ int __init intel_mid_pci_init(void)
+@@ -283,7 +283,7 @@ int __init intel_mid_pci_init(void)
pci_mmcfg_late_init();
pcibios_enable_irq = intel_mid_pci_irq_enable;
pcibios_disable_irq = intel_mid_pci_irq_disable;
@@ -36055,7 +35482,7 @@ index 7553921..d631bd4 100644
/* Continue with standard init */
return 1;
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
-index 9bd1154..e9d4656 100644
+index 32e7034..bf2dd06 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -51,7 +51,7 @@ struct irq_router {
@@ -37188,11 +36615,11 @@ index 48e3858..ab4458c 100644
return 0;
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
-index 4841453..d59a203 100644
+index c7b15f3..cc09a65 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
-@@ -9,6 +9,7 @@ config XEN
- select XEN_HAVE_PVMMU
+@@ -10,6 +10,7 @@ config XEN
+ select XEN_HAVE_VPMU
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_LOCAL_APIC && X86_TSC
+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
@@ -37200,10 +36627,10 @@ index 4841453..d59a203 100644
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 3cebc65..2789b02 100644
+index 993b7a7..59dec9a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
-@@ -129,8 +129,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
+@@ -130,8 +130,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
@@ -37212,7 +36639,7 @@ index 3cebc65..2789b02 100644
RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
-@@ -588,8 +586,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+@@ -589,8 +587,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
@@ -37222,7 +36649,7 @@ index 3cebc65..2789b02 100644
int f;
/*
-@@ -637,8 +634,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+@@ -638,8 +635,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
unsigned long va = dtr->address;
unsigned int size = dtr->size + 1;
@@ -37232,7 +36659,7 @@ index 3cebc65..2789b02 100644
int f;
/*
-@@ -646,7 +642,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+@@ -647,7 +643,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
* 8-byte entries, or 16 4k pages..
*/
@@ -37241,7 +36668,7 @@ index 3cebc65..2789b02 100644
BUG_ON(va & ~PAGE_MASK);
for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-@@ -1268,30 +1264,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+@@ -1270,7 +1266,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
#endif
};
@@ -37249,6 +36676,10 @@ index 3cebc65..2789b02 100644
+static __noreturn void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
+ int cpu;
+@@ -1278,26 +1274,26 @@ static void xen_reboot(int reason)
+ for_each_online_cpu(cpu)
+ xen_pmu_finish(cpu);
- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
- BUG();
@@ -37279,7 +36710,7 @@ index 3cebc65..2789b02 100644
{
if (pm_power_off)
pm_power_off();
-@@ -1444,8 +1440,11 @@ static void __ref xen_setup_gdt(int cpu)
+@@ -1450,8 +1446,11 @@ static void __ref xen_setup_gdt(int cpu)
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
pv_cpu_ops.load_gdt = xen_load_gdt_boot;
@@ -37293,7 +36724,7 @@ index 3cebc65..2789b02 100644
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
pv_cpu_ops.load_gdt = xen_load_gdt;
-@@ -1561,7 +1560,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
+@@ -1567,7 +1566,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
/* Work out if we support NX */
@@ -37312,7 +36743,7 @@ index 3cebc65..2789b02 100644
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
-@@ -1589,13 +1598,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+@@ -1595,13 +1604,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
machine_ops = xen_machine_ops;
@@ -37327,10 +36758,10 @@ index 3cebc65..2789b02 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index dd151b2..3291e38 100644
+index 9c479fe..7ec4091 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
-@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1950,7 +1950,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
@@ -37342,7 +36773,7 @@ index dd151b2..3291e38 100644
convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
-@@ -1860,11 +1864,22 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1980,11 +1984,22 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -37366,7 +36797,7 @@ index dd151b2..3291e38 100644
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-@@ -2048,6 +2063,7 @@ static void __init xen_post_allocator_init(void)
+@@ -2395,6 +2410,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if CONFIG_PGTABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -37374,7 +36805,7 @@ index dd151b2..3291e38 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2126,6 +2142,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2473,6 +2489,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
@@ -37383,10 +36814,10 @@ index dd151b2..3291e38 100644
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
-index 8648438..18bac20 100644
+index 3f4ebf0..f074dc1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
-@@ -284,17 +284,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
+@@ -306,17 +306,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
if (xen_pv_domain()) {
if (!xen_feature(XENFEAT_writable_page_tables))
@@ -37406,7 +36837,7 @@ index 8648438..18bac20 100644
#endif
xen_filter_cpu_maps();
-@@ -375,7 +371,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+@@ -399,7 +395,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
#ifdef CONFIG_X86_32
/* Note: PVH is not yet supported on x86_32. */
ctxt->user_regs.fs = __KERNEL_PERCPU;
@@ -37415,7 +36846,7 @@ index 8648438..18bac20 100644
#endif
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-@@ -383,8 +379,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+@@ -407,8 +403,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
@@ -37426,7 +36857,7 @@ index 8648438..18bac20 100644
ctxt->user_regs.ss = __KERNEL_DS;
xen_copy_trap_info(ctxt->trap_ctxt);
-@@ -720,7 +716,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
+@@ -747,7 +743,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
void __init xen_smp_init(void)
{
@@ -37449,7 +36880,7 @@ index fd92a64..1f72641 100644
#else
movl %ss:xen_vcpu, %eax
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
-index 8afdfcc..79239db 100644
+index b65f59a..c43f9c6 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -41,6 +41,17 @@ ENTRY(startup_xen)
@@ -37471,7 +36902,7 @@ index 8afdfcc..79239db 100644
mov %rsi,xen_start_info
mov $init_thread_union+THREAD_SIZE,%rsp
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
-index 2292721..a9bb18e 100644
+index 1399423..b1ae0fa 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -16,8 +16,6 @@ void xen_syscall_target(void);
@@ -37522,10 +36953,10 @@ index 2f33760..835e50a 100644
#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
diff --git a/block/bio.c b/block/bio.c
-index d6e5ba3..2bb142c 100644
+index ad3f276..bef6d50 100644
--- a/block/bio.c
+++ b/block/bio.c
-@@ -1187,7 +1187,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+@@ -1140,7 +1140,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -37534,7 +36965,7 @@ index d6e5ba3..2bb142c 100644
return ERR_PTR(-EINVAL);
nr_pages += end - start;
-@@ -1312,7 +1312,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+@@ -1265,7 +1265,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -37557,10 +36988,10 @@ index 0736729..2ec3b48 100644
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = blk_iopoll_budget;
diff --git a/block/blk-map.c b/block/blk-map.c
-index da310a1..213b5c9 100644
+index f565e11..f05b424 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
-@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+@@ -214,7 +214,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (!len || !kbuf)
return -EINVAL;
@@ -37636,7 +37067,7 @@ index f678c73..f35aa18 100644
err = -EFAULT;
goto out;
diff --git a/block/genhd.c b/block/genhd.c
-index 59a1395..54ff187 100644
+index 0c706f3..7e54d22 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -470,21 +470,24 @@ static char *bdevt_str(dev_t devt, char *buf)
@@ -37756,8 +37187,44 @@ index dda653c..028a13ee 100644
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index b4ffc5b..e5b5721 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
++ walk->iv = req->info;
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->iv_buffer = NULL;
+- walk->iv = req->info;
+ if (unlikely(((unsigned long)walk->iv & alignmask))) {
+ int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 11b9814..8cc1622 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
++ walk->iv = desc->info;
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->buffer = NULL;
+- walk->iv = desc->info;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = blkcipher_copy_iv(walk);
+ if (err)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
-index 22ba81f..1acac67 100644
+index c81861b..dbf894f 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
@@ -37779,10 +37246,10 @@ index 22ba81f..1acac67 100644
static void cryptd_queue_worker(struct work_struct *work);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
-index 45e7d51..2967121 100644
+index ee9cfb9..30b36ed 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
-@@ -385,7 +385,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+@@ -392,7 +392,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
int ret;
pinst->kobj.kset = pcrypt_kset;
@@ -37864,10 +37331,10 @@ index d51a30a..b6891a3 100644
stream->workspace = vzalloc(zlib_inflate_workspacesize());
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
-index 8c2fe2f..fc47c12 100644
+index 5778e8e..03a478b 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
-@@ -398,7 +398,7 @@ static int video_disable_backlight_sysfs_if(
+@@ -394,7 +394,7 @@ static int video_disable_backlight_sysfs_if(
return 0;
}
@@ -37877,7 +37344,7 @@ index 8c2fe2f..fc47c12 100644
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
*/
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
-index 52dfd0d..8386baf 100644
+index d62a616..387dbd0 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -70,11 +70,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
@@ -37912,10 +37379,10 @@ index 16129c7..8b675cd 100644
struct apei_exec_context {
u32 ip;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
-index 2bfd53c..391e9a4 100644
+index 23981ac..35eb27e 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
-@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
+@@ -474,7 +474,7 @@ static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
const struct acpi_hest_generic_status *estatus)
{
@@ -37924,7 +37391,7 @@ index 2bfd53c..391e9a4 100644
unsigned int curr_seqno;
char pfx_seq[64];
-@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
+@@ -485,7 +485,7 @@ static void __ghes_print_estatus(const char *pfx,
else
pfx = KERN_ERR;
}
@@ -37951,10 +37418,10 @@ index a83e3c6..c3d617f 100644
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
if (!bgrt_kobj)
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
-index 278dc4b..976433d 100644
+index 96809cd..6a49f97 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
-@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
+@@ -47,7 +47,7 @@ struct acpi_blacklist_item {
u32 is_critical_error;
};
@@ -37963,7 +37430,7 @@ index 278dc4b..976433d 100644
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
-@@ -172,7 +172,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
+@@ -168,7 +168,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
}
#endif
@@ -37973,10 +37440,10 @@ index 278dc4b..976433d 100644
.callback = dmi_disable_osi_vista,
.ident = "Fujitsu Siemens",
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
-index 513e7230e..802015a 100644
+index a212cef..443c9c4 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
-@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
+@@ -63,7 +63,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
}
#endif
@@ -37985,7 +37452,7 @@ index 513e7230e..802015a 100644
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
-@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+@@ -79,7 +79,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
{}
};
#else
@@ -38010,10 +37477,10 @@ index c68e724..e863008 100644
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
-index 88dbbb1..90714c0 100644
+index 4806b7f..78f0746 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
-@@ -1045,6 +1045,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
+@@ -1041,6 +1041,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
#endif /* CONFIG_PM_SLEEP */
@@ -38022,7 +37489,7 @@ index 88dbbb1..90714c0 100644
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
-@@ -1061,6 +1063,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
+@@ -1057,6 +1059,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.restore_early = acpi_subsys_resume_early,
#endif
},
@@ -38030,7 +37497,7 @@ index 88dbbb1..90714c0 100644
};
/**
-@@ -1130,7 +1133,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+@@ -1134,7 +1137,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_device_wakeup(adev, ACPI_STATE_S0, false);
}
@@ -38039,10 +37506,10 @@ index 88dbbb1..90714c0 100644
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index 9d4761d..ece2163 100644
+index 42c66b6..52256bc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
-@@ -1434,7 +1434,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
+@@ -1470,7 +1470,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
return 0;
}
@@ -38052,10 +37519,10 @@ index 9d4761d..ece2163 100644
ec_skip_dsdt_scan, "Compal JFL92", {
DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
-index 139d9e4..9a9d799 100644
+index 7188e53..6012bc4 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
-@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
+@@ -191,7 +191,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
return 0;
}
@@ -38064,24 +37531,11 @@ index 139d9e4..9a9d799 100644
/*
* Fujitsu Primequest machines will return 1023 to indicate an
* error if the _SUN method is evaluated on SxFy objects that
-diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
-index d9f7158..168e742 100644
---- a/drivers/acpi/processor_driver.c
-+++ b/drivers/acpi/processor_driver.c
-@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
- return NOTIFY_OK;
- }
-
--static struct notifier_block __refdata acpi_cpu_notifier = {
-+static struct notifier_block __refconst acpi_cpu_notifier = {
- .notifier_call = acpi_cpu_soft_notify,
- };
-
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index d540f42..d5b32ac 100644
+index 175c86b..f8226f0 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
-@@ -910,7 +910,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+@@ -906,7 +906,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
@@ -38117,10 +37571,10 @@ index 2f0d4db..b9e9b15 100644
.callback = init_old_suspend_ordering,
.ident = "Abit KN9 (nForce4 variant)",
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
-index 0876d77b..3ba0127 100644
+index 40a4265..bb254e2 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
-@@ -423,11 +423,11 @@ static u32 num_counters;
+@@ -486,11 +486,11 @@ static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;
@@ -38135,10 +37589,10 @@ index 0876d77b..3ba0127 100644
static void delete_gpe_attr_array(void)
{
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
-index 6d4e44e..44fb839 100644
+index 30d8518..06efb1e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
-@@ -1212,7 +1212,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
+@@ -1208,7 +1208,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
return 0;
}
@@ -38180,7 +37634,7 @@ index 2922f1f..6c2fdaf 100644
backlight_notifier_registered = true;
init_done = true;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 790e0de..6bae378 100644
+index b79cb10..7daa9f7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -102,7 +102,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
@@ -38192,7 +37646,7 @@ index 790e0de..6bae378 100644
struct ata_force_param {
const char *name;
-@@ -4800,7 +4800,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -4801,7 +4801,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
@@ -38201,7 +37655,7 @@ index 790e0de..6bae378 100644
ap = qc->ap;
qc->flags = 0;
-@@ -4817,7 +4817,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -4818,7 +4818,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
@@ -38210,7 +37664,7 @@ index 790e0de..6bae378 100644
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
ap = qc->ap;
link = qc->dev->link;
-@@ -5924,6 +5924,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5925,6 +5925,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
return;
spin_lock(&lock);
@@ -38218,7 +37672,7 @@ index 790e0de..6bae378 100644
for (cur = ops->inherits; cur; cur = cur->inherits) {
void **inherit = (void **)cur;
-@@ -5937,8 +5938,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5938,8 +5939,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
if (IS_ERR(*pp))
*pp = NULL;
@@ -38229,7 +37683,7 @@ index 790e0de..6bae378 100644
spin_unlock(&lock);
}
-@@ -6134,7 +6136,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+@@ -6135,7 +6137,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* give ports names and add SCSI hosts */
for (i = 0; i < host->n_ports; i++) {
@@ -38265,10 +37719,10 @@ index f840ca1..edd6ef3 100644
extern int libata_fua;
extern int libata_noacpi;
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
-index 5d9ee99..8fa2585 100644
+index 80fe0f6..8c0fa3f 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
-@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
+@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
/* Handle platform specific quirks */
if (quirk) {
if (quirk & CF_BROKEN_PIO) {
@@ -38532,10 +37986,10 @@ index 75dde90..4309ead 100644
fore200e->tx_sat++;
DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
-index a8da3a5..67cf6c2 100644
+index 0f5cb37..c8bcdef 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
-@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
@@ -38544,7 +37998,7 @@ index a8da3a5..67cf6c2 100644
goto return_host_buffers;
}
-@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
RBRQ_LEN_ERR(he_dev->rbrq_head)
? "LEN_ERR" : "",
vcc->vpi, vcc->vci);
@@ -38553,7 +38007,7 @@ index a8da3a5..67cf6c2 100644
goto return_host_buffers;
}
-@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
+@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
vcc->push(vcc, skb);
spin_lock(&he_dev->global_lock);
@@ -38562,7 +38016,7 @@ index a8da3a5..67cf6c2 100644
return_host_buffers:
++pdus_assembled;
-@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
+@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
tpd->vcc->pop(tpd->vcc, tpd->skb);
else
dev_kfree_skb_any(tpd->skb);
@@ -38571,7 +38025,7 @@ index a8da3a5..67cf6c2 100644
}
dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
return;
-@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
@@ -38580,7 +38034,7 @@ index a8da3a5..67cf6c2 100644
return -EINVAL;
}
-@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
@@ -38589,7 +38043,7 @@ index a8da3a5..67cf6c2 100644
return -EINVAL;
}
#endif
-@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
@@ -38598,7 +38052,7 @@ index a8da3a5..67cf6c2 100644
spin_unlock_irqrestore(&he_dev->global_lock, flags);
return -ENOMEM;
}
-@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
@@ -38607,7 +38061,7 @@ index a8da3a5..67cf6c2 100644
spin_unlock_irqrestore(&he_dev->global_lock, flags);
return -ENOMEM;
}
-@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
+@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
__enqueue_tpd(he_dev, tpd, cid);
spin_unlock_irqrestore(&he_dev->global_lock, flags);
@@ -39158,10 +38612,10 @@ index ddc4ceb..36e29aa 100644
}
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
-index 74e18b0..f16afa0 100644
+index 3d7fb65..0f26393 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
-@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
+@@ -843,7 +843,7 @@ static void solos_bh(unsigned long card_arg)
}
atm_charge(vcc, skb->truesize);
vcc->push(vcc, skb);
@@ -39170,7 +38624,7 @@ index 74e18b0..f16afa0 100644
break;
case PKT_STATUS:
-@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
+@@ -1124,7 +1124,7 @@ static uint32_t fpga_tx(struct solos_card *card)
vcc = SKB_CB(oldskb)->vcc;
if (vcc) {
@@ -39351,11 +38805,63 @@ index 560751b..3a4847a 100644
static ssize_t show_node_state(struct device *dev,
struct device_attribute *attr, char *buf)
+diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
+index 134483d..5588d1c 100644
+--- a/drivers/base/platform-msi.c
++++ b/drivers/base/platform-msi.c
+@@ -24,6 +24,8 @@
+ #include <linux/msi.h>
+ #include <linux/slab.h>
+
++#include <asm/pgtable.h>
++
+ #define DEV_ID_SHIFT 24
+
+ /*
+@@ -77,10 +79,12 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+
+ BUG_ON(!ops);
+
++ pax_open_kernel();
+ if (ops->msi_init == NULL)
+- ops->msi_init = platform_msi_init;
++ *(void **)&ops->msi_init = platform_msi_init;
+ if (ops->set_desc == NULL)
+- ops->set_desc = platform_msi_set_desc;
++ *(void **)&ops->set_desc = platform_msi_set_desc;
++ pax_close_kernel();
+ }
+
+ static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+@@ -98,16 +102,18 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
++ pax_open_kernel();
+ if (!chip->irq_mask)
+- chip->irq_mask = irq_chip_mask_parent;
++ *(void **)&chip->irq_mask = irq_chip_mask_parent;
+ if (!chip->irq_unmask)
+- chip->irq_unmask = irq_chip_unmask_parent;
++ *(void **)&chip->irq_unmask = irq_chip_unmask_parent;
+ if (!chip->irq_eoi)
+- chip->irq_eoi = irq_chip_eoi_parent;
++ *(void **)&chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_set_affinity)
+- chip->irq_set_affinity = msi_domain_set_affinity;
++ *(void **)&chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_write_msi_msg)
+- chip->irq_write_msi_msg = platform_msi_write_msg;
++ *(void **)&chip->irq_write_msi_msg = platform_msi_write_msg;
++ pax_close_kernel();
+ }
+
+ static void platform_msi_free_descs(struct device *dev)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
-index 0ee43c1..369dd62 100644
+index 16550c6..322c4c7 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
-@@ -1738,7 +1738,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+@@ -1517,7 +1517,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
struct cpuidle_driver *cpuidle_drv;
struct gpd_cpuidle_data *cpuidle_data;
@@ -39364,7 +38870,7 @@ index 0ee43c1..369dd62 100644
int ret = 0;
if (IS_ERR_OR_NULL(genpd) || state < 0)
-@@ -1806,7 +1806,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
+@@ -1585,7 +1585,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
struct gpd_cpuidle_data *cpuidle_data;
@@ -39373,8 +38879,8 @@ index 0ee43c1..369dd62 100644
int ret = 0;
if (IS_ERR_OR_NULL(genpd))
-@@ -2241,8 +2241,11 @@ int genpd_dev_pm_attach(struct device *dev)
- return ret;
+@@ -2021,8 +2021,11 @@ int genpd_dev_pm_attach(struct device *dev)
+ goto out;
}
- dev->pm_domain->detach = genpd_dev_pm_detach;
@@ -39384,9 +38890,9 @@ index 0ee43c1..369dd62 100644
+ *(void **)&dev->pm_domain->sync = genpd_dev_pm_sync;
+ pax_close_kernel();
+
- pm_genpd_poweron(pd);
+ ret = pm_genpd_poweron(pd);
- return 0;
+ out:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index e1a10a0..a6bc363 100644
--- a/drivers/base/power/runtime.c
@@ -39422,7 +38928,7 @@ index e1a10a0..a6bc363 100644
int (*callback)(struct device *);
struct device *parent = NULL;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
-index d2be3f9..0a3167a 100644
+index a7b4679..d302490 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
@@ -39474,7 +38980,7 @@ index 51f15bc..892a668 100644
split_counters(&cnt, &inpr);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
-index c8941f3..f7c7a7e 100644
+index 4c55cfb..b4c21fb 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -30,7 +30,7 @@ static LIST_HEAD(regmap_debugfs_early_list);
@@ -39514,11 +39020,20 @@ index c8941f3..f7c7a7e 100644
tot_len = reg_len + 10; /* ': R W V P\n' */
for (i = 0; i <= map->max_register; i += map->reg_stride) {
+@@ -437,7 +436,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ /* Format the register */
+ snprintf(buf + buf_pos, count - buf_pos,
+ "%.*x: %c %c %c %c\n",
+- reg_len, i,
++ (int)reg_len, i,
+ regmap_readable(map, i) ? 'y' : 'n',
+ regmap_writeable(map, i) ? 'y' : 'n',
+ regmap_volatile(map, i) ? 'y' : 'n',
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
-index 7111d04..bcda737 100644
+index afaf562..722e2a9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
-@@ -340,8 +340,8 @@ static void regmap_unlock_mutex(void *__map)
+@@ -344,8 +344,8 @@ static void regmap_unlock_mutex(void *__map)
mutex_unlock(&map->mutex);
}
@@ -39528,7 +39043,7 @@ index 7111d04..bcda737 100644
{
struct regmap *map = __map;
unsigned long flags;
-@@ -350,8 +350,8 @@ __acquires(&map->spinlock)
+@@ -354,8 +354,8 @@ __acquires(&map->spinlock)
map->spinlock_flags = flags;
}
@@ -39868,10 +39383,10 @@ index be73e9d..7fbf140 100644
cmdlist_t *reqQ;
cmdlist_t *cmpQ;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
-index 434c77d..6d3219a 100644
+index e5e0f19..a5dfbd4 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
-@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
+@@ -1027,7 +1027,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
submit_bio(rw, bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
@@ -39881,7 +39396,7 @@ index 434c77d..6d3219a 100644
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index efd19c2..6ccfa94 100644
+index 015c6e9..8226d6c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -386,7 +386,7 @@ struct drbd_epoch {
@@ -39914,7 +39429,7 @@ index efd19c2..6ccfa94 100644
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index a151853..b9b5baa 100644
+index 74d97f4..bb5a486 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
@@ -39958,7 +39473,7 @@ index a151853..b9b5baa 100644
idr_destroy(&connection->peer_devices);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
-index 74df8cf..e41fc24 100644
+index e80cbef..42533f1 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -3637,13 +3637,13 @@ finish:
@@ -40142,10 +39657,10 @@ index c097909..b0dd588 100644
static struct asender_cmd asender_tbl[] = {
[P_PING] = { 0, got_Ping },
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
-index d0fae55..e85f28e 100644
+index 5578c14..a05f791 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
-@@ -94,7 +94,8 @@ void drbd_md_endio(struct bio *bio, int error)
+@@ -94,7 +94,8 @@ void drbd_md_endio(struct bio *bio)
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
@@ -40165,7 +39680,7 @@ index d0fae55..e85f28e 100644
{
unsigned long flags = 0;
struct drbd_peer_device *peer_device = peer_req->peer_device;
-@@ -408,7 +410,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
+@@ -386,7 +388,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
list_add_tail(&peer_req->w.list, &device->read_ee);
spin_unlock_irq(&device->resource->req_lock);
@@ -40174,7 +39689,7 @@ index d0fae55..e85f28e 100644
if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
return 0;
-@@ -553,7 +555,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
+@@ -531,7 +533,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
unsigned int sect_in; /* Number of sectors that came in since the last turn */
int number, mxb;
@@ -40183,7 +39698,7 @@ index d0fae55..e85f28e 100644
device->rs_in_flight -= sect_in;
rcu_read_lock();
-@@ -1595,8 +1597,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
+@@ -1573,8 +1575,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
struct fifo_buffer *plan;
@@ -40194,22 +39709,8 @@ index d0fae55..e85f28e 100644
device->rs_in_flight = 0;
device->rs_last_events =
(int)part_stat_read(&disk->part0, sectors[0]) +
-diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
-index 0e385d8..0c63d6a 100644
---- a/drivers/block/nbd.c
-+++ b/drivers/block/nbd.c
-@@ -538,8 +538,8 @@ static int nbd_thread(void *data)
- * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
- */
-
-+static void do_nbd_request(struct request_queue *q) __must_hold(q->queue_lock);
- static void do_nbd_request(struct request_queue *q)
-- __releases(q->queue_lock) __acquires(q->queue_lock)
- {
- struct request *req;
-
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
-index 4c20c22..caef1eb 100644
+index 7be2375..8747286 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -109,7 +109,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
@@ -40221,7 +39722,7 @@ index 4c20c22..caef1eb 100644
}
/*
-@@ -1891,7 +1891,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
+@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
return -EROFS;
}
pd->settings.fp = ti.fp;
@@ -40231,7 +39732,7 @@ index 4c20c22..caef1eb 100644
if (ti.nwa_v) {
pd->nwa = be32_to_cpu(ti.next_writable);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index 017b7d5..6845b91 100644
+index 8630a77..aad8d5aa 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -64,7 +64,7 @@
@@ -40326,6 +39827,36 @@ index 7a722df..54b76ab 100644
struct hci_dev *hdev;
int err;
+diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
+index 577cc4b..bfe0c2d 100644
+--- a/drivers/bus/arm-cci.c
++++ b/drivers/bus/arm-cci.c
+@@ -1249,16 +1249,22 @@ static int cci_pmu_init_attrs(struct cci_pmu *cci_pmu, struct platform_device *p
+ model->event_attrs);
+ if (!attrs)
+ return -ENOMEM;
+- pmu_event_attr_group.attrs = attrs;
++ pax_open_kernel();
++ *(struct attribute ***)&pmu_event_attr_group.attrs = attrs;
++ pax_close_kernel();
+ }
+ if (model->nformat_attrs) {
+ attrs = alloc_attrs(pdev, model->nformat_attrs,
+ model->format_attrs);
+ if (!attrs)
+ return -ENOMEM;
+- pmu_format_attr_group.attrs = attrs;
++ pax_open_kernel();
++ *(struct attribute ***)&pmu_format_attr_group.attrs = attrs;
++ pax_close_kernel();
+ }
+- pmu_cpumask_attr.var = cci_pmu;
++ pax_open_kernel();
++ *(void **)&pmu_cpumask_attr.var = cci_pmu;
++ pax_close_kernel();
+
+ return 0;
+ }
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 5d28a45..a538f90 100644
--- a/drivers/cdrom/cdrom.c
@@ -40448,17 +39979,17 @@ index 09f17eb..8531d2f 100644
kfree(segment);
return -EFAULT;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
-index c6dea3f..72ae4b0 100644
+index 1341a94..8a45bc2 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
--void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-- phys_addr_t *mappable_base, unsigned long *mappable_end)
-+void intel_gtt_get(uint64_t *gtt_total, uint64_t *stolen_size,
-+ uint64_t *mappable_base, uint64_t *mappable_end)
+-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+- phys_addr_t *mappable_base, u64 *mappable_end)
++void intel_gtt_get(u64 *gtt_total, u64 *stolen_size,
++ u64 *mappable_base, u64 *mappable_end)