author    Anthony G. Basile <blueness@gentoo.org>  2016-03-04 07:13:18 -0500
committer Anthony G. Basile <blueness@gentoo.org>  2016-03-04 07:13:18 -0500
commit    774d82a71af823de5172c7cbb11ae4355c27aabb (patch)
tree      a3dbf54e4710ef743986e10c485869ec2a8ff07b
parent    grsecurity-3.1-4.4.3-201602282149 (diff)
download  hardened-patchset-774d82a71af823de5172c7cbb11ae4355c27aabb.tar.gz
          hardened-patchset-774d82a71af823de5172c7cbb11ae4355c27aabb.tar.bz2
          hardened-patchset-774d82a71af823de5172c7cbb11ae4355c27aabb.zip
grsecurity-3.1-4.4.4-201603032158
-rw-r--r--  4.4.4/0000_README (renamed from 4.4.3/0000_README)  6
-rw-r--r--  4.4.4/1003_linux-4.4.4.patch  13326
-rw-r--r--  4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch (renamed from 4.4.3/4420_grsecurity-3.1-4.4.3-201602282149.patch)  1352
-rw-r--r--  4.4.4/4425_grsec_remove_EI_PAX.patch (renamed from 4.4.3/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  4.4.4/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.4.3/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  4.4.4/4430_grsec-remove-localversion-grsec.patch (renamed from 4.4.3/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  4.4.4/4435_grsec-mute-warnings.patch (renamed from 4.4.3/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  4.4.4/4440_grsec-remove-protected-paths.patch (renamed from 4.4.3/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  4.4.4/4450_grsec-kconfig-default-gids.patch (renamed from 4.4.3/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  4.4.4/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.4.3/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  4.4.4/4470_disable-compat_vdso.patch (renamed from 4.4.3/4470_disable-compat_vdso.patch)  0
-rw-r--r--  4.4.4/4475_emutramp_default_on.patch (renamed from 4.4.3/4475_emutramp_default_on.patch)  0
12 files changed, 13987 insertions(+), 697 deletions(-)
diff --git a/4.4.3/0000_README b/4.4.4/0000_README
index 25f9ab4..5fcf793 100644
--- a/4.4.3/0000_README
+++ b/4.4.4/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.4.3-201602282149.patch
+Patch: 1003_linux-4.4.4.patch
+From: https://www.kernel.org/
+Desc: Linux 4.4.4
+
+Patch: 4420_grsecurity-3.1-4.4.4-201603032158.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.4.4/1003_linux-4.4.4.patch b/4.4.4/1003_linux-4.4.4.patch
new file mode 100644
index 0000000..57fd383
--- /dev/null
+++ b/4.4.4/1003_linux-4.4.4.patch
@@ -0,0 +1,13326 @@
+diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
+index c477af0..686a64b 100644
+--- a/Documentation/filesystems/efivarfs.txt
++++ b/Documentation/filesystems/efivarfs.txt
+@@ -14,3 +14,10 @@ filesystem.
+ efivarfs is typically mounted like this,
+
+ mount -t efivarfs none /sys/firmware/efi/efivars
++
++Due to the presence of numerous firmware bugs where removing non-standard
++UEFI variables causes the system firmware to fail to POST, efivarfs
++files that are not well-known standardized variables are created
++as immutable files. This doesn't prevent removal - "chattr -i" will work -
++but it does prevent this kind of failure from being accomplished
++accidentally.
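
The immutable bit described in the doc text above is the ordinary inode flag that chattr manipulates, so it can also be cleared programmatically via the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls. A minimal userspace sketch follows; the variable path is hypothetical, and clearing the flag requires CAP_LINUX_IMMUTABLE:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/fs.h>

  int main(void)
  {
      /* hypothetical example variable, not a real vendor GUID */
      const char *var = "/sys/firmware/efi/efivars/"
                        "Example-0abba7dc-e516-4167-bbf5-4d9d1c739416";
      int fd = open(var, O_RDONLY);
      int flags;

      if (fd < 0)
          return 1;
      /* same sequence "chattr -i" performs */
      if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
          flags &= ~FS_IMMUTABLE_FL;
          ioctl(fd, FS_IOC_SETFLAGS, &flags);
      }
      close(fd);
      return unlink(var) ? 1 : 0;
  }
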
+diff --git a/Makefile b/Makefile
+index 802be10..344bc6f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
+index 258b0e5..68b60923 100644
+--- a/arch/arc/include/asm/irqflags-arcv2.h
++++ b/arch/arc/include/asm/irqflags-arcv2.h
+@@ -22,6 +22,7 @@
+ #define AUX_IRQ_CTRL 0x00E
+ #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
+ #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
++#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+ #define AUX_IRQ_PRIORITY 0x206
+ #define ICAUSE 0x40a
+ #define AUX_IRQ_SELECT 0x40b
+@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+ }
+
++static inline void arc_softirq_trigger(int irq)
++{
++ write_aux_reg(AUX_IRQ_HINT, irq);
++}
++
++static inline void arc_softirq_clear(int irq)
++{
++ write_aux_reg(AUX_IRQ_HINT, 0);
++}
++
+ #else
+
+ .macro IRQ_DISABLE scratch
+diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
+index cbfec79..c126460 100644
+--- a/arch/arc/kernel/entry-arcv2.S
++++ b/arch/arc/kernel/entry-arcv2.S
+@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
+ VECTOR handle_interrupt ; (16) Timer0
+ VECTOR handle_interrupt ; unused (Timer1)
+ VECTOR handle_interrupt ; unused (WDT)
+-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt
+-VECTOR handle_interrupt ; (23) End of fixed IRQs
++VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
++VECTOR handle_interrupt ; (20) perf Interrupt
++VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
++VECTOR handle_interrupt ; unused
++VECTOR handle_interrupt ; (23) unused
++# End of fixed IRQs
+
+ .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
+ VECTOR handle_interrupt
+@@ -211,7 +212,11 @@ debug_marker_syscall:
+ ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
+ ; entry was via Exception in DS which got preempted in kernel).
+ ;
+-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
++; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
++;
++; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
++; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
++
+ .Lintr_ret_to_delay_slot:
+ debug_marker_ds:
+
+@@ -222,18 +227,23 @@ debug_marker_ds:
+ ld r2, [sp, PT_ret]
+ ld r3, [sp, PT_status32]
+
++ ; STAT32 for Int return created from scratch
++ ; (No delay slot, disable further intr in trampoline)
++
+ bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
+ st r0, [sp, PT_status32]
+
+ mov r1, .Lintr_ret_to_delay_slot_2
+ st r1, [sp, PT_ret]
+
++ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
+ st r2, [sp, 0]
+ st r3, [sp, 4]
+
+ b .Lisr_ret_fast_path
+
+ .Lintr_ret_to_delay_slot_2:
++ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
+ sub sp, sp, SZ_PT_REGS
+ st r9, [sp, -4]
+
+@@ -243,11 +253,19 @@ debug_marker_ds:
+ ld r9, [sp, 4]
+ sr r9, [erstatus]
+
++ ; restore AUX_USER_SP if returning to U mode
++ bbit0 r9, STATUS_U_BIT, 1f
++ ld r9, [sp, PT_sp]
++ sr r9, [AUX_USER_SP]
++
++1:
+ ld r9, [sp, 8]
+ sr r9, [erbta]
+
+ ld r9, [sp, -4]
+ add sp, sp, SZ_PT_REGS
++
++ ; return from pure kernel mode to delay slot
+ rtie
+
+ END(ret_from_exception)
+diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
+index bd237ac..30d806c 100644
+--- a/arch/arc/kernel/mcip.c
++++ b/arch/arc/kernel/mcip.c
+@@ -11,9 +11,12 @@
+ #include <linux/smp.h>
+ #include <linux/irq.h>
+ #include <linux/spinlock.h>
++#include <asm/irqflags-arcv2.h>
+ #include <asm/mcip.h>
+ #include <asm/setup.h>
+
++#define SOFTIRQ_IRQ 21
++
+ static char smp_cpuinfo_buf[128];
+ static int idu_detected;
+
+@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
+ static void mcip_setup_per_cpu(int cpu)
+ {
+ smp_ipi_irq_setup(cpu, IPI_IRQ);
++ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+ }
+
+ static void mcip_ipi_send(int cpu)
+@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
+ unsigned long flags;
+ int ipi_was_pending;
+
++ /* ARConnect can only send IPI to others */
++ if (unlikely(cpu == raw_smp_processor_id())) {
++ arc_softirq_trigger(SOFTIRQ_IRQ);
++ return;
++ }
++
+ /*
+ * NOTE: We must spin here if the other cpu hasn't yet
+ * serviced a previous message. This can burn lots
+@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
+ unsigned long flags;
+ unsigned int __maybe_unused copy;
+
++ if (unlikely(irq == SOFTIRQ_IRQ)) {
++ arc_softirq_clear(irq);
++ return;
++ }
++
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
+ /* Who sent the IPI */
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index 259c0ca..ddbb361 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -162,10 +162,9 @@ choice
+ mobile SoCs in the Kona family of chips (e.g. bcm28155,
+ bcm11351, etc...)
+
+- config DEBUG_BCM63XX
++ config DEBUG_BCM63XX_UART
+ bool "Kernel low-level debugging on BCM63XX UART"
+ depends on ARCH_BCM_63XX
+- select DEBUG_UART_BCM63XX
+
+ config DEBUG_BERLIN_UART
+ bool "Marvell Berlin SoC Debug UART"
+@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
+ default "debug/vf.S" if DEBUG_VF_UART
+ default "debug/vt8500.S" if DEBUG_VT8500_UART0
+ default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+- default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
++ default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
+ default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ default "mach/debug-macro.S"
+
+@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
+ ARCH_IOP33X || ARCH_IXP4XX || \
+ ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+
+-# Compatibility options for BCM63xx
+-config DEBUG_UART_BCM63XX
+- def_bool ARCH_BCM_63XX
+-
+ config DEBUG_UART_PHYS
+ hex "Physical base address of debug UART"
+ default 0x00100a00 if DEBUG_NETX_UART
+@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
+ default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
+- default 0xfffe8600 if DEBUG_UART_BCM63XX
++ default 0xfffe8600 if DEBUG_BCM63XX_UART
+ default 0xfffff700 if ARCH_IOP33X
+ depends on ARCH_EP93XX || \
+ DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
+ DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
+ DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
+ DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
+ DEBUG_AT91_UART
+
+@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
+ default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0xfc40ab00 if DEBUG_BRCMSTB_UART
+ default 0xfc705000 if DEBUG_ZTE_ZX
+- default 0xfcfe8600 if DEBUG_UART_BCM63XX
++ default 0xfcfe8600 if DEBUG_BCM63XX_UART
+ default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ default 0xfd000000 if ARCH_SPEAR13XX
+ default 0xfd012000 if ARCH_MV78XX0
+@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
+ DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
+ DEBUG_NETX_UART || \
+ DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+- DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++ DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+
+ config DEBUG_UART_8250_SHIFT
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 1afe246..b0c912fe 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -90,7 +90,7 @@
+ #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
+ #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
+ #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
+-#define PIN_PA15 14
++#define PIN_PA15 15
+ #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
+ #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
+ #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index 68ee3ce..b4c6d99 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -16,7 +16,7 @@
+
+ extern struct smp_operations psci_smp_ops;
+
+-#ifdef CONFIG_ARM_PSCI
++#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
+ bool psci_smp_available(void);
+ #else
+ static inline bool psci_smp_available(void) { return false; }
+diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
+index 0375c8c..9408a99 100644
+--- a/arch/arm/include/asm/xen/page-coherent.h
++++ b/arch/arm/include/asm/xen/page-coherent.h
+@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
++ unsigned long page_pfn = page_to_xen_pfn(page);
++ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
++ unsigned long compound_pages =
++ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
++ bool local = (page_pfn <= dev_pfn) &&
++ (dev_pfn - page_pfn < compound_pages);
++
+ /*
+- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+- * multiple Xen page, it's not possible to have a mix of local and
+- * foreign Xen page. So if the first xen_pfn == mfn the page is local
+- * otherwise it's a foreign page grant-mapped in dom0. If the page is
+- * local we can safely call the native dma_ops function, otherwise we
+- * call the xen specific function.
++ * Dom0 is mapped 1:1, while the Linux page can span across
++ * multiple Xen pages, it's not possible for it to contain a
++ * mix of local and foreign Xen pages. So if the first xen_pfn
++ * == mfn the page is local otherwise it's a foreign page
++ * grant-mapped in dom0. If the page is local we can safely
++ * call the native dma_ops function, otherwise we call the xen
++ * specific function.
+ */
+ if (local)
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
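
The rewritten check generalizes the old single-pfn comparison to a range-containment test: a compound Linux page covers (1 << compound_order(page)) * XEN_PFN_PER_PAGE consecutive Xen pfns, and the mapping is local only when the device pfn falls inside that window. A stand-alone sketch of the test (all names are local to the example, not kernel API):

  #include <stdbool.h>
  #include <stdio.h>

  /* Is dev_pfn one of the n_pfns Xen pfns covered by the compound
   * page starting at page_pfn? Mirrors the range test in the hunk. */
  static bool pfn_is_local(unsigned long page_pfn, unsigned long dev_pfn,
                           unsigned long n_pfns)
  {
      return page_pfn <= dev_pfn && dev_pfn - page_pfn < n_pfns;
  }

  int main(void)
  {
      /* order-2 page with 4 KiB Xen pages: 4 pfns starting at 0x1000 */
      printf("%d\n", pfn_is_local(0x1000, 0x1003, 4)); /* 1: local   */
      printf("%d\n", pfn_is_local(0x1000, 0x1004, 4)); /* 0: foreign */
      return 0;
  }
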
+diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
+index 7b76ce0..8633c70 100644
+--- a/arch/arm/mach-omap2/gpmc-onenand.c
++++ b/arch/arm/mach-omap2/gpmc-onenand.c
+@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
+
+ static void set_onenand_cfg(void __iomem *onenand_base)
+ {
+- u32 reg;
++ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
+ reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
+ ONENAND_SYS_CFG1_BL_16;
+ if (onenand_flags & ONENAND_FLAG_SYNCREAD)
+@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ else
+ reg &= ~ONENAND_SYS_CFG1_VHF;
++
+ writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+ }
+
+@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
+ }
+ }
+
++ onenand_async.sync_write = true;
+ omap2_onenand_calc_async_timings(&t);
+
+ ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index cd822d8..b6c90e5 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils)
+ endif
+
+ KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
++KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
+ KBUILD_AFLAGS += $(lseinstr)
+
+ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 2046c02..21ed715 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -33,7 +33,7 @@
+ #define PAGE_SHIFT 16
+ #endif
+ #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+-#define PAGE_MASK (~(PAGE_SIZE - 1))
++#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+
+ /*
+ * This is used for calculating the real page sizes
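
The one-line change above is about integer promotion rather than value: PAGE_SIZE is an unsigned long, so the old ~(PAGE_SIZE - 1) zero-extends when masking something wider than long (e.g. a 64-bit physical address on a 32-bit kernel), silently clearing the upper bits, while the new definition is a plain signed int that sign-extends. A self-contained illustration, with uint32_t standing in for the 32-bit unsigned long:

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SHIFT 12

  int main(void)
  {
      uint32_t old_mask = ~((UINT32_C(1) << PAGE_SHIFT) - 1); /* 32-bit UL  */
      int32_t  new_mask = ~((1 << PAGE_SHIFT) - 1);           /* signed int */
      uint64_t phys = 0x1deadb000ULL;       /* >4 GiB physical address */

      /* zero-extends: prints 0xdeadb000, the high bit is lost */
      printf("old: %#llx\n", (unsigned long long)(phys & old_mask));
      /* sign-extends: prints 0x1deadb000, the high bit survives */
      printf("new: %#llx\n", (unsigned long long)(phys & new_mask));
      return 0;
  }
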
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 8957f15..18826aa 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ static inline pte_t pte_mkyoung(pte_t pte)
+ {
+ pte_val(pte) |= _PAGE_ACCESSED;
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pte_val(pte) & _PAGE_NO_READ))
+ pte_val(pte) |= _PAGE_SILENT_READ;
+ else
+@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+ pmd_val(pmd) |= _PAGE_ACCESSED;
+
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ pmd_val(pmd) |= _PAGE_SILENT_READ;
+ else
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 6499d93..47bc45a 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ if ((config_enabled(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+- (regs->regs[2] == __NR_syscall)) {
++ (regs->regs[2] == __NR_syscall))
+ i++;
+- n++;
+- }
+
+ while (n--)
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
+diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
+index bf9f1a7..a2631a5 100644
+--- a/arch/mips/loongson64/loongson-3/hpet.c
++++ b/arch/mips/loongson64/loongson-3/hpet.c
+@@ -13,6 +13,9 @@
+ #define SMBUS_PCI_REG64 0x64
+ #define SMBUS_PCI_REGB4 0xb4
+
++#define HPET_MIN_CYCLES 64
++#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
++
+ static DEFINE_SPINLOCK(hpet_lock);
+ DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+
+@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
+ cnt += delta;
+ hpet_write(HPET_T0_CMP, cnt);
+
+- res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
+- return res;
++ res = (int)(cnt - hpet_read(HPET_COUNTER));
++
++ return res < HPET_MIN_CYCLES ? -ETIME : 0;
+ }
+
+ static irqreturn_t hpet_irq_handler(int irq, void *data)
+@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, HPET_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+- cd->min_delta_ns = 5000;
++ cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
+
+ clockevents_register_device(cd);
+ setup_irq(HPET_T0_IRQ, &hpet_irq);
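
The reworked hpet_next_event() reads the counter back after programming the comparator: if the programmed value is already within HPET_MIN_CYCLES of (or behind) the live counter, the match may have been missed, so -ETIME tells the clockevents core to retry with a larger delta. A toy model of that race check, where a counter increment stands in for MMIO write latency (none of this is kernel API):

  #include <errno.h>
  #include <stdio.h>
  #include <stdint.h>

  #define HPET_MIN_CYCLES 64

  static uint32_t counter;             /* free-running HPET counter   */
  static const uint32_t latency = 100; /* cycles burned by the write  */

  static int next_event(uint32_t delta)
  {
      uint32_t cmp = counter + delta;  /* program the comparator */

      counter += latency;              /* time passes during the write */
      /* signed distance handles wraparound, exactly like the patch */
      return (int32_t)(cmp - counter) < HPET_MIN_CYCLES ? -ETIME : 0;
  }

  int main(void)
  {
      printf("delta 80:  %d\n", next_event(80));  /* -ETIME: missed   */
      printf("delta 500: %d\n", next_event(500)); /* 0: armed in time */
      return 0;
  }
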
+diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
+index 1a4738a..509832a9 100644
+--- a/arch/mips/loongson64/loongson-3/smp.c
++++ b/arch/mips/loongson64/loongson-3/smp.c
+@@ -30,13 +30,13 @@
+ #include "smp.h"
+
+ DEFINE_PER_CPU(int, cpu_state);
+-DEFINE_PER_CPU(uint32_t, core0_c0count);
+
+ static void *ipi_set0_regs[16];
+ static void *ipi_clear0_regs[16];
+ static void *ipi_status0_regs[16];
+ static void *ipi_en0_regs[16];
+ static void *ipi_mailbox_buf[16];
++static uint32_t core0_c0count[NR_CPUS];
+
+ /* read a 32bit value from ipi register */
+ #define loongson3_ipi_read32(addr) readl(addr)
+@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
+ if (action & SMP_ASK_C0COUNT) {
+ BUG_ON(cpu != 0);
+ c0count = read_c0_count();
+- for (i = 1; i < num_possible_cpus(); i++)
+- per_cpu(core0_c0count, i) = c0count;
++ c0count = c0count ? c0count : 1;
++ for (i = 1; i < nr_cpu_ids; i++)
++ core0_c0count[i] = c0count;
++ __wbflush(); /* Let others see the result ASAP */
+ }
+ }
+
+-#define MAX_LOOPS 1111
++#define MAX_LOOPS 800
+ /*
+ * SMP init and finish on secondary CPUs
+ */
+@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+
+ i = 0;
+- __this_cpu_write(core0_c0count, 0);
++ core0_c0count[cpu] = 0;
+ loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
+- while (!__this_cpu_read(core0_c0count)) {
++ while (!core0_c0count[cpu]) {
+ i++;
+ cpu_relax();
+ }
+
+ if (i > MAX_LOOPS)
+ i = MAX_LOOPS;
+- initcount = __this_cpu_read(core0_c0count) + i;
++ if (cpu_data[cpu].package)
++ initcount = core0_c0count[cpu] + i;
++ else /* Local access is faster for loops */
++ initcount = core0_c0count[cpu] + i/2;
++
+ write_c0_count(initcount);
+ }
+
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 32e0be2..29f73e0 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+ #endif
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_rixi) {
+ #ifdef _PAGE_NO_EXEC_SHIFT
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index f69ecaa..52c1e27 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
+ eeh_pcid_put(dev);
+ if (driver->err_handler &&
+ driver->err_handler->error_detected &&
+- driver->err_handler->slot_reset &&
+- driver->err_handler->resume)
++ driver->err_handler->slot_reset)
+ return NULL;
+ }
+
+diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
+index 2559b16..17d9dcd 100644
+--- a/arch/s390/include/asm/fpu/internal.h
++++ b/arch/s390/include/asm/fpu/internal.h
+@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
+ fpregs->pad = 0;
++ fpregs->fpc = fpu->fpc;
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+ else
+@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+
+ static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
++ fpu->fpc = fpregs->fpc;
+ if (MACHINE_HAS_VX)
+ convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+ else
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index efaac2c..e9a983f 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
+ struct kvm_s390_sie_block *sie_block;
+ unsigned int host_acrs[NUM_ACRS];
+ struct fpu host_fpregs;
+- struct fpu guest_fpregs;
+ struct kvm_s390_local_interrupt local_int;
+ struct hrtimer ckc_timer;
+ struct kvm_s390_pgm_info pgm;
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 9cd248f..dc6c9c6 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -181,6 +181,7 @@ int main(void)
+ OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
+ OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
+ OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
++ OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
+ OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
+ OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
+ OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 66c9441..4af6037 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
+
+ /* Restore high gprs from signal stack */
+ if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+- sizeof(&sregs_ext->gprs_high)))
++ sizeof(sregs_ext->gprs_high)))
+ return -EFAULT;
+ for (i = 0; i < NUM_GPRS; i++)
+ *(__u32 *)&regs->gprs[i] = gprs_high[i];
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 8465892..a08d0af 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+-/*
+- * Backs up the current FP/VX register save area on a particular
+- * destination. Used to switch between different register save
+- * areas.
+- */
+-static inline void save_fpu_to(struct fpu *dst)
+-{
+- dst->fpc = current->thread.fpu.fpc;
+- dst->regs = current->thread.fpu.regs;
+-}
+-
+-/*
+- * Switches the FP/VX register save area from which to lazy
+- * restore register contents.
+- */
+-static inline void load_fpu_from(struct fpu *from)
+-{
+- current->thread.fpu.fpc = from->fpc;
+- current->thread.fpu.regs = from->regs;
+-}
+-
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ /* Save host register state */
+ save_fpu_regs();
+- save_fpu_to(&vcpu->arch.host_fpregs);
+-
+- if (test_kvm_facility(vcpu->kvm, 129)) {
+- current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+- /*
+- * Use the register save area in the SIE-control block
+- * for register restore and save in kvm_arch_vcpu_put()
+- */
+- current->thread.fpu.vxrs =
+- (__vector128 *)&vcpu->run->s.regs.vrs;
+- } else
+- load_fpu_from(&vcpu->arch.guest_fpregs);
++ vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
++ vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+
++ /* Depending on MACHINE_HAS_VX, data stored to vrs either
++ * has vector register or floating point register format.
++ */
++ current->thread.fpu.regs = vcpu->run->s.regs.vrs;
++ current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ if (test_fp_ctl(current->thread.fpu.fpc))
+ /* User space provided an invalid FPC, let's clear it */
+ current->thread.fpu.fpc = 0;
+@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ gmap_disable(vcpu->arch.gmap);
+
++ /* Save guest register state */
+ save_fpu_regs();
++ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+
+- if (test_kvm_facility(vcpu->kvm, 129))
+- /*
+- * kvm_arch_vcpu_load() set up the register save area to
+- * the &vcpu->run->s.regs.vrs and, thus, the vector registers
+- * are already saved. Only the floating-point control must be
+- * copied.
+- */
+- vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+- else
+- save_fpu_to(&vcpu->arch.guest_fpregs);
+- load_fpu_from(&vcpu->arch.host_fpregs);
++ /* Restore host register state */
++ current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
++ current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
+ save_access_regs(vcpu->run->s.regs.acrs);
+ restore_access_regs(vcpu->arch.host_acrs);
+@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+ memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+ vcpu->arch.sie_block->gcr[0] = 0xE0UL;
+ vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+- vcpu->arch.guest_fpregs.fpc = 0;
+- asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
++ /* make sure the new fpc will be lazily loaded */
++ save_fpu_regs();
++ current->thread.fpu.fpc = 0;
+ vcpu->arch.sie_block->gbea = 1;
+ vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ vcpu->arch.local_int.wq = &vcpu->wq;
+ vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+
+- /*
+- * Allocate a save area for floating-point registers. If the vector
+- * extension is available, register contents are saved in the SIE
+- * control block. The allocated save area is still required in
+- * particular places, for example, in kvm_s390_vcpu_store_status().
+- */
+- vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
+- GFP_KERNEL);
+- if (!vcpu->arch.guest_fpregs.fprs) {
+- rc = -ENOMEM;
+- goto out_free_sie_block;
+- }
+-
+ rc = kvm_vcpu_init(vcpu, kvm, id);
+ if (rc)
+ goto out_free_sie_block;
+@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
++ /* make sure the new values will be lazily loaded */
++ save_fpu_regs();
+ if (test_fp_ctl(fpu->fpc))
+ return -EINVAL;
+- memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+- vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+- save_fpu_regs();
+- load_fpu_from(&vcpu->arch.guest_fpregs);
++ current->thread.fpu.fpc = fpu->fpc;
++ if (MACHINE_HAS_VX)
++ convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
++ else
++ memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ return 0;
+ }
+
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+- memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+- fpu->fpc = vcpu->arch.guest_fpregs.fpc;
++ /* make sure we have the latest values */
++ save_fpu_regs();
++ if (MACHINE_HAS_VX)
++ convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
++ else
++ memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
++ fpu->fpc = current->thread.fpu.fpc;
+ return 0;
+ }
+
+@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
+ {
+ unsigned char archmode = 1;
++ freg_t fprs[NUM_FPRS];
+ unsigned int px;
+ u64 clkcomp;
+ int rc;
+
++ px = kvm_s390_get_prefix(vcpu);
+ if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
+ if (write_guest_abs(vcpu, 163, &archmode, 1))
+ return -EFAULT;
+- gpa = SAVE_AREA_BASE;
++ gpa = 0;
+ } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
+ if (write_guest_real(vcpu, 163, &archmode, 1))
+ return -EFAULT;
+- gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
++ gpa = px;
++ } else
++ gpa -= __LC_FPREGS_SAVE_AREA;
++
++ /* manually convert vector registers if necessary */
++ if (MACHINE_HAS_VX) {
++ convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
++ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++ fprs, 128);
++ } else {
++ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++ vcpu->run->s.regs.vrs, 128);
+ }
+- rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
+- vcpu->arch.guest_fpregs.fprs, 128);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
+ vcpu->run->s.regs.gprs, 128);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
++ rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
+ &vcpu->arch.sie_block->gpsw, 16);
+- px = kvm_s390_get_prefix(vcpu);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
++ rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
+ &px, 4);
+- rc |= write_guest_abs(vcpu,
+- gpa + offsetof(struct save_area, fp_ctrl_reg),
+- &vcpu->arch.guest_fpregs.fpc, 4);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
++ rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
++ &vcpu->run->s.regs.fpc, 4);
++ rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
+ &vcpu->arch.sie_block->todpr, 4);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
+ &vcpu->arch.sie_block->cputm, 8);
+ clkcomp = vcpu->arch.sie_block->ckc >> 8;
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
+ &clkcomp, 8);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
+ &vcpu->run->s.regs.acrs, 64);
+- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
++ rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
+ &vcpu->arch.sie_block->gcr, 128);
+ return rc ? -EFAULT : 0;
+ }
+@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ * it into the save area
+ */
+ save_fpu_regs();
+- if (test_kvm_facility(vcpu->kvm, 129)) {
+- /*
+- * If the vector extension is available, the vector registers
+- * which overlaps with floating-point registers are saved in
+- * the SIE-control block. Hence, extract the floating-point
+- * registers and the FPC value and store them in the
+- * guest_fpregs structure.
+- */
+- vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
+- convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
+- current->thread.fpu.vxrs);
+- } else
+- save_fpu_to(&vcpu->arch.guest_fpregs);
++ vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ save_access_regs(vcpu->run->s.regs.acrs);
+
+ return kvm_s390_store_status_unloaded(vcpu, addr);
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88..18c8b81 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ int i;
+
+ /* Normalize entries to being relative to the start of the section */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn += i;
++ p->fixup += i + 4;
++ }
+ sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ /* Denormalize all entries */
+- for (p = start, i = 0; p < finish; p++, i += 8)
++ for (p = start, i = 0; p < finish; p++, i += 8) {
+ p->insn -= i;
++ p->fixup -= i + 4;
++ }
+ }
+
+ #ifdef CONFIG_MODULES
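
The bug fixed here is subtle: each 8-byte entry stores insn (at entry offset 0) and fixup (at entry offset 4) as 32-bit displacements relative to the field's own address, so moving an entry during the sort changes what both fields point at. Normalizing to section-relative values before sorting and denormalizing afterwards must therefore adjust both fields, with fixup offset by the extra 4 bytes; the original code only adjusted insn. A small userspace model of the round trip:

  #include <stdio.h>
  #include <stdlib.h>

  /* Self-relative exception-table entry, 8 bytes as on s390:
   * each field holds the target's offset from the field's address. */
  struct ex { int insn; int fixup; };

  static int cmp_ex(const void *a, const void *b)
  {
      return ((const struct ex *)a)->insn - ((const struct ex *)b)->insn;
  }

  static void sort_extable(struct ex *start, struct ex *finish)
  {
      struct ex *p;
      int i;

      /* Normalize: make both fields relative to the section start */
      for (p = start, i = 0; p < finish; p++, i += 8) {
          p->insn  += i;      /* field at entry offset 0           */
          p->fixup += i + 4;  /* field at entry offset 4 (the fix) */
      }
      qsort(start, finish - start, sizeof(*start), cmp_ex);
      /* Denormalize: back to self-relative form at the new slots */
      for (p = start, i = 0; p < finish; p++, i += 8) {
          p->insn  -= i;
          p->fixup -= i + 4;
      }
  }

  int main(void)
  {
      struct ex tab[2] = { { 200, 300 }, { 50, 60 } };

      sort_extable(tab, tab + 2);
      /* prints "58 68 / 192 292": same targets, new entry positions */
      printf("%d %d / %d %d\n", tab[0].insn, tab[0].fixup,
             tab[1].insn, tab[1].fixup);
      return 0;
  }
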
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb..c690c8e 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -413,7 +413,7 @@ out:
+
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+- int ret;
++ long ret;
+
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 47f1ff0..22a358e 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
+ {
+ int pid, n, status;
+
++ fflush(stdout);
++
+ pid = fork();
+ if (pid == 0)
+ ptrace_child();
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 6a1ae37..15cfeba 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
+ * Interrupts are off on entry.
+ */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++ ASM_CLAC /* Do this early to minimize exposure */
+ SWAPGS
+
+ /*
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 881b476..e7de5c9 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
+
+ #define __ARCH_HAS_DO_SOFTIRQ
+
++struct irq_desc;
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
+ extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+-extern void irq_force_complete_move(int);
++extern void irq_force_complete_move(struct irq_desc *desc);
+ #endif
+
+ #ifdef CONFIG_HAVE_KVM
+@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+ extern void (*x86_platform_ipi_callback)(void);
+ extern void native_init_IRQ(void);
+
+-struct irq_desc;
+ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+
+ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index f253218..fdb0fbf 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
+ {
+ int pin, ioapic, irq, irq_entry;
+ const struct cpumask *mask;
++ struct irq_desc *desc;
+ struct irq_data *idata;
+ struct irq_chip *chip;
+
+@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
+ if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
+ continue;
+
+- idata = irq_get_irq_data(irq);
++ desc = irq_to_desc(irq);
++ raw_spin_lock_irq(&desc->lock);
++ idata = irq_desc_get_irq_data(desc);
+
+ /*
+ * Honour affinities which have been set in early boot
+@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
+ /* Might be lapic_chip for irq 0 */
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(idata, mask, false);
++ raw_spin_unlock_irq(&desc->lock);
+ }
+ }
+ #endif
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 861bc59..a35f6b5 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -30,7 +30,7 @@ struct apic_chip_data {
+
+ struct irq_domain *x86_vector_domain;
+ static DEFINE_RAW_SPINLOCK(vector_lock);
+-static cpumask_var_t vector_cpumask;
++static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
+ static struct irq_chip lapic_controller;
+ #ifdef CONFIG_X86_IO_APIC
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+- int cpu, err;
++ int cpu, vector;
+
+- if (d->move_in_progress)
++ /*
++ * If there is still a move in progress or the previous move has not
++ * been cleaned up completely, tell the caller to come back later.
++ */
++ if (d->move_in_progress ||
++ cpumask_intersects(d->old_domain, cpu_online_mask))
+ return -EBUSY;
+
+ /* Only try and allocate irqs on cpus that are present */
+- err = -ENOSPC;
+ cpumask_clear(d->old_domain);
++ cpumask_clear(searched_cpumask);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+- int new_cpu, vector, offset;
++ int new_cpu, offset;
+
++ /* Get the possible target cpus for @mask/@cpu from the apic */
+ apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+
++ /*
++ * Clear the offline cpus from @vector_cpumask for searching
++ * and verify whether the result overlaps with @mask. If true,
++ * then the call to apic->cpu_mask_to_apicid_and() will
++ * succeed as well. If not, no point in trying to find a
++ * vector in this mask.
++ */
++ cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
++ if (!cpumask_intersects(vector_searchmask, mask))
++ goto next_cpu;
++
+ if (cpumask_subset(vector_cpumask, d->domain)) {
+- err = 0;
+ if (cpumask_equal(vector_cpumask, d->domain))
+- break;
++ goto success;
+ /*
+- * New cpumask using the vector is a proper subset of
+- * the current in use mask. So cleanup the vector
+- * allocation for the members that are not used anymore.
++ * Mark the cpus which are no longer in the mask for
++ * cleanup.
+ */
+- cpumask_andnot(d->old_domain, d->domain,
+- vector_cpumask);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- cpumask_and(d->domain, d->domain, vector_cpumask);
+- break;
++ cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
++ vector = d->cfg.vector;
++ goto update;
+ }
+
+ vector = current_vector;
+@@ -156,45 +168,60 @@ next:
+ vector = FIRST_EXTERNAL_VECTOR + offset;
+ }
+
+- if (unlikely(current_vector == vector)) {
+- cpumask_or(d->old_domain, d->old_domain,
+- vector_cpumask);
+- cpumask_andnot(vector_cpumask, mask, d->old_domain);
+- cpu = cpumask_first_and(vector_cpumask,
+- cpu_online_mask);
+- continue;
+- }
++ /* If the search wrapped around, try the next cpu */
++ if (unlikely(current_vector == vector))
++ goto next_cpu;
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
++ for_each_cpu(new_cpu, vector_searchmask) {
+ if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
+ goto next;
+ }
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+- if (d->cfg.vector) {
++ /* Schedule the old vector for cleanup on all cpus */
++ if (d->cfg.vector)
+ cpumask_copy(d->old_domain, d->domain);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- }
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
++ for_each_cpu(new_cpu, vector_searchmask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+- d->cfg.vector = vector;
+- cpumask_copy(d->domain, vector_cpumask);
+- err = 0;
+- break;
+- }
++ goto update;
+
+- if (!err) {
+- /* cache destination APIC IDs into cfg->dest_apicid */
+- err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+- &d->cfg.dest_apicid);
++next_cpu:
++ /*
++ * We exclude the current @vector_cpumask from the requested
++ * @mask and try again with the next online cpu in the
++ * result. We cannot modify @mask, so we use @vector_cpumask
++ * as a temporary buffer here as it will be reassigned when
++ * calling apic->vector_allocation_domain() above.
++ */
++ cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
++ cpumask_andnot(vector_cpumask, mask, searched_cpumask);
++ cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
++ continue;
+ }
++ return -ENOSPC;
+
+- return err;
++update:
++ /*
++ * Exclude offline cpus from the cleanup mask and set the
++ * move_in_progress flag when the result is not empty.
++ */
++ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
++ d->move_in_progress = !cpumask_empty(d->old_domain);
++ d->cfg.vector = vector;
++ cpumask_copy(d->domain, vector_cpumask);
++success:
++ /*
++ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
++ * as we already established that mask & d->domain & cpu_online_mask
++ * is not empty.
++ */
++ BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
++ &d->cfg.dest_apicid));
++ return 0;
+ }
+
+ static int assign_irq_vector(int irq, struct apic_chip_data *data,
+@@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
+ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ {
+ struct irq_desc *desc;
+- unsigned long flags;
+ int cpu, vector;
+
+- raw_spin_lock_irqsave(&vector_lock, flags);
+ BUG_ON(!data->cfg.vector);
+
+ vector = data->cfg.vector;
+@@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ data->cfg.vector = 0;
+ cpumask_clear(data->domain);
+
+- if (likely(!data->move_in_progress)) {
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
++ /*
++ * If move is in progress or the old_domain mask is not empty,
++ * i.e. the cleanup IPI has not been processed yet, we need to remove
++ * the old references to desc from all cpus vector tables.
++ */
++ if (!data->move_in_progress && cpumask_empty(data->old_domain))
+ return;
+- }
+
+ desc = irq_to_desc(irq);
+ for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
+@@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ }
+ }
+ data->move_in_progress = 0;
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
+ }
+
+ void init_irq_alloc_info(struct irq_alloc_info *info,
+@@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
+ static void x86_vector_free_irqs(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+ {
++ struct apic_chip_data *apic_data;
+ struct irq_data *irq_data;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+ if (irq_data && irq_data->chip_data) {
++ raw_spin_lock_irqsave(&vector_lock, flags);
+ clear_irq_vector(virq + i, irq_data->chip_data);
+- free_apic_chip_data(irq_data->chip_data);
++ apic_data = irq_data->chip_data;
++ irq_domain_reset_irq_data(irq_data);
++ raw_spin_unlock_irqrestore(&vector_lock, flags);
++ free_apic_chip_data(apic_data);
+ #ifdef CONFIG_X86_IO_APIC
+ if (virq + i < nr_legacy_irqs())
+ legacy_irq_data[virq + i] = NULL;
+ #endif
+- irq_domain_reset_irq_data(irq_data);
+ }
+ }
+ }
+@@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
+ arch_init_htirq_domain(x86_vector_domain);
+
+ BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+
+ return arch_early_ioapic_init();
+ }
+@@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, data, dest);
+- if (err) {
+- if (assign_irq_vector(irq, data,
+- irq_data_get_affinity_mask(irq_data)))
+- pr_err("Failed to recover vector for irq %d\n", irq);
+- return err;
+- }
+-
+- return IRQ_SET_MASK_OK;
++ return err ? err : IRQ_SET_MASK_OK;
+ }
+
+ static struct irq_chip lapic_controller = {
+@@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
+ #ifdef CONFIG_SMP
+ static void __send_cleanup_vector(struct apic_chip_data *data)
+ {
+- cpumask_var_t cleanup_mask;
+-
+- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+- unsigned int i;
+-
+- for_each_cpu_and(i, data->old_domain, cpu_online_mask)
+- apic->send_IPI_mask(cpumask_of(i),
+- IRQ_MOVE_CLEANUP_VECTOR);
+- } else {
+- cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
+- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+- free_cpumask_var(cleanup_mask);
+- }
++ raw_spin_lock(&vector_lock);
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+ data->move_in_progress = 0;
++ if (!cpumask_empty(data->old_domain))
++ apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
++ raw_spin_unlock(&vector_lock);
+ }
+
+ void send_cleanup_vector(struct irq_cfg *cfg)
+@@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ goto unlock;
+
+ /*
+- * Check if the irq migration is in progress. If so, we
+- * haven't received the cleanup request yet for this irq.
++ * Nothing to cleanup if irq migration is in progress
++ * or this cpu is not set in the cleanup mask.
+ */
+- if (data->move_in_progress)
++ if (data->move_in_progress ||
++ !cpumask_test_cpu(me, data->old_domain))
+ goto unlock;
+
++ /*
++ * We have two cases to handle here:
++ * 1) vector is unchanged but the target mask got reduced
++ * 2) vector and the target mask has changed
++ *
++ * #1 is obvious, but in #2 we have two vectors with the same
++ * irq descriptor: the old and the new vector. So we need to
++ * make sure that we only cleanup the old vector. The new
++ * vector has the current @vector number in the config and
++ * this cpu is part of the target mask. We better leave that
++ * one alone.
++ */
+ if (vector == data->cfg.vector &&
+ cpumask_test_cpu(me, data->domain))
+ goto unlock;
+@@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ goto unlock;
+ }
+ __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++ cpumask_clear_cpu(me, data->old_domain);
+ unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+@@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
+ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+ }
+
+-void irq_force_complete_move(int irq)
++/*
++ * Called with @desc->lock held and interrupts disabled.
++ */
++void irq_force_complete_move(struct irq_desc *desc)
+ {
+- struct irq_cfg *cfg = irq_cfg(irq);
++ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
++ struct apic_chip_data *data = apic_chip_data(irqdata);
++ struct irq_cfg *cfg = data ? &data->cfg : NULL;
+
+- if (cfg)
+- __irq_complete_move(cfg, cfg->vector);
++ if (!cfg)
++ return;
++
++ __irq_complete_move(cfg, cfg->vector);
++
++ /*
++ * This is tricky. If the cleanup of @data->old_domain has not been
++ * done yet, then the following setaffinity call will fail with
++ * -EBUSY. This can leave the interrupt in a stale state.
++ *
++ * The cleanup cannot make progress because we hold @desc->lock. So in
++ * case @data->old_domain is not yet cleaned up, we need to drop the
++ * lock and acquire it again. @desc cannot go away, because the
++ * hotplug code holds the sparse irq lock.
++ */
++ raw_spin_lock(&vector_lock);
++ /* Clean out all offline cpus (including ourself) first. */
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
++ while (!cpumask_empty(data->old_domain)) {
++ raw_spin_unlock(&vector_lock);
++ raw_spin_unlock(&desc->lock);
++ cpu_relax();
++ raw_spin_lock(&desc->lock);
++ /*
++ * Reevaluate apic_chip_data. It might have been cleared after
++ * we dropped @desc->lock.
++ */
++ data = apic_chip_data(irqdata);
++ if (!data)
++ return;
++ raw_spin_lock(&vector_lock);
++ }
++ raw_spin_unlock(&vector_lock);
+ }
+ #endif
+
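
The wait loop added at the end of irq_force_complete_move() is a lock-break pattern: the cleanup IPI that empties @old_domain needs desc->lock, which the caller holds, so the only way to wait is to drop both locks, spin briefly, retake them in order, and revalidate every pointer derived under the old critical section. A generic sketch of the shape, using pthread names purely for illustration:

  #include <pthread.h>
  #include <sched.h>
  #include <stdbool.h>

  struct waitctx {
      pthread_mutex_t outer;   /* caller holds this, like desc->lock */
      pthread_mutex_t inner;   /* like vector_lock                   */
      bool pending;            /* cleared by the other path          */
      void *state;             /* may vanish while we are unlocked   */
  };

  /* Entered with ctx->outer held; returns with it held, possibly
   * early if the state vanished while the locks were dropped. */
  static void wait_for_cleanup(struct waitctx *ctx)
  {
      pthread_mutex_lock(&ctx->inner);
      while (ctx->pending) {
          pthread_mutex_unlock(&ctx->inner);
          pthread_mutex_unlock(&ctx->outer);
          sched_yield();               /* cpu_relax() stand-in */
          pthread_mutex_lock(&ctx->outer);
          if (!ctx->state)             /* revalidate after relock */
              return;
          pthread_mutex_lock(&ctx->inner);
      }
      pthread_mutex_unlock(&ctx->inner);
  }
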
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index f8062aa..61521dc 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -462,7 +462,7 @@ void fixup_irqs(void)
+ * non intr-remapping case, we can't wait till this interrupt
+ * arrives at this cpu before completing the irq move.
+ */
+- irq_force_complete_move(irq);
++ irq_force_complete_move(desc);
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ break_affinity = 1;
+@@ -470,6 +470,15 @@ void fixup_irqs(void)
+ }
+
+ chip = irq_data_get_irq_chip(data);
++ /*
++ * The interrupt descriptor might have been cleaned up
++ * already, but it is not yet removed from the radix tree
++ */
++ if (!chip) {
++ raw_spin_unlock(&desc->lock);
++ continue;
++ }
++
+ if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
+ chip->irq_mask(data);
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 1505587..b9b09fe 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ u16 sel;
+
+ la = seg_base(ctxt, addr.seg) + addr.ea;
+- *linear = la;
+ *max_size = 0;
+ switch (mode) {
+ case X86EMUL_MODE_PROT64:
++ *linear = la;
+ if (is_noncanonical_address(la))
+ goto bad;
+
+@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ goto bad;
+ break;
+ default:
++ *linear = la = (u32)la;
+ usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+ addr.seg);
+ if (!usable)
+@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ if (size > *max_size)
+ goto bad;
+ }
+- la &= (u32)-1;
+ break;
+ }
+ if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 3058a22..7be8a25 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ return ret;
+
+ kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
+- walker->ptes[level] = pte;
++ walker->ptes[level - 1] = pte;
+ }
+ return 0;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9a2ed89..6ef3856 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ }
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index b2fd67d..ef05755 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ break;
+ }
+
+- if (regno > nr_registers) {
++ if (regno >= nr_registers) {
+ WARN_ONCE(1, "decoded an instruction with an invalid register");
+ return -EINVAL;
+ }
+diff --git a/block/bio.c b/block/bio.c
+index 4f184d9..d4d1443 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+- * don't copy into a random user address space, just free.
++ * don't copy into a random user address space, just free
++ * and return -EINTR so user space doesn't expect any data.
+ */
+- if (current->mm && bio_data_dir(bio) == READ)
++ if (!current->mm)
++ ret = -EINTR;
++ else if (bio_data_dir(bio) == READ)
+ ret = bio_copy_to_iter(bio, bmd->iter);
+ if (bmd->is_our_pages)
+ bio_free_pages(bio);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 3405f7a..5fdac39 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = {
+ * as brightness control does not work.
+ */
+ {
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
++ .callback = video_disable_backlight_sysfs_if,
++ .ident = "Toshiba Portege R700",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
++ },
++ },
++ {
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ .callback = video_disable_backlight_sysfs_if,
+ .ident = "Toshiba Portege R830",
+@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
+ },
+ },
++ {
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
++ .callback = video_disable_backlight_sysfs_if,
++ .ident = "Toshiba Satellite R830",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
++ },
++ },
+ /*
+ * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
+ * but the IDs actually follow the Device ID Scheme.
+diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
+index aa45d48..11d8209 100644
+--- a/drivers/acpi/nfit.c
++++ b/drivers/acpi/nfit.c
+@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
+ nfit_mem->bdw = NULL;
+ }
+
+-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
++static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
+ {
+ u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
+ struct nfit_memdev *nfit_memdev;
+ struct nfit_flush *nfit_flush;
+- struct nfit_dcr *nfit_dcr;
+ struct nfit_bdw *nfit_bdw;
+ struct nfit_idt *nfit_idt;
+ u16 idt_idx, range_index;
+
+- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+- if (nfit_dcr->dcr->region_index != dcr)
+- continue;
+- nfit_mem->dcr = nfit_dcr->dcr;
+- break;
+- }
+-
+- if (!nfit_mem->dcr) {
+- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
+- spa->range_index, __to_nfit_memdev(nfit_mem)
+- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
+- return -ENODEV;
+- }
+-
+- /*
+- * We've found enough to create an nvdimm, optionally
+- * find an associated BDW
+- */
+- list_add(&nfit_mem->list, &acpi_desc->dimms);
+-
+ list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
+ if (nfit_bdw->bdw->region_index != dcr)
+ continue;
+@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+ }
+
+ if (!nfit_mem->bdw)
+- return 0;
++ return;
+
+ nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
+
+ if (!nfit_mem->spa_bdw)
+- return 0;
++ return;
+
+ range_index = nfit_mem->spa_bdw->range_index;
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+ }
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_mem *nfit_mem, *found;
+ struct nfit_memdev *nfit_memdev;
+ int type = nfit_spa_type(spa);
+- u16 dcr;
+
+ switch (type) {
+ case NFIT_SPA_DCR:
+@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ }
+
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+- int rc;
++ struct nfit_dcr *nfit_dcr;
++ u32 device_handle;
++ u16 dcr;
+
+ if (nfit_memdev->memdev->range_index != spa->range_index)
+ continue;
+ found = NULL;
+ dcr = nfit_memdev->memdev->region_index;
++ device_handle = nfit_memdev->memdev->device_handle;
+ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
+- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
++ if (__to_nfit_memdev(nfit_mem)->device_handle
++ == device_handle) {
+ found = nfit_mem;
+ break;
+ }
+@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ if (!nfit_mem)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&nfit_mem->list);
++ list_add(&nfit_mem->list, &acpi_desc->dimms);
++ }
++
++ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
++ if (nfit_dcr->dcr->region_index != dcr)
++ continue;
++ /*
++ * Record the control region for the dimm. For
++ * the ACPI 6.1 case, where there are separate
++ * control regions for the pmem vs blk
++ * interfaces, be sure to record the extended
++ * blk details.
++ */
++ if (!nfit_mem->dcr)
++ nfit_mem->dcr = nfit_dcr->dcr;
++ else if (nfit_mem->dcr->windows == 0
++ && nfit_dcr->dcr->windows)
++ nfit_mem->dcr = nfit_dcr->dcr;
++ break;
++ }
++
++ if (dcr && !nfit_mem->dcr) {
++ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
++ spa->range_index, dcr);
++ return -ENODEV;
+ }
+
+ if (type == NFIT_SPA_DCR) {
+@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ nfit_mem->idt_dcr = nfit_idt->idt;
+ break;
+ }
++ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
+ } else {
+ /*
+ * A single dimm may belong to multiple SPA-PM
+@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ */
+ nfit_mem->memdev_pmem = nfit_memdev->memdev;
+ }
+-
+- if (found)
+- continue;
+-
+- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
+- if (rc)
+- return rc;
+ }
+
+ return 0;
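
The nfit hunks above rework DIMM discovery: nfit_mem entries are now created and added to the dimms list up front, keyed by the memory device handle rather than the control-region index, so a DIMM that ACPI 6.1 describes with separate pmem and blk control regions still collapses to a single nvdimm. A minimal sketch of the DCR selection rule the new loop implements (hypothetical helper name, assuming the 4.4-era struct layout):

	static struct acpi_nfit_control_region *
	pick_dcr(struct acpi_nfit_control_region *cur,
		 struct acpi_nfit_control_region *cand)
	{
		if (!cur)
			return cand;	/* first matching DCR wins by default */
		if (cur->windows == 0 && cand->windows)
			return cand;	/* prefer the extended blk DCR */
		return cur;
	}
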
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index daaf1c4..80e55cb 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+ },
+ },
+- {
+- .callback = video_detect_force_vendor,
+- .ident = "Dell Inspiron 5737",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+- },
+- },
+
+ /*
+ * These models have a working acpi_video backlight control, and using
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index a39e85f..7d00b7a 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+
+- ptr += sizeof(void *);
++ ptr += sizeof(cookie);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+
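
The binder one-liner above fixes a 32-bit ABI bug: the cookie read by get_user() is a binder_uintptr_t, 64 bits wide in the usual (non-BINDER_IPC_32BIT) configuration, while sizeof(void *) is only 4 on a 32-bit kernel, so the old code advanced the parse pointer 4 bytes short of the value it had just consumed. Sketch of the mismatch (not patch code):

	typedef __u64 binder_uintptr_t;	/* from uapi/linux/android/binder.h */

	binder_uintptr_t cookie;
	/* 32-bit kernel: sizeof(void *) == 4, but sizeof(cookie) == 8 */
	ptr += sizeof(cookie);		/* always 8, matching get_user() */
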
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index cdf6215..7dbba38 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ struct ata_port *ap = qc->ap;
+- unsigned long flags;
+
+ if (ap->ops->error_handler) {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+-
+ /* EH might have kicked in while host lock is
+ * released.
+ */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ } else
+ ata_port_freeze(ap);
+ }
+-
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else {
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ }
+ } else {
+ if (in_wq) {
+- spin_lock_irqsave(ap->lock, flags);
+ ata_sff_irq_on(ap);
+ ata_qc_complete(qc);
+- spin_unlock_irqrestore(ap->lock, flags);
+ } else
+ ata_qc_complete(qc);
+ }
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ {
+ struct ata_link *link = qc->dev->link;
+ struct ata_eh_info *ehi = &link->eh_info;
+- unsigned long flags = 0;
+ int poll_next;
+
++ lockdep_assert_held(ap->lock);
++
+ WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+ /* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ }
+ }
+
+- /* Send the CDB (atapi) or the first data block (ata pio out).
+- * During the state transition, interrupt handler shouldn't
+- * be invoked before the data transfer is complete and
+- * hsm_task_state is changed. Hence, the following locking.
+- */
+- if (in_wq)
+- spin_lock_irqsave(ap->lock, flags);
+-
+ if (qc->tf.protocol == ATA_PROT_PIO) {
+ /* PIO data out protocol.
+ * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ /* send CDB */
+ atapi_send_cdb(ap, qc);
+
+- if (in_wq)
+- spin_unlock_irqrestore(ap->lock, flags);
+-
+ /* if polling, ata_sff_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
+ u8 status;
+ int poll_next;
+
++ spin_lock_irq(ap->lock);
++
+ BUG_ON(ap->sff_pio_task_link == NULL);
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ if (!qc) {
+ ap->sff_pio_task_link = NULL;
+- return;
++ goto out_unlock;
+ }
+
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ */
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ if (status & ATA_BUSY) {
++ spin_unlock_irq(ap->lock);
+ ata_msleep(ap, 2);
++ spin_lock_irq(ap->lock);
++
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ if (status & ATA_BUSY) {
+ ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+- return;
++ goto out_unlock;
+ }
+ }
+
+@@ -1402,6 +1390,8 @@ fsm_start:
+ */
+ if (poll_next)
+ goto fsm_start;
++out_unlock:
++ spin_unlock_irq(ap->lock);
+ }
+
+ /**
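
The libata-sff hunks move ap->lock handling out of ata_sff_hsm_move() and ata_hsm_qc_complete() and up into the single work-queue entry point ata_sff_pio_task(), which now holds the lock for the whole polling pass and drops it only around ata_msleep(). The lockdep_assert_held() call documents the new contract. The resulting shape, as a sketch with hypothetical names:

	static void hsm_move(struct ata_port *ap)
	{
		lockdep_assert_held(ap->lock);	/* callee asserts, never locks */
		/* ... advance the host state machine ... */
	}

	static void pio_task(struct ata_port *ap)
	{
		spin_lock_irq(ap->lock);	/* caller owns the lock */
		hsm_move(ap);
		spin_unlock_irq(ap->lock);
	}
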
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 92f0ee3..9688971 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
++ /* Toshiba Corp - Broadcom based */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
++ .driver_info = BTUSB_BCM_PATCHRAM },
++
+ /* Intel Bluetooth USB Bootloader (RAM module) */
+ { USB_DEVICE(0x8087, 0x0a5a),
+ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
+diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
+index 2fe37f7..813003d 100644
+--- a/drivers/clk/samsung/clk-cpu.c
++++ b/drivers/clk/samsung/clk-cpu.c
+@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
+ unsigned long div0, div1 = 0, mux_reg;
++ unsigned long flags;
+
+ /* find out the divider values to use for clock data */
+ while ((cfg_data->prate * 1000) != ndata->new_rate) {
+@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ cfg_data++;
+ }
+
+- spin_lock(cpuclk->lock);
++ spin_lock_irqsave(cpuclk->lock, flags);
+
+ /*
+ * For the selected PLL clock frequency, get the pre-defined divider
+@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ DIV_MASK_ALL);
+ }
+
+- spin_unlock(cpuclk->lock);
++ spin_unlock_irqrestore(cpuclk->lock, flags);
+ return 0;
+ }
+
+@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ unsigned long div = 0, div_mask = DIV_MASK;
+ unsigned long mux_reg;
++ unsigned long flags;
+
+ /* find out the divider values to use for clock data */
+ if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
+@@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ }
+ }
+
+- spin_lock(cpuclk->lock);
++ spin_lock_irqsave(cpuclk->lock, flags);
+
+ /* select mout_apll as the alternate parent */
+ mux_reg = readl(base + E4210_SRC_CPU);
+@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ }
+
+ exynos_set_safe_div(base, div, div_mask);
+- spin_unlock(cpuclk->lock);
++ spin_unlock_irqrestore(cpuclk->lock, flags);
+ return 0;
+ }
+
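
The Samsung clk-cpu hunks apply the general rule that a spinlock which can also be contended from interrupt context must be taken with the irqsave variant in process context; the rate-change notifiers previously used plain spin_lock(). The idiom, for reference (note that cpuclk->lock is already a pointer, hence no &):

	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);
	/* ... reprogram dividers and mux ... */
	spin_unlock_irqrestore(cpuclk->lock, flags);
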
+diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
+index 6ee9140..4da2af9 100644
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
+@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
+
+ __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+ __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+- clk_disable(tcd->clk);
++ if (!clockevent_state_detached(d))
++ clk_disable(tcd->clk);
+
+ return 0;
+ }
+diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
+index a92e94b..dfc3bb4 100644
+--- a/drivers/clocksource/vt8500_timer.c
++++ b/drivers/clocksource/vt8500_timer.c
+@@ -50,6 +50,8 @@
+
+ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
++#define MIN_OSCR_DELTA 16
++
+ static void __iomem *regbase;
+
+ static cycle_t vt8500_timer_read(struct clocksource *cs)
+@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
+ cpu_relax();
+ writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+
+- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
+ return -ETIME;
+
+ writel(1, regbase + TIMER_IER_VAL);
+@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
+ pr_err("%s: setup_irq failed for %s\n", __func__,
+ clockevent.name);
+ clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+- 4, 0xf0000000);
++ MIN_OSCR_DELTA * 2, 0xf0000000);
+ }
+
+ CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index b260576..d994b0f 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
+ if (!have_governor_per_policy())
+ cdata->gdbs_data = dbs_data;
+
++ policy->governor_data = dbs_data;
++
+ ret = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+ if (ret)
+ goto reset_gdbs_data;
+
+- policy->governor_data = dbs_data;
+-
+ return 0;
+
+ reset_gdbs_data:
++ policy->governor_data = NULL;
++
+ if (!have_governor_per_policy())
+ cdata->gdbs_data = NULL;
+ cdata->exit(dbs_data, !policy->governor->initialized);
+@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+ if (!cdbs->shared || cdbs->shared->policy)
+ return -EBUSY;
+
+- policy->governor_data = NULL;
+ if (!--dbs_data->usage_count) {
+ sysfs_remove_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+
++ policy->governor_data = NULL;
++
+ if (!have_governor_per_policy())
+ cdata->gdbs_data = NULL;
+
+ cdata->exit(dbs_data, policy->governor->initialized == 1);
+ kfree(dbs_data);
++ } else {
++ policy->governor_data = NULL;
+ }
+
+ free_common_dbs_info(policy, cdata);
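
The cpufreq_governor hunks reorder initialization so that policy->governor_data is published before the sysfs group is created, and cleared on every exit path (including the branch where other policies still share the tunables), since the sysfs show/store handlers dereference it. The pattern, reduced to its core:

	policy->governor_data = dbs_data;	/* 1. publish the state     */
	ret = sysfs_create_group(kobj, attrs);	/* 2. expose files using it */
	if (ret)
		policy->governor_data = NULL;	/* 3. unpublish on failure  */
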
+diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
+index 1d99c97..0963772 100644
+--- a/drivers/cpufreq/pxa2xx-cpufreq.c
++++ b/drivers/cpufreq/pxa2xx-cpufreq.c
+@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
+ }
+ }
+ #else
+-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
++static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
+ {
+ return 0;
+ }
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 370c661..fa00f3a 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+ at_xdmac_remove_xfer(atchan, desc);
+
++ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+ spin_unlock_irqrestore(&atchan->lock, flags);
+
+@@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
+ atchan = to_at_xdmac_chan(chan);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
+ if (at_xdmac_chan_is_cyclic(atchan)) {
++ if (at_xdmac_chan_is_paused(atchan))
++ at_xdmac_device_resume(chan);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 7067b6d..4f099ea 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+ /* Called with dwc->lock held and all DMAC interrupts disabled */
+ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+- u32 status_err, u32 status_xfer)
++ u32 status_block, u32 status_err, u32 status_xfer)
+ {
+ unsigned long flags;
+
+- if (dwc->mask) {
++ if (status_block & dwc->mask) {
+ void (*callback)(void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(dwc, LLP));
++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+ callback = dwc->cdesc->period_callback;
+ callback_param = dwc->cdesc->period_callback_param;
+@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ channel_writel(dwc, CTL_LO, 0);
+ channel_writel(dwc, CTL_HI, 0);
+
++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
++
++ /* Re-enable interrupts */
++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ }
+
+ /* ------------------------------------------------------------------------- */
+@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
+ {
+ struct dw_dma *dw = (struct dw_dma *)data;
+ struct dw_dma_chan *dwc;
++ u32 status_block;
+ u32 status_xfer;
+ u32 status_err;
+ int i;
+
++ status_block = dma_readl(dw, RAW.BLOCK);
+ status_xfer = dma_readl(dw, RAW.XFER);
+ status_err = dma_readl(dw, RAW.ERROR);
+
+@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
+ for (i = 0; i < dw->dma.chancnt; i++) {
+ dwc = &dw->chan[i];
+ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
++ dwc_handle_cyclic(dw, dwc, status_block, status_err,
++ status_xfer);
+ else if (status_err & (1 << i))
+ dwc_handle_error(dw, dwc);
+ else if (status_xfer & (1 << i))
+ dwc_scan_descriptors(dw, dwc);
+ }
+
+- /*
+- * Re-enable interrupts.
+- */
++ /* Re-enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ }
+@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+ * softirq handler.
+ */
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+ status = dma_readl(dw, STATUS_INT);
+@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+
+ /* Try to recover */
+ channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
++ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
+ dma_writel(dw, CFG, 0);
+
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+
+ /* Disable interrupts */
+ channel_clear_bit(dw, MASK.XFER, dwc->mask);
++ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
++ struct dw_dma *dw = to_dw_dma(chan->device);
+ unsigned long flags;
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+- /* Assert channel is idle */
+- if (dma_readl(dw, CH_EN) & dwc->mask) {
+- dev_err(chan2dev(&dwc->chan),
+- "%s: BUG: Attempted to start non-idle channel\n",
+- __func__);
+- dwc_dump_chan_regs(dwc);
+- spin_unlock_irqrestore(&dwc->lock, flags);
+- return -EBUSY;
+- }
+-
+- dma_writel(dw, CLEAR.ERROR, dwc->mask);
+- dma_writel(dw, CLEAR.XFER, dwc->mask);
++ /* Enable interrupts to perform cyclic transfer */
++ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+
+- /* Setup DMAC channel registers */
+- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+- channel_writel(dwc, CTL_HI, 0);
+-
+- channel_set_bit(dw, CH_EN, dwc->mask);
++ dwc_dostart(dwc, dwc->cdesc->desc[0]);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
+
+ dwc_chan_disable(dw, dwc);
+
++ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ /* Force dma off, just in case */
+ dw_dma_off(dw);
+
+- /* Disable BLOCK interrupts as well */
+- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+-
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+ sizeof(struct dw_desc), 4, 0);
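
The dw_dmac hunks wire the per-period BLOCK interrupt through the whole driver: the tasklet now reads RAW.BLOCK and only fires the cyclic callback when a block actually completed, every mask/clear site handles BLOCK alongside XFER and ERROR, and dw_dma_cyclic_start() reuses dwc_dostart() instead of open-coding the channel register setup. Sketch of the now-symmetric bookkeeping:

	status_block = dma_readl(dw, RAW.BLOCK);	/* per-period event */
	if (status_block & dwc->mask) {
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);	/* acknowledge      */
		/* ... run the period callback ... */
	}
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);	/* re-arm for next  */
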
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 592af5f..5358737 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ */
+ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ {
+- int status;
+-
+ if (!edac_dev->edac_check)
+ return;
+
+- status = cancel_delayed_work(&edac_dev->work);
+- if (status == 0) {
+- /* workq instance might be running, wait for it */
+- flush_workqueue(edac_workqueue);
+- }
++ edac_dev->op_state = OP_OFFLINE;
++
++ cancel_delayed_work_sync(&edac_dev->work);
++ flush_workqueue(edac_workqueue);
+ }
+
+ /*
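
This edac_device hunk, and the matching edac_mc and edac_pci hunks below, replace cancel_delayed_work(), which cannot stop a work item that is already executing, with cancel_delayed_work_sync(), and mark the instance OP_OFFLINE first so the self-rearming poller will not requeue itself during teardown. The idiom:

	dev->op_state = OP_OFFLINE;		/* poller checks before rearming */
	cancel_delayed_work_sync(&dev->work);	/* waits if it is mid-run        */
	flush_workqueue(edac_workqueue);	/* drain anything still queued   */
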
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 77ecd6a..1b2c218 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ */
+ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ {
+- int status;
+-
+- if (mci->op_state != OP_RUNNING_POLL)
+- return;
+-
+- status = cancel_delayed_work(&mci->work);
+- if (status == 0) {
+- edac_dbg(0, "not canceled, flush the queue\n");
++ mci->op_state = OP_OFFLINE;
+
+- /* workq instance might be running, wait for it */
+- flush_workqueue(edac_workqueue);
+- }
++ cancel_delayed_work_sync(&mci->work);
++ flush_workqueue(edac_workqueue);
+ }
+
+ /*
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index a75acea..58aed67 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
+ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
+ const struct attribute_group **groups)
+ {
++ char *name;
+ int i, err;
+
+ /*
+ * The memory controller needs its own bus, in order to avoid
+ * namespace conflicts at /sys/bus/edac.
+ */
+- mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+- if (!mci->bus->name)
++ name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
++ if (!name)
+ return -ENOMEM;
+
++ mci->bus->name = name;
++
+ edac_dbg(0, "creating bus %s\n", mci->bus->name);
+
+ err = bus_register(mci->bus);
+- if (err < 0)
+- goto fail_free_name;
++ if (err < 0) {
++ kfree(name);
++ return err;
++ }
+
+ /* get the /sys/devices/system/edac subsys reference */
+ mci->dev.type = &mci_attr_type;
+@@ -961,8 +966,8 @@ fail_unregister_dimm:
+ device_unregister(&mci->dev);
+ fail_unregister_bus:
+ bus_unregister(mci->bus);
+-fail_free_name:
+- kfree(mci->bus->name);
++ kfree(name);
++
+ return err;
+ }
+
+@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+
+ void edac_unregister_sysfs(struct mem_ctl_info *mci)
+ {
++ const char *name = mci->bus->name;
++
+ edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ device_unregister(&mci->dev);
+ bus_unregister(mci->bus);
+- kfree(mci->bus->name);
++ kfree(name);
+ }
+
+ static void mc_attr_release(struct device *dev)
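
The edac_mc_sysfs hunks fix the lifetime of the bus name: it was freed through mci->bus->name after device_unregister()/bus_unregister(), by which point the structure holding that pointer may no longer be valid. Keeping the kasprintf() result in a local, and snapshotting it in edac_unregister_sysfs() before unregistering, frees exactly the buffer that was allocated. Sketch:

	char *name = kasprintf(GFP_KERNEL, "mc%d", idx);

	if (!name)
		return -ENOMEM;
	mci->bus->name = name;		/* publish */
	/* ... on error or final teardown ... */
	bus_unregister(mci->bus);
	kfree(name);			/* not kfree(mci->bus->name) */
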
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index 2cf44b4d..b4b3860 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+ */
+ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+ {
+- int status;
+-
+ edac_dbg(0, "\n");
+
+- status = cancel_delayed_work(&pci->work);
+- if (status == 0)
+- flush_workqueue(edac_workqueue);
++ pci->op_state = OP_OFFLINE;
++
++ cancel_delayed_work_sync(&pci->work);
++ flush_workqueue(edac_workqueue);
+ }
+
+ /*
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 756eca8..10e6774 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
+ }
+
+ if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+- efivar_validate(name, data, size) == false) {
++ efivar_validate(vendor, name, data, size) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ }
+
+ if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+- efivar_validate(name, data, size) == false) {
++ efivar_validate(new_var->VendorGuid, name, data,
++ size) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ static int
+ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ {
+- int i, short_name_size;
++ int short_name_size;
+ char *short_name;
+- unsigned long variable_name_size;
+- efi_char16_t *variable_name;
++ unsigned long utf8_name_size;
++ efi_char16_t *variable_name = new_var->var.VariableName;
+ int ret;
+
+- variable_name = new_var->var.VariableName;
+- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
+-
+ /*
+- * Length of the variable bytes in ASCII, plus the '-' separator,
++ * Length of the variable bytes in UTF8, plus the '-' separator,
+ * plus the GUID, plus trailing NUL
+ */
+- short_name_size = variable_name_size / sizeof(efi_char16_t)
+- + 1 + EFI_VARIABLE_GUID_LEN + 1;
+-
+- short_name = kzalloc(short_name_size, GFP_KERNEL);
++ utf8_name_size = ucs2_utf8size(variable_name);
++ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+
++ short_name = kmalloc(short_name_size, GFP_KERNEL);
+ if (!short_name)
+ return -ENOMEM;
+
+- /* Convert Unicode to normal chars (assume top bits are 0),
+- ala UTF-8 */
+- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
+- short_name[i] = variable_name[i] & 0xFF;
+- }
++ ucs2_as_utf8(short_name, variable_name, short_name_size);
++
+ /* This is ugly, but necessary to separate one vendor's
+ private variables from another's. */
+-
+- *(short_name + strlen(short_name)) = '-';
++ short_name[utf8_name_size] = '-';
+ efi_guid_to_str(&new_var->var.VendorGuid,
+- short_name + strlen(short_name));
++ short_name + utf8_name_size + 1);
+
+ new_var->kobj.kset = efivars_kset;
+
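
The efivars hunk replaces the old mask-to-ASCII conversion with proper UCS-2 to UTF-8 handling from the ucs2 string helpers. The sizing arithmetic behind it: ucs2_utf8size() returns the byte length of the UTF-8 encoding, which can exceed one byte per character, and the sysfs entry name must budget for the separator, GUID and terminator on top:

	utf8_name_size  = ucs2_utf8size(variable_name);	/* bytes, no NUL  */
	short_name_size = utf8_name_size
			+ 1				/* '-' separator  */
			+ EFI_VARIABLE_GUID_LEN		/* textual GUID   */
			+ 1;				/* trailing NUL   */
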
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 70a0fb1..7f2ea21 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+ }
+
+ struct variable_validate {
++ efi_guid_t vendor;
+ char *name;
+ bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+ unsigned long len);
+ };
+
++/*
++ * This is the list of variables we need to validate, as well as the
++ * whitelist for what we think is safe not to default to immutable.
++ *
++ * If it has a validate() method that's not NULL, it'll go into the
++ * validation routine. If not, it is assumed valid, but still used for
++ * whitelisting.
++ *
++ * Note that it's sorted by {vendor,name}, but globbed names must come after
++ * any other name with the same prefix.
++ */
+ static const struct variable_validate variable_validate[] = {
+- { "BootNext", validate_uint16 },
+- { "BootOrder", validate_boot_order },
+- { "DriverOrder", validate_boot_order },
+- { "Boot*", validate_load_option },
+- { "Driver*", validate_load_option },
+- { "ConIn", validate_device_path },
+- { "ConInDev", validate_device_path },
+- { "ConOut", validate_device_path },
+- { "ConOutDev", validate_device_path },
+- { "ErrOut", validate_device_path },
+- { "ErrOutDev", validate_device_path },
+- { "Timeout", validate_uint16 },
+- { "Lang", validate_ascii_string },
+- { "PlatformLang", validate_ascii_string },
+- { "", NULL },
++ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
++ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
++ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
++ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
++ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
++ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
++ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
++ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
++ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
++ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
++ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
++ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
++ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
++ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
++ { LINUX_EFI_CRASH_GUID, "*", NULL },
++ { NULL_GUID, "", NULL },
+ };
+
++static bool
++variable_matches(const char *var_name, size_t len, const char *match_name,
++ int *match)
++{
++ for (*match = 0; ; (*match)++) {
++ char c = match_name[*match];
++ char u = var_name[*match];
++
++ /* Wildcard in the matching name means we've matched */
++ if (c == '*')
++ return true;
++
++ /* Case sensitive match */
++ if (!c && *match == len)
++ return true;
++
++ if (c != u)
++ return false;
++
++ if (!c)
++ return true;
++ }
++ return true;
++}
++
+ bool
+-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++ unsigned long data_size)
+ {
+ int i;
+- u16 *unicode_name = var_name;
++ unsigned long utf8_size;
++ u8 *utf8_name;
+
+- for (i = 0; variable_validate[i].validate != NULL; i++) {
+- const char *name = variable_validate[i].name;
+- int match;
++ utf8_size = ucs2_utf8size(var_name);
++ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
++ if (!utf8_name)
++ return false;
+
+- for (match = 0; ; match++) {
+- char c = name[match];
+- u16 u = unicode_name[match];
++ ucs2_as_utf8(utf8_name, var_name, utf8_size);
++ utf8_name[utf8_size] = '\0';
+
+- /* All special variables are plain ascii */
+- if (u > 127)
+- return true;
++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++ const char *name = variable_validate[i].name;
++ int match = 0;
+
+- /* Wildcard in the matching name means we've matched */
+- if (c == '*')
+- return variable_validate[i].validate(var_name,
+- match, data, len);
++ if (efi_guidcmp(vendor, variable_validate[i].vendor))
++ continue;
+
+- /* Case sensitive match */
+- if (c != u)
++ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
++ if (variable_validate[i].validate == NULL)
+ break;
+-
+- /* Reached the end of the string while matching */
+- if (!c)
+- return variable_validate[i].validate(var_name,
+- match, data, len);
++ kfree(utf8_name);
++ return variable_validate[i].validate(var_name, match,
++ data, data_size);
+ }
+ }
+-
++ kfree(utf8_name);
+ return true;
+ }
+ EXPORT_SYMBOL_GPL(efivar_validate);
+
++bool
++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
++ size_t len)
++{
++ int i;
++ bool found = false;
++ int match = 0;
++
++ /*
++ * Check if our variable is in the validated variables list
++ */
++ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++ if (efi_guidcmp(variable_validate[i].vendor, vendor))
++ continue;
++
++ if (variable_matches(var_name, len,
++ variable_validate[i].name, &match)) {
++ found = true;
++ break;
++ }
++ }
++
++ /*
++ * If it's in our list, it is removable.
++ */
++ return found;
++}
++EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
++
+ static efi_status_t
+ check_var_size(u32 attributes, unsigned long size)
+ {
+@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+
+ *set = false;
+
+- if (efivar_validate(name, data, *size) == false)
++ if (efivar_validate(*vendor, name, data, *size) == false)
+ return -EINVAL;
+
+ /*
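
The vars.c rework has two parts: variable_validate[] now carries the vendor GUID and doubles as the whitelist that efivar_variable_is_removable() consults when efivarfs decides which files to leave mutable, and variable_matches() implements the name comparison, with a trailing '*' acting as a prefix glob, which is why globbed entries must sort after exact names sharing their prefix. Usage sketch (lengths include the NUL, as efivar_validate() passes them):

	int match = 0;

	variable_matches("Boot0001", 9, "Boot*", &match);	/* true, match == 4 */
	variable_matches("BootOrder", 10, "BootOrder", &match);	/* exact, true      */
	variable_matches("Lang", 5, "LangCodes", &match);	/* false            */
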
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 04c2707..ca06601 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+
+ # add asic specific block
+-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
++amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+ amdgpu_amdkfd_gfx_v7.o
+
+@@ -31,6 +31,7 @@ amdgpu-y += \
+
+ # add GMC block
+ amdgpu-y += \
++ gmc_v7_0.o \
+ gmc_v8_0.o
+
+ # add IH block
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 048cfe0..bb1099c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
+ uint32_t align;
+ };
+
+-struct amdgpu_sa_bo;
+-
+ /* sub-allocation buffer */
+ struct amdgpu_sa_bo {
+ struct list_head olist;
+@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags);
+ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
++ unsigned long end);
+ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_mem_reg *mem);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d5b4213..c961fe0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ }
+
+ /* post card */
+- amdgpu_atom_asic_init(adev->mode_info.atom_context);
++ if (!amdgpu_card_posted(adev))
++ amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+ r = amdgpu_resume(adev);
++ if (r)
++ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+
+ amdgpu_fence_driver_resume(adev);
+
+- r = amdgpu_ib_ring_tests(adev);
+- if (r)
+- DRM_ERROR("ib ring test failed (%d).\n", r);
++ if (resume) {
++ r = amdgpu_ib_ring_tests(adev);
++ if (r)
++ DRM_ERROR("ib ring test failed (%d).\n", r);
++ }
+
+ r = amdgpu_late_init(adev);
+ if (r)
+@@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ }
+
+ drm_kms_helper_poll_enable(dev);
++ drm_helper_hpd_irq_event(dev);
+
+ if (fbcon) {
+ amdgpu_fbdev_set_suspend(adev, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 5580d34..0c713a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+
+ struct drm_crtc *crtc = &amdgpuCrtc->base;
+ unsigned long flags;
+- unsigned i;
+- int vpos, hpos, stat, min_udelay;
++ unsigned i, repcnt = 4;
++ int vpos, hpos, stat, min_udelay = 0;
+ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+
+ amdgpu_flip_wait_fence(adev, &work->excl);
+@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ * In practice this won't execute very often unless on very fast
+ * machines because the time window for this to happen is very small.
+ */
+- for (;;) {
++ while (amdgpuCrtc->enabled && repcnt--) {
+ /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ * start in hpos, and to the "fudged earlier" vblank start in
+ * vpos.
+@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ /* Sleep at least until estimated real start of hw vblank */
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
++ if (min_udelay > vblank->framedur_ns / 2000) {
++ /* Don't wait ridiculously long - something is wrong */
++ repcnt = 0;
++ break;
++ }
+ usleep_range(min_udelay, 2 * min_udelay);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ };
+
++ if (!repcnt)
++ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
++ "framedur %d, linedur %d, stat %d, vpos %d, "
++ "hpos %d\n", work->crtc_id, min_udelay,
++ vblank->framedur_ns / 1000,
++ vblank->linedur_ns / 1000, stat, vpos, hpos);
++
+ /* do the flip (mmio) */
+ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
+ /* set the flip status */
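
The amdgpu_display hunks bound the vblank wait: the previous for (;;) loop could spin indefinitely if the CRTC was disabled mid-flip or the scanout position query returned nonsense. It now runs at most four iterations, skips disabled CRTCs entirely, gives up when the computed delay exceeds half a frame, and logs a debug line on failure. Reduced to its shape (hypothetical helper names):

	unsigned repcnt = 4;

	while (crtc_enabled && repcnt--) {
		if (in_vblank_safe_zone())
			break;
		if (min_udelay > framedur_ns / 2000)
			break;			/* timing data looks bogus */
		usleep_range(min_udelay, 2 * min_udelay);
	}
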
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 0508c5c..8d6668c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
+ {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ #endif
+ /* topaz */
+- {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+- {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
++ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++ {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++ {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++ {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++ {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ /* tonga */
+ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+ {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index b1969f2..d4e2780 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
+
+ list_for_each_entry(bo, &node->bos, mn_list) {
+
+- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
++ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
++ end))
+ continue;
+
+ r = amdgpu_bo_reserve(bo, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index c3ce103..a2a16ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -399,7 +399,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ }
+ if (fpfn > bo->placements[i].fpfn)
+ bo->placements[i].fpfn = fpfn;
+- if (lpfn && lpfn < bo->placements[i].lpfn)
++ if (!bo->placements[i].lpfn ||
++ (lpfn && lpfn < bo->placements[i].lpfn))
+ bo->placements[i].lpfn = lpfn;
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 22a8c7d..03fe251 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -595,8 +595,6 @@ force:
+
+ /* update display watermarks based on new power state */
+ amdgpu_display_bandwidth_update(adev);
+- /* update displays */
+- amdgpu_dpm_display_configuration_changed(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+@@ -616,6 +614,9 @@ force:
+
+ amdgpu_dpm_post_set_power_state(adev);
+
++ /* update displays */
++ amdgpu_dpm_display_configuration_changed(adev);
++
+ if (adev->pm.funcs->force_performance_level) {
+ if (adev->pm.dpm.thermal_active) {
+ enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+index 8b88edb..ca72a2e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+
+ for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+ if (fences[i])
+- fences[count++] = fences[i];
++ fences[count++] = fence_get(fences[i]);
+
+ if (count) {
+ spin_unlock(&sa_manager->wq.lock);
+ t = fence_wait_any_timeout(fences, count, false,
+ MAX_SCHEDULE_TIMEOUT);
++ for (i = 0; i < count; ++i)
++ fence_put(fences[i]);
++
+ r = (t > 0) ? 0 : t;
+ spin_lock(&sa_manager->wq.lock);
+ } else {
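
The amdgpu_sa hunk pins the fences before dropping sa_manager->wq.lock: fence_wait_any_timeout() runs unlocked, so without a reference each fence could be freed under the waiter. The general rule, as a sketch: take a reference on anything you will touch after releasing the lock that protects it.

	for (i = 0, count = 0; i < n; ++i)
		if (fences[i])
			fences[count++] = fence_get(fences[i]);	/* +1 ref  */
	spin_unlock(&lock);
	t = fence_wait_any_timeout(fences, count, false, timeout);
	for (i = 0; i < count; ++i)
		fence_put(fences[i]);				/* drop it */
	spin_lock(&lock);
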
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index dd005c3..181ce39 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
+ fence = to_amdgpu_fence(sync->sync_to[i]);
+
+ /* check if we really need to sync */
+- if (!amdgpu_fence_need_sync(fence, ring))
++ if (!amdgpu_enable_scheduler &&
++ !amdgpu_fence_need_sync(fence, ring))
+ continue;
+
+ /* prevent GPU deadlocks */
+@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
+ }
+
+ if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+- r = fence_wait(&fence->base, true);
++ r = fence_wait(sync->sync_to[i], true);
+ if (r)
+ return r;
+ continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 8a1752f..1cbb16e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
+- while (--i) {
++ while (i--) {
+ pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
+@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+ return !!gtt->userptr;
+ }
+
++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
++ unsigned long end)
++{
++ struct amdgpu_ttm_tt *gtt = (void *)ttm;
++ unsigned long size;
++
++ if (gtt == NULL)
++ return false;
++
++ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
++ return false;
++
++ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
++ if (gtt->userptr > end || gtt->userptr + size <= start)
++ return false;
++
++ return true;
++}
++
+ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
+ {
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ flags |= AMDGPU_PTE_SNOOPED;
+ }
+
+- if (adev->asic_type >= CHIP_TOPAZ)
++ if (adev->asic_type >= CHIP_TONGA)
+ flags |= AMDGPU_PTE_EXECUTABLE;
+
+ flags |= AMDGPU_PTE_READABLE;
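
Two separate fixes sit in the amdgpu_ttm hunks. The new amdgpu_ttm_tt_affect_userptr() gives the MMU notifier (see amdgpu_mn.c above) a real overlap test against the invalidated range instead of reacting to every bound ttm. The populate-error path fixes a classic unwind off-by-one, shown here with a hypothetical unmap():

	int i = 3;		/* dma mapping failed at index 3      */

	while (--i)		/* old: visits 2, 1 -- entry 0 leaks  */
		unmap(i);
	/* versus */
	while (i--)		/* new: visits 2, 1, 0 -- full unwind */
		unmap(i);
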
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b53d273..39adbb6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+- eaddr = saddr + size;
++ eaddr = saddr + size - 1;
+ if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
+ return -EINVAL;
+
+ last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
+- if (last_pfn > adev->vm_manager.max_pfn) {
+- dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
++ if (last_pfn >= adev->vm_manager.max_pfn) {
++ dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+ last_pfn, adev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ eaddr /= AMDGPU_GPU_PAGE_SIZE;
+
+ spin_lock(&vm->it_lock);
+- it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
++ it = interval_tree_iter_first(&vm->va, saddr, eaddr);
+ spin_unlock(&vm->it_lock);
+ if (it) {
+ struct amdgpu_bo_va_mapping *tmp;
+@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+
+ INIT_LIST_HEAD(&mapping->list);
+ mapping->it.start = saddr;
+- mapping->it.last = eaddr - 1;
++ mapping->it.last = eaddr;
+ mapping->offset = offset;
+ mapping->flags = flags;
+
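
The amdgpu_vm hunks switch the mapping bookkeeping to the inclusive convention the interval tree actually uses: it.last is the last address inside the range, so eaddr becomes saddr + size - 1, the lookup passes eaddr directly instead of eaddr - 1, and the pfn bound check becomes >= max_pfn. Illustrative arithmetic:

	uint64_t saddr = 0x1000, size = 0x1000;
	uint64_t eaddr = saddr + size - 1;	/* 0x1fff, last byte mapped */

	/* a neighbouring map starting at 0x2000 no longer overlaps */
	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
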
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index e1dcab9..4cb45f4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_me.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
+-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
+
+ MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
+@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+ adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+- if (adev->asic_type != CHIP_STONEY) {
++ if ((adev->asic_type != CHIP_STONEY) &&
++ (adev->asic_type != CHIP_TOPAZ)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (!err) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index ed8abb5..272110c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+
+ MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+ MODULE_FIRMWARE("radeon/hawaii_mc.bin");
++MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
++
++static const u32 golden_settings_iceland_a11[] =
++{
++ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
++ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
++ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
++ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
++};
++
++static const u32 iceland_mgcg_cgcg_init[] =
++{
++ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
++};
++
++static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
++{
++ switch (adev->asic_type) {
++ case CHIP_TOPAZ:
++ amdgpu_program_register_sequence(adev,
++ iceland_mgcg_cgcg_init,
++ (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
++ amdgpu_program_register_sequence(adev,
++ golden_settings_iceland_a11,
++ (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
++ break;
++ default:
++ break;
++ }
++}
+
+ /**
+- * gmc8_mc_wait_for_idle - wait for MC idle callback.
++ * gmc7_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+ case CHIP_HAWAII:
+ chip_name = "hawaii";
+ break;
++ case CHIP_TOPAZ:
++ chip_name = "topaz";
++ break;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ return 0;
+ default: BUG();
+ }
+
+- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++ if (adev->asic_type == CHIP_TOPAZ)
++ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
++ else
++ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++
+ err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ gmc_v7_0_init_golden_registers(adev);
++
+ gmc_v7_0_mc_program(adev);
+
+ if (!(adev->flags & AMD_IS_APU)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index d390284..ba4ad00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -42,9 +42,7 @@
+ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+
+-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
+
+ static const u32 golden_settings_tonga_a11[] =
+ {
+@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+ };
+
+-static const u32 golden_settings_iceland_a11[] =
+-{
+- mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+- mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+- mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+- mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+-};
+-
+-static const u32 iceland_mgcg_cgcg_init[] =
+-{
+- mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+-};
+-
+ static const u32 cz_mgcg_cgcg_init[] =
+ {
+ mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
+ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
+ {
+ switch (adev->asic_type) {
+- case CHIP_TOPAZ:
+- amdgpu_program_register_sequence(adev,
+- iceland_mgcg_cgcg_init,
+- (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+- amdgpu_program_register_sequence(adev,
+- golden_settings_iceland_a11,
+- (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+- break;
+ case CHIP_FIJI:
+ amdgpu_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+- case CHIP_TOPAZ:
+- chip_name = "topaz";
+- break;
+ case CHIP_TONGA:
+ chip_name = "tonga";
+ break;
+ case CHIP_FIJI:
+- chip_name = "fiji";
+- break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ return 0;
+@@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
+
+ gmc_v8_0_mc_program(adev);
+
+- if (!(adev->flags & AMD_IS_APU)) {
++ if (adev->asic_type == CHIP_TONGA) {
+ r = gmc_v8_0_mc_load_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+index 966d4b2..090486c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
+ case AMDGPU_UCODE_ID_CP_ME:
+ return UCODE_ID_CP_ME_MASK;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
++ return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ return UCODE_ID_CP_MEC_MASK;
+ case AMDGPU_UCODE_ID_RLC_G:
+@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
+ return -EINVAL;
+ }
+
+- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+- &toc->entry[toc->num_entries++])) {
+- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+- return -EINVAL;
+- }
+-
+ if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+ &toc->entry[toc->num_entries++])) {
+ DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
+ UCODE_ID_CP_ME_MASK |
+ UCODE_ID_CP_PFP_MASK |
+ UCODE_ID_CP_MEC_MASK |
+- UCODE_ID_CP_MEC_JT1_MASK |
+- UCODE_ID_CP_MEC_JT2_MASK;
++ UCODE_ID_CP_MEC_JT1_MASK;
++
+
+ if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+ DRM_ERROR("Fail to request SMU load ucode\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+index 2049038..63d6cb3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
+
+ static int tonga_dpm_suspend(void *handle)
+ {
+- return 0;
++ return tonga_dpm_hw_fini(handle);
+ }
+
+ static int tonga_dpm_resume(void *handle)
+ {
+- int ret;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+- mutex_lock(&adev->pm.mutex);
+-
+- ret = tonga_smu_start(adev);
+- if (ret) {
+- DRM_ERROR("SMU start failed\n");
+- goto fail;
+- }
+-
+-fail:
+- mutex_unlock(&adev->pm.mutex);
+- return ret;
++ return tonga_dpm_hw_init(handle);
+ }
+
+ static int tonga_dpm_set_clockgating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 2adc1c8..7628eb4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -60,6 +60,7 @@
+ #include "vi.h"
+ #include "vi_dpm.h"
+ #include "gmc_v8_0.h"
++#include "gmc_v7_0.h"
+ #include "gfx_v8_0.h"
+ #include "sdma_v2_4.h"
+ #include "sdma_v3_0.h"
+@@ -1128,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+- .major = 8,
+- .minor = 0,
++ .major = 7,
++ .minor = 4,
+ .rev = 0,
+- .funcs = &gmc_v8_0_ip_funcs,
++ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 809959d..39d7e2e 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+ return mstb;
+ }
+
++static void drm_dp_free_mst_port(struct kref *kref);
++
++static void drm_dp_free_mst_branch_device(struct kref *kref)
++{
++ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
++ if (mstb->port_parent) {
++ if (list_empty(&mstb->port_parent->next))
++ kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
++ }
++ kfree(mstb);
++}
++
+ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ {
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ bool wake_tx = false;
+
+ /*
++ * init kref again to be used by ports to remove mst branch when it is
++ * not needed anymore
++ */
++ kref_init(kref);
++
++ if (mstb->port_parent && list_empty(&mstb->port_parent->next))
++ kref_get(&mstb->port_parent->kref);
++
++ /*
+ * destroy all ports - don't need lock
+ * as there are no more references to the mst branch
+ * device at this point.
+@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+
+ if (wake_tx)
+ wake_up(&mstb->mgr->tx_waitq);
+- kfree(mstb);
++
++ kref_put(kref, drm_dp_free_mst_branch_device);
+ }
+
+ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
+ * from an EDID retrieval */
+
+ mutex_lock(&mgr->destroy_connector_lock);
++ kref_get(&port->parent->kref);
+ list_add(&port->next, &mgr->destroy_connector_list);
+ mutex_unlock(&mgr->destroy_connector_lock);
+ schedule_work(&mgr->destroy_connector_work);
+@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
+ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ u8 *rad)
+ {
+- int lct = port->parent->lct;
++ int parent_lct = port->parent->lct;
+ int shift = 4;
+- int idx = lct / 2;
+- if (lct > 1) {
+- memcpy(rad, port->parent->rad, idx);
+- shift = (lct % 2) ? 4 : 0;
++ int idx = (parent_lct - 1) / 2;
++ if (parent_lct > 1) {
++ memcpy(rad, port->parent->rad, idx + 1);
++ shift = (parent_lct % 2) ? 4 : 0;
+ } else
+ rad[0] = 0;
+
+ rad[idx] |= port->port_num << shift;
+- return lct + 1;
++ return parent_lct + 1;
+ }
+
+ /*
+@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+ return send_link;
+ }
+
+-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+- struct drm_dp_mst_port *port)
++static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+ {
+ int ret;
+- if (port->dpcd_rev >= 0x12) {
+- port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+- if (!port->guid_valid) {
+- ret = drm_dp_send_dpcd_write(mstb->mgr,
+- port,
+- DP_GUID,
+- 16, port->guid);
+- port->guid_valid = true;
++
++ memcpy(mstb->guid, guid, 16);
++
++ if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
++ if (mstb->port_parent) {
++ ret = drm_dp_send_dpcd_write(
++ mstb->mgr,
++ mstb->port_parent,
++ DP_GUID,
++ 16,
++ mstb->guid);
++ } else {
++
++ ret = drm_dp_dpcd_write(
++ mstb->mgr->aux,
++ DP_GUID,
++ mstb->guid,
++ 16);
+ }
+ }
+ }
+@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+ snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
+ for (i = 0; i < (mstb->lct - 1); i++) {
+ int shift = (i % 2) ? 0 : 4;
+- int port_num = mstb->rad[i / 2] >> shift;
++ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ strlcat(proppath, temp, proppath_size);
+ }
+@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ port->dpcd_rev = port_msg->dpcd_revision;
+ port->num_sdp_streams = port_msg->num_sdp_streams;
+ port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+- memcpy(port->guid, port_msg->peer_guid, 16);
+
+ /* manage mstb port lists with mgr lock - take a reference
+ for this list */
+@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+- drm_dp_check_port_guid(mstb, port);
+ if (!port->input)
+ drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ } else {
+- port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+- drm_dp_check_port_guid(mstb, port);
+ dowork = true;
+ } else {
+- port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+
+ for (i = 0; i < lct - 1; i++) {
+ int shift = (i % 2) ? 0 : 4;
+- int port_num = rad[i / 2] >> shift;
++ int port_num = (rad[i / 2] >> shift) & 0xf;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+@@ -1210,6 +1237,48 @@ out:
+ return mstb;
+ }
+
++static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
++ struct drm_dp_mst_branch *mstb,
++ uint8_t *guid)
++{
++ struct drm_dp_mst_branch *found_mstb;
++ struct drm_dp_mst_port *port;
++
++ if (memcmp(mstb->guid, guid, 16) == 0)
++ return mstb;
++
++
++ list_for_each_entry(port, &mstb->ports, next) {
++ if (!port->mstb)
++ continue;
++
++ found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
++
++ if (found_mstb)
++ return found_mstb;
++ }
++
++ return NULL;
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
++ struct drm_dp_mst_topology_mgr *mgr,
++ uint8_t *guid)
++{
++ struct drm_dp_mst_branch *mstb;
++
++ /* find the port by iterating down */
++ mutex_lock(&mgr->lock);
++
++ mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
++
++ if (mstb)
++ kref_get(&mstb->kref);
++
++ mutex_unlock(&mgr->lock);
++ return mstb;
++}
++
+ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+ {
+@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+ {
+ struct drm_dp_mst_branch *mstb = txmsg->dst;
++ u8 req_type;
+
+ /* both msg slots are full */
+ if (txmsg->seqno == -1) {
+@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ txmsg->seqno = 1;
+ mstb->tx_slots[txmsg->seqno] = txmsg;
+ }
+- hdr->broadcast = 0;
++
++ req_type = txmsg->msg[0] & 0x7f;
++ if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
++ req_type == DP_RESOURCE_STATUS_NOTIFY)
++ hdr->broadcast = 1;
++ else
++ hdr->broadcast = 0;
+ hdr->path_msg = txmsg->path_msg;
+ hdr->lct = mstb->lct;
+ hdr->lcr = mstb->lct - 1;
+@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+ }
+
+ /* called holding qlock */
+-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_sideband_msg_tx *txmsg)
+ {
+- struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+- if (list_empty(&mgr->tx_msg_upq)) {
+- mgr->tx_up_in_progress = false;
+- return;
+- }
+-
+- txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, true);
+- if (ret == 1) {
+- /* up txmsgs aren't put in slots - so free after we send it */
+- list_del(&txmsg->next);
+- kfree(txmsg);
+- } else if (ret)
++
++ if (ret != 1)
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+- mgr->tx_up_in_progress = true;
++
++ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
+
+ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+ }
++
++ drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
++
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+ }
+@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ return 0;
+ }
+
++static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
++{
++ if (!mstb->port_parent)
++ return NULL;
++
++ if (mstb->port_parent->mstb != mstb)
++ return mstb->port_parent;
++
++ return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
++ struct drm_dp_mst_branch *mstb,
++ int *port_num)
++{
++ struct drm_dp_mst_branch *rmstb = NULL;
++ struct drm_dp_mst_port *found_port;
++ mutex_lock(&mgr->lock);
++ if (mgr->mst_primary) {
++ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
++
++ if (found_port) {
++ rmstb = found_port->parent;
++ kref_get(&rmstb->kref);
++ *port_num = found_port->port_num;
++ }
++ }
++ mutex_unlock(&mgr->lock);
++ return rmstb;
++}
++
+ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ {
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+- int len, ret;
++ int len, ret, port_num;
+
++ port_num = port->port_num;
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+- if (!mstb)
+- return -EINVAL;
++ if (!mstb) {
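++ /* The parent branch may already be unplugged; fall back to
++ * the last still-connected ancestor so a payload teardown can
++ * reach a live branch (port_num is rewritten to match). */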
++ mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
++
++ if (!mstb)
++ return -EINVAL;
++ }
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ }
+
+ txmsg->dst = mstb;
+- len = build_allocate_payload(txmsg, port->port_num,
++ len = build_allocate_payload(txmsg, port_num,
+ id,
+ pbn);
+
+@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ drm_dp_encode_up_ack_reply(txmsg, req_type);
+
+ mutex_lock(&mgr->qlock);
+- list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+- if (!mgr->tx_up_in_progress) {
+- process_single_up_tx_qlock(mgr);
+- }
++
++ process_single_up_tx_qlock(mgr, txmsg);
++
+ mutex_unlock(&mgr->qlock);
++
++ kfree(txmsg);
+ return 0;
+ }
+
+@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ mgr->mst_primary = mstb;
+ kref_get(&mgr->mst_primary->kref);
+
+- {
+- struct drm_dp_payload reset_pay;
+- reset_pay.start_slot = 0;
+- reset_pay.num_slots = 0x3f;
+- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+- }
+-
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
++ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ goto out_unlock;
+ }
+
+-
+- /* sort out guid */
+- ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+- if (ret != 16) {
+- DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+- goto out_unlock;
+- }
+-
+- mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+- if (!mgr->guid_valid) {
+- ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+- mgr->guid_valid = true;
++ {
++ struct drm_dp_payload reset_pay;
++ reset_pay.start_slot = 0;
++ reset_pay.num_slots = 0x3f;
++ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ }
+
+ queue_work(system_long_wq, &mgr->work);
+@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+
+ if (mgr->up_req_recv.have_eomt) {
+ struct drm_dp_sideband_msg_req_body msg;
+- struct drm_dp_mst_branch *mstb;
++ struct drm_dp_mst_branch *mstb = NULL;
+ bool seqno;
+- mstb = drm_dp_get_mst_branch_device(mgr,
+- mgr->up_req_recv.initial_hdr.lct,
+- mgr->up_req_recv.initial_hdr.rad);
+- if (!mstb) {
+- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+- return 0;
++
++ if (!mgr->up_req_recv.initial_hdr.broadcast) {
++ mstb = drm_dp_get_mst_branch_device(mgr,
++ mgr->up_req_recv.initial_hdr.lct,
++ mgr->up_req_recv.initial_hdr.rad);
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
+ }
+
+ seqno = mgr->up_req_recv.initial_hdr.seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+
+ if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++
++ if (!mstb)
++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
++
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
+ drm_dp_update_port(mstb, &msg.u.conn_stat);
++
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ (*mgr->cbs->hotplug)(mgr);
+
+ } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+- drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++ if (!mstb)
++ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
++
++ if (!mstb) {
++ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++ return 0;
++ }
++
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ }
+
+@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ if (pbn == port->vcpi.pbn) {
+ *slots = port->vcpi.num_slots;
++ drm_dp_put_port(port);
+ return true;
+ }
+ }
+@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
+ */
+ int drm_dp_calc_pbn_mode(int clock, int bpp)
+ {
+- fixed20_12 pix_bw;
+- fixed20_12 fbpp;
+- fixed20_12 result;
+- fixed20_12 margin, tmp;
+- u32 res;
+-
+- pix_bw.full = dfixed_const(clock);
+- fbpp.full = dfixed_const(bpp);
+- tmp.full = dfixed_const(8);
+- fbpp.full = dfixed_div(fbpp, tmp);
+-
+- result.full = dfixed_mul(pix_bw, fbpp);
+- margin.full = dfixed_const(54);
+- tmp.full = dfixed_const(64);
+- margin.full = dfixed_div(margin, tmp);
+- result.full = dfixed_div(result, margin);
+-
+- margin.full = dfixed_const(1006);
+- tmp.full = dfixed_const(1000);
+- margin.full = dfixed_div(margin, tmp);
+- result.full = dfixed_mul(result, margin);
+-
+- result.full = dfixed_div(result, tmp);
+- result.full = dfixed_ceil(result);
+- res = dfixed_trunc(result);
+- return res;
++ u64 kbps;
++ s64 peak_kbps;
++ u32 numerator;
++ u32 denominator;
++
++ kbps = clock * bpp;
++
++ /*
++ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
++ * PBN is expressed in units of 54/64 MBytes/sec, an arbitrary
++ * unit chosen as a common multiplier so every link rate/lane
++ * count combination yields an integer PBN. Calculate:
++ * peak_kbps *= (1006/1000)
++ * peak_kbps *= (64/54)
++ * peak_kbps /= 8 to convert bits to bytes
++ */
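++ /* Worked example: clock = 154000 kHz at 30 bpp gives
++ * kbps = 4620000; 4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000)
++ * ~= 688.6, which drm_fixp2int_ceil() rounds up to the PBN of
++ * 689 exercised by test_calc_pbn_mode() below. */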
++
++ numerator = 64 * 1006;
++ denominator = 54 * 8 * 1000 * 1000;
++
++ kbps *= numerator;
++ peak_kbps = drm_fixp_from_fraction(kbps, denominator);
++
++ return drm_fixp2int_ceil(peak_kbps);
+ }
+ EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+
+@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
+ {
+ int ret;
+ ret = drm_dp_calc_pbn_mode(154000, 30);
+- if (ret != 689)
++ if (ret != 689) {
++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++ 154000, 30, 689, ret);
+ return -EINVAL;
++ }
+ ret = drm_dp_calc_pbn_mode(234000, 30);
+- if (ret != 1047)
++ if (ret != 1047) {
++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++ 234000, 30, 1047, ret);
++ return -EINVAL;
++ }
++ ret = drm_dp_calc_pbn_mode(297000, 24);
++ if (ret != 1063) {
++ DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++ 297000, 24, 1063, ret);
+ return -EINVAL;
++ }
+ return 0;
+ }
+
+@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
+ mutex_unlock(&mgr->qlock);
+ }
+
++static void drm_dp_free_mst_port(struct kref *kref)
++{
++ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
++ kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
++ kfree(port);
++}
++
+ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ {
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ list_del(&port->next);
+ mutex_unlock(&mgr->destroy_connector_lock);
+
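++ /* The port arrives here with its kref already exhausted;
++ * re-arm it so the teardown below can take the regular
++ * kref_put() path into drm_dp_free_mst_port(). */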
++ kref_init(&port->kref);
++ INIT_LIST_HEAD(&port->next);
++
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+- if (!port->input && port->vcpi.vcpi > 0)
+- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+- kfree(port);
++ if (!port->input && port->vcpi.vcpi > 0) {
++ if (mgr->mst_state) {
++ drm_dp_mst_reset_vcpi_slots(mgr, port);
++ drm_dp_update_payload_part1(mgr);
++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++ }
++ }
++
++ kref_put(&port->kref, drm_dp_free_mst_port);
+ send_hotplug = true;
+ }
+ if (send_hotplug)
+@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ mutex_init(&mgr->qlock);
+ mutex_init(&mgr->payload_lock);
+ mutex_init(&mgr->destroy_connector_lock);
+- INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 607f493..8090989 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
+ diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
+ }
+
++ /*
++ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
++ * interval? If so then vblank irqs keep running and it will likely
++ * happen that the hardware vblank counter is not trustworthy as it
++ * might reset at some point in that interval and vblank timestamps
++ * are not trustworthy either in that interval. Iow. this can result
++ * in a bogus diff >> 1 which must be avoided as it would cause
++ * random large forward jumps of the software vblank counter.
++ */
++ if (diff > 1 && (vblank->inmodeset & 0x2)) {
++ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
++ " due to pre-modeset.\n", pipe, diff);
++ diff = 1;
++ }
++
++ /*
++ * FIXME: Need to replace this hack with proper seqlocks.
++ *
++ * Restrict the bump of the software vblank counter to a safe maximum
++ * value of +1 whenever there is the possibility that concurrent readers
++ * of vblank timestamps could be active at the moment, as the current
++ * implementation of the timestamp caching and updating is not safe
++ * against concurrent readers for calls to store_vblank() with a bump
++ * of anything but +1. A bump != 1 would very likely return corrupted
++ * timestamps to userspace, because the same slot in the cache could
++ * be concurrently written by store_vblank() and read by one of those
++ * readers without the read-retry logic detecting the collision.
++ *
++ * Concurrent readers can exist when we are called from the
++ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
++ * irq callers. However, all those calls to us are happening with the
++ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
++ * can't increase while we are executing. Therefore a zero refcount at
++ * this point is safe for arbitrary counter bumps if we are called
++ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
++ * we must also accept a refcount of 1, as whenever we are called from
++ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
++ * we must let that one pass through in order to not lose vblank counts
++ * during vblank irq off - which would completely defeat the whole
++ * point of this routine.
++ *
++ * Whenever we are called from vblank irq, we have to assume concurrent
++ * readers exist or can show up any time during our execution, even if
++ * the refcount is currently zero, as vblank irqs are usually only
++ * enabled due to the presence of readers, and because when we are called
++ * from vblank irq we can't hold the vbl_lock to protect us from sudden
++ * bumps in vblank refcount. Therefore also restrict bumps to +1 when
++ * called from vblank irq.
++ */
++ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
++ (flags & DRM_CALLED_FROM_VBLIRQ))) {
++ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
++ "refcount %u, vblirq %u\n", pipe, diff,
++ atomic_read(&vblank->refcount),
++ (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
++ diff = 1;
++ }
++
+ DRM_DEBUG_VBL("updating vblank count on crtc %u:"
+ " current=%u, diff=%u, hw=%u hw_last=%u\n",
+ pipe, vblank->count, diff, cur_vblank, vblank->last);
+@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ spin_lock(&dev->vbl_lock);
+- vblank_disable_and_save(dev, pipe);
++ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
++ pipe, vblank->enabled, vblank->inmodeset);
++
++ /* Avoid redundant vblank disables without previous drm_vblank_on(). */
++ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
++ vblank_disable_and_save(dev, pipe);
++
+ wake_up(&vblank->queue);
+
+ /*
+@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+ return;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
++ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
++ pipe, vblank->enabled, vblank->inmodeset);
++
+ /* Drop our private "prevent drm_vblank_get" refcount */
+ if (vblank->inmodeset) {
+ atomic_dec(&vblank->refcount);
+@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+ * re-enable interrupts if there are users left, or the
+ * user wishes vblank interrupts to be enabled all the time.
+ */
+- if (atomic_read(&vblank->refcount) != 0 ||
+- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
++ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
+ WARN_ON(drm_vblank_enable(dev, pipe));
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
+ if (vblank->inmodeset) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = true;
++ drm_reset_vblank_timestamp(dev, pipe);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ if (vblank->inmodeset & 0x2)
+diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
+index c707fa6..e3bdc8b 100644
+--- a/drivers/gpu/drm/gma500/gem.c
++++ b/drivers/gpu/drm/gma500/gem.c
+@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
+ return ret;
+ }
+ /* We have the initial and handle reference but need only one now */
+- drm_gem_object_unreference(&r->gem);
++ drm_gem_object_unreference_unlocked(&r->gem);
+ *handlep = handle;
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index b4741d1..61fcb3b 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
+ if (ret)
+ goto cleanup_gem_stolen;
+
++ intel_setup_gmbus(dev);
++
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
+ intel_modeset_init(dev);
+@@ -451,6 +453,7 @@ cleanup_gem:
+ cleanup_irq:
+ intel_guc_ucode_fini(dev);
+ drm_irq_uninstall(dev);
++ intel_teardown_gmbus(dev);
+ cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
+ cleanup_vga_switcheroo:
+@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+- intel_setup_gmbus(dev);
+ intel_opregion_setup(dev);
+
+ i915_gem_load(dev);
+@@ -1099,7 +1101,6 @@ out_gem_unload:
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+- intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
+
+ intel_csr_ucode_fini(dev);
+
+- intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
+
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
+diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
+index 02ceb7a..0433d25 100644
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
+ i915_gem_context_unreference(lctx);
+ ring->last_context = NULL;
+ }
++
++ /* Force the GPU state to be reinitialised on enabling */
++ if (ring->default_context)
++ ring->default_context->legacy_hw_ctx.initialized = false;
+ }
+ }
+
+@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
+ if (ret)
+ goto unpin_out;
+
+- if (!to->legacy_hw_ctx.initialized) {
++ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
+ hw_flags |= MI_RESTORE_INHIBIT;
+ /* NB: If we inhibit the restore, the context is not allowed to
+ * die because future work may end up depending on valid address
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 0d228f9..0f42a27 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
+ spt_irq_handler(dev, pch_iir);
+ else
+ cpt_irq_handler(dev, pch_iir);
+- } else
+- DRM_ERROR("The master control interrupt lied (SDE)!\n");
+-
++ } else {
++ /*
++ * Like on previous PCH there seems to be something
++ * fishy going on with forwarding PCH interrupts.
++ */
++ DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
++ }
+ }
+
+ I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index a6752a6..7e6158b 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
++ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
++ intel_encoder->type == INTEL_OUTPUT_DP_MST) {
+ switch (crtc_state->port_clock / 2) {
+ case 81000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 32cf973..f859a5b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
+ pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ }
+
+- /* Clamp bpp to 8 on screens without EDID 1.4 */
+- if (connector->base.display_info.bpc == 0 && bpp > 24) {
+- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+- bpp);
+- pipe_config->pipe_bpp = 24;
++ /* Clamp bpp to default limit on screens without EDID 1.4 */
++ if (connector->base.display_info.bpc == 0) {
++ int type = connector->base.connector_type;
++ int clamp_bpp = 24;
++
++ /* Fall back to 18 bpp when DP sink capability is unknown. */
++ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
++ type == DRM_MODE_CONNECTOR_eDP)
++ clamp_bpp = 18;
++
++ if (bpp > clamp_bpp) {
++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
++ bpp, clamp_bpp);
++ pipe_config->pipe_bpp = clamp_bpp;
++ }
+ }
+ }
+
+@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ bool can_position = false;
+
+- /* use scaler when colorkey is not required */
+- if (INTEL_INFO(plane->dev)->gen >= 9 &&
+- state->ckey.flags == I915_SET_COLORKEY_NONE) {
+- min_scale = 1;
+- max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
++ if (INTEL_INFO(plane->dev)->gen >= 9) {
++ /* use scaler when colorkey is not required */
++ if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
++ min_scale = 1;
++ max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
++ }
+ can_position = true;
+ }
+
+@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ mutex_lock(&dev->struct_mutex);
+ intel_cleanup_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
++
++ intel_teardown_gmbus(dev);
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+index a5e99ac..a8912ae 100644
+--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
++++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ gpio = *data++;
+
+ /* pull up/down */
+- action = *data++;
++ action = *data++ & 1;
++
++ if (gpio >= ARRAY_SIZE(gtable)) {
++ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
++ goto out;
++ }
+
+ function = gtable[gpio].function_reg;
+ pad = gtable[gpio].pad_reg;
+@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ vlv_gpio_nc_write(dev_priv, pad, val);
+ mutex_unlock(&dev_priv->sb_lock);
+
++out:
+ return data;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
+index b177857..d7a6437 100644
+--- a/drivers/gpu/drm/i915/intel_hotplug.c
++++ b/drivers/gpu/drm/i915/intel_hotplug.c
+@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ connector->polled = intel_connector->polled;
+- if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+- connector->polled = DRM_CONNECTOR_POLL_HPD;
++
++ /* MST has a dynamic intel_connector->encoder and its reprobing
++ * is all handled by the MST helpers. */
+ if (intel_connector->mst_port)
++ continue;
++
++ if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
++ intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index 8324654..f3bee54 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
+ return 0;
+
+ err:
+- while (--pin) {
++ while (pin--) {
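++ /* post-decrement also unwinds pin 0, which the old
++ * pre-decrement loop skipped */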
+ if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ continue;
+
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 88e12bd..d69547a 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 9461a23..f6b2a81 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+ if (invalidate_domains) {
+@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ }
+ if (invalidate_domains) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 2e7cbe9..2a5ed74 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+
++ mutex_lock(&drm->dev->mode_config.mutex);
+ if (plugged)
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ else
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ mutex_unlock(&drm->dev->mode_config.mutex);
++
+ drm_helper_hpd_irq_event(connector->dev);
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 64c8d93..58a3f7c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
+ nv_crtc->lut.depth = 0;
+ }
+
+- /* Make sure that drm and hw vblank irqs get resumed if needed. */
+- for (head = 0; head < dev->mode_config.num_crtc; head++)
+- drm_vblank_on(dev, head);
+-
+ /* This should ensure we don't hit a locking problem when someone
+ * wakes us up via a connector. We should never go into suspend
+ * while the display is on anyways.
+@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
+
+ drm_helper_resume_force_mode(dev);
+
++ /* Make sure that drm and hw vblank irqs get resumed if needed. */
++ for (head = 0; head < dev->mode_config.num_crtc; head++)
++ drm_vblank_on(dev, head);
++
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
+index 60e32c4..35ecc0d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
++++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
+@@ -24,7 +24,7 @@
+ static int nouveau_platform_probe(struct platform_device *pdev)
+ {
+ const struct nvkm_device_tegra_func *func;
+- struct nvkm_device *device;
++ struct nvkm_device *device = NULL;
+ struct drm_device *drm;
+ int ret;
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+index 7f8a427..e7e581d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
+
+ if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
+ return -ENOMEM;
+- *pdevice = &tdev->device;
++
+ tdev->func = func;
+ tdev->pdev = pdev;
+ tdev->irq = -1;
+
+ tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+- if (IS_ERR(tdev->vdd))
+- return PTR_ERR(tdev->vdd);
++ if (IS_ERR(tdev->vdd)) {
++ ret = PTR_ERR(tdev->vdd);
++ goto free;
++ }
+
+ tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+- if (IS_ERR(tdev->rst))
+- return PTR_ERR(tdev->rst);
++ if (IS_ERR(tdev->rst)) {
++ ret = PTR_ERR(tdev->rst);
++ goto free;
++ }
+
+ tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+- if (IS_ERR(tdev->clk))
+- return PTR_ERR(tdev->clk);
++ if (IS_ERR(tdev->clk)) {
++ ret = PTR_ERR(tdev->clk);
++ goto free;
++ }
+
+ tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+- if (IS_ERR(tdev->clk_pwr))
+- return PTR_ERR(tdev->clk_pwr);
++ if (IS_ERR(tdev->clk_pwr)) {
++ ret = PTR_ERR(tdev->clk_pwr);
++ goto free;
++ }
+
+ nvkm_device_tegra_probe_iommu(tdev);
+
+ ret = nvkm_device_tegra_power_up(tdev);
+ if (ret)
+- return ret;
++ goto remove;
+
+ tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+ ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
+ cfg, dbg, detect, mmio, subdev_mask,
+ &tdev->device);
+ if (ret)
+- return ret;
++ goto powerdown;
++
++ *pdevice = &tdev->device;
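++ /* publish the device to the caller only on full success */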
+
+ return 0;
++
++powerdown:
++ nvkm_device_tegra_power_down(tdev);
++remove:
++ nvkm_device_tegra_remove_iommu(tdev);
++free:
++ kfree(tdev);
++ return ret;
+ }
+ #else
+ int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+index 74e2f7c..9688970 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
+ .outp = outp,
+ }, *dp = &_dp;
+ u32 datarate = 0;
++ u8 pwr;
+ int ret;
+
+ if (!outp->base.info.location && disp->func->sor.magic)
+@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
+ /* disable link interrupt handling during link training */
+ nvkm_notify_put(&outp->irq);
+
++ /* ensure sink is not in a low-power state */
++ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
++ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
++ pwr &= ~DPCD_SC00_SET_POWER;
++ pwr |= DPCD_SC00_SET_POWER_D0;
++ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
++ }
++ }
++
+ /* enable down-spreading and execute pre-train script from vbios */
+ dp_link_train_init(dp, outp->dpcd[3] & 0x01);
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+index 9596290..6e10c5e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+@@ -71,5 +71,11 @@
+ #define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
+ #define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
+
++/* DPCD Sink Control */
++#define DPCD_SC00 0x00600
++#define DPCD_SC00_SET_POWER 0x03
++#define DPCD_SC00_SET_POWER_D0 0x01
++#define DPCD_SC00_SET_POWER_D3 0x03
++
+ void nvkm_dp_train(struct work_struct *);
+ #endif
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 2ae8577..7c2e782 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+ cmd->command_size))
+ return -EFAULT;
+
+- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
++ reloc_info = kmalloc_array(cmd->relocs_num,
++ sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ if (!reloc_info)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 7520727..367a916 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ if (ASIC_IS_DCE8(rdev)) {
++ unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++ div = radeon_audio_decode_dfs_div(div);
++
++ if (div)
++ clock = clock * 100 / div;
++
+ WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+ } else {
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 9953356..3cf04a2 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+ * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
++ if (ASIC_IS_DCE41(rdev)) {
++ unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
++ DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++ DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++ div = radeon_audio_decode_dfs_div(div);
++
++ if (div)
++ clock = 100 * clock / div;
++ }
++
+ WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 4aa5f75..13b6029 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -511,6 +511,11 @@
+ #define DCCG_AUDIO_DTO1_CNTL 0x05cc
+ # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+
++#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
++
+ /* DCE 4.0 AFMT */
+ #define HDMI_CONTROL 0x7030
+ # define HDMI_KEEPOUT_MODE (1 << 0)
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 87db649..5580568 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -268,6 +268,7 @@ struct radeon_clock {
+ uint32_t current_dispclk;
+ uint32_t dp_extclk;
+ uint32_t max_pixel_clock;
++ uint32_t vco_freq;
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 8f28524..de9a2ff 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
++ if (((dev->pdev->device == 0x9802) ||
++ (dev->pdev->device == 0x9805) ||
++ (dev->pdev->device == 0x9806)) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+ }
+
+- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+- if ((dev->pdev->device == 0x9805) &&
+- (dev->pdev->subsystem_vendor == 0x1734) &&
+- (dev->pdev->subsystem_device == 0x11bd)) {
+- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+- return false;
+- }
+-
+ return true;
+ }
+
+@@ -1112,6 +1106,31 @@ union firmware_info {
+ ATOM_FIRMWARE_INFO_V2_2 info_22;
+ };
+
++union igp_info {
++ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
++ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
++};
++
++static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
++{
++ struct radeon_mode_info *mode_info = &rdev->mode_info;
++ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
++ union igp_info *igp_info;
++ u8 frev, crev;
++ u16 data_offset;
++
++ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
++ &frev, &crev, &data_offset)) {
++ igp_info = (union igp_info *)(mode_info->atom_context->bios +
++ data_offset);
++ rdev->clock.vco_freq =
++ le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
++ }
++}
++
+ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ {
+ struct radeon_device *rdev = dev->dev_private;
+@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ rdev->mode_info.firmware_flags =
+ le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
++ if (ASIC_IS_DCE8(rdev))
++ rdev->clock.vco_freq =
++ le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
++ else if (ASIC_IS_DCE5(rdev))
++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
++ else if (ASIC_IS_DCE41(rdev))
++ radeon_atombios_get_dentist_vco_freq(rdev);
++ else
++ rdev->clock.vco_freq = rdev->clock.current_dispclk;
++
++ if (rdev->clock.vco_freq == 0)
++ rdev->clock.vco_freq = 360000; /* 3.6 GHz */
++
+ return true;
+ }
+
+ return false;
+ }
+
+-union igp_info {
+- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+-};
+-
+ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+ {
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 2c02e99..b214663 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+- struct radeon_connector_atom_dig *dig_connector =
+- radeon_connector->con_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ radeon_audio_write_speaker_allocation(encoder);
+ radeon_audio_write_sad_regs(encoder);
+ radeon_audio_write_latency_fields(encoder, mode);
+- if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+- radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+- else
+- radeon_audio_set_dto(encoder, dig_connector->dp_clock);
++ radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
+ radeon_audio_set_audio_packet(encoder);
+ radeon_audio_select_pin(encoder);
+
+@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
+ if (radeon_encoder->audio && radeon_encoder->audio->dpms)
+ radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
+ }
++
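++/*
++ * Decode the DENTIST_DPREFCLK_WDIVIDER register encoding into a
++ * divider scaled by 100 (e.g. a raw value of 8 decodes to 200,
++ * i.e. divide by 2.00); callers apply it as clock * 100 / div.
++ * Returns 0 for raw values outside the documented 8-127 range.
++ */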
++unsigned int radeon_audio_decode_dfs_div(unsigned int div)
++{
++ if (div >= 8 && div < 64)
++ return (div - 8) * 25 + 200;
++ else if (div >= 64 && div < 96)
++ return (div - 64) * 50 + 1600;
++ else if (div >= 96 && div < 128)
++ return (div - 96) * 100 + 3200;
++ else
++ return 0;
++}
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
+index 059cc30..5c70cce 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+ void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
++unsigned int radeon_audio_decode_dfs_div(unsigned int div);
+
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c566993..d690df5 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ }
+
+ drm_kms_helper_poll_enable(dev);
++ drm_helper_hpd_irq_event(dev);
+
+ /* set the power state here in case we are a PX system or headless */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 1eca0ac..13767d2 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ struct drm_crtc *crtc = &radeon_crtc->base;
+ unsigned long flags;
+ int r;
+- int vpos, hpos, stat, min_udelay;
++ int vpos, hpos, stat, min_udelay = 0;
++ unsigned repcnt = 4;
+ struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+
+ down_read(&rdev->exclusive_lock);
+@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ * In practice this won't execute very often unless on very fast
+ * machines because the time window for this to happen is very small.
+ */
+- for (;;) {
++ while (radeon_crtc->enabled && repcnt--) {
+ /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ * start in hpos, and to the "fudged earlier" vblank start in
+ * vpos.
+@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ /* Sleep at least until estimated real start of hw vblank */
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
++ if (min_udelay > vblank->framedur_ns / 2000) {
++ /* Don't wait ridiculously long - something is wrong */
++ repcnt = 0;
++ break;
++ }
+ usleep_range(min_udelay, 2 * min_udelay);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ };
+
++ if (!repcnt)
++ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
++ "framedur %d, linedur %d, stat %d, vpos %d, "
++ "hpos %d\n", work->crtc_id, min_udelay,
++ vblank->framedur_ns / 1000,
++ vblank->linedur_ns / 1000, stat, vpos, hpos);
++
+ /* do the flip (mmio) */
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 84d4563..fb6ad14 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <drm/drmP.h>
+ #include <drm/radeon_drm.h>
++#include <drm/drm_cache.h>
+ #include "radeon.h"
+ #include "radeon_trace.h"
+
+@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
++#else
++ /* For architectures that don't support WC memory,
++ * mask out the WC flag from the BO
++ */
++ if (!drm_arch_can_wc_memory())
++ bo->flags &= ~RADEON_GEM_GTT_WC;
+ #endif
+
+ radeon_ttm_placement_from_domain(bo, domain);
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 59abebd..2081a60 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1075,8 +1075,6 @@ force:
+
+ /* update display watermarks based on new power state */
+ radeon_bandwidth_update(rdev);
+- /* update displays */
+- radeon_dpm_display_configuration_changed(rdev);
+
+ rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+@@ -1097,6 +1095,9 @@ force:
+
+ radeon_dpm_post_set_power_state(rdev);
+
++ /* update displays */
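++ /* notify only once the new power state is fully committed */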
++ radeon_dpm_display_configuration_changed(rdev);
++
+ if (rdev->asic->dpm.force_performance_level) {
+ if (rdev->pm.dpm.thermal_active) {
+ enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index c507896..197b157 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
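++ /* Take references before dropping the lock so a concurrent free
++ * can't leave radeon_fence_wait_any() touching dead fences. */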
++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
++ radeon_fence_ref(fences[i]);
++
+ spin_unlock(&sa_manager->wq.lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
++ radeon_fence_unref(&fences[i]);
+ spin_lock(&sa_manager->wq.lock);
+ /* if we have nothing to wait for block */
+ if (r == -ENOENT) {
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index e343074..e06ac54 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+- while (--i) {
++ while (i--) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 48d97c0..3979632 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+
+ if (soffset) {
+ /* make sure object fit at this offset */
+- eoffset = soffset + size;
++ eoffset = soffset + size - 1;
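++ /* eoffset now tracks the range end inclusively (last addressable
++ * byte), so the interval-tree queries below need no further -1
++ * adjustment. */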
+ if (soffset >= eoffset) {
+ r = -EINVAL;
+ goto error_unreserve;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+- if (last_pfn > rdev->vm_manager.max_pfn) {
+- dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
++ if (last_pfn >= rdev->vm_manager.max_pfn) {
++ dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ r = -EINVAL;
+ goto error_unreserve;
+@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
++ it = interval_tree_iter_first(&vm->va, soffset, eoffset);
+ if (it && it != &bo_va->it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ if (soffset || eoffset) {
+ spin_lock(&vm->status_lock);
+ bo_va->it.start = soffset;
+- bo_va->it.last = eoffset - 1;
++ bo_va->it.last = eoffset;
+ list_add(&bo_va->vm_status, &vm->cleared);
+ spin_unlock(&vm->status_lock);
+ interval_tree_insert(&bo_va->it, &vm->va);
+@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ unsigned i;
+
+ start >>= radeon_vm_block_size;
+- end >>= radeon_vm_block_size;
++ end = (end - 1) >> radeon_vm_block_size;
+
+ for (i = start; i <= end; ++i)
+ radeon_bo_fence(vm->page_tables[i].bo, fence, true);
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 4c4a721..d1a7b58 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -915,6 +915,11 @@
+ #define DCCG_AUDIO_DTO1_PHASE 0x05c0
+ #define DCCG_AUDIO_DTO1_MODULE 0x05c4
+
++#define DENTIST_DISPCLK_CNTL 0x0490
++# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
++# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
++
+ #define AFMT_AUDIO_SRC_CONTROL 0x713c
+ #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+ /* AFMT_AUDIO_SRC_SELECT
+diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
+index 07a0d37..a01efe3 100644
+--- a/drivers/gpu/drm/radeon/vce_v1_0.c
++++ b/drivers/gpu/drm/radeon/vce_v1_0.c
+@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
+ return -EINVAL;
+ }
+
+- for (i = 0; i < sign->num; ++i) {
+- if (sign->val[i].chip_id == chip_id)
++ for (i = 0; i < le32_to_cpu(sign->num); ++i) {
++ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+ break;
+ }
+
+- if (i == sign->num)
++ if (i == le32_to_cpu(sign->num))
+ return -EINVAL;
+
+ data += (256 - 64) / 4;
+@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
+ data[1] = sign->val[i].nonce[1];
+ data[2] = sign->val[i].nonce[2];
+ data[3] = sign->val[i].nonce[3];
+- data[4] = sign->len + 64;
++ data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
+
+ memset(&data[5], 0, 44);
+ memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
+
+- data += data[4] / 4;
++ data += le32_to_cpu(data[4]) / 4;
+ data[0] = sign->val[i].sigval[0];
+ data[1] = sign->val[i].sigval[1];
+ data[2] = sign->val[i].sigval[2];
+ data[3] = sign->val[i].sigval[3];
+
+- rdev->vce.keyselect = sign->val[i].keyselect;
++ rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+index 6377e81..67cebb2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+ {
+ struct vmw_cmdbuf_man *man = header->man;
+
+- BUG_ON(!spin_is_locked(&man->lock));
++ lockdep_assert_held_once(&man->lock);
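++ /* spin_is_locked() is always false on UP kernels, so assert
++ * lock ownership via lockdep instead of BUG_ON() */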
+
+ if (header->inline_space) {
+ vmw_cmdbuf_header_inline_free(header);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index c49812b..24fb348 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,6 +25,7 @@
+ *
+ **************************************************************************/
+ #include <linux/module.h>
++#include <linux/console.h>
+
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static int __init vmwgfx_init(void)
+ {
+ int ret;
++
++#ifdef CONFIG_VGA_CONSOLE
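++ /* honour 'nomodeset': leave the VGA text console in place */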
++ if (vgacon_text_force())
++ return -EINVAL;
++#endif
++
+ ret = drm_pci_init(&driver, &vmw_pci_driver);
+ if (ret)
+ DRM_ERROR("Failed initializing DRM.\n");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 9b4bb9e..7c2e118 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ uint32_t format;
+ struct drm_vmw_size content_base_size;
+ struct vmw_resource *res;
++ unsigned int bytes_pp;
+ int ret;
+
+ switch (mode_cmd->depth) {
+ case 32:
+ case 24:
+ format = SVGA3D_X8R8G8B8;
++ bytes_pp = 4;
+ break;
+
+ case 16:
+ case 15:
+ format = SVGA3D_R5G6B5;
++ bytes_pp = 2;
+ break;
+
+ case 8:
+ format = SVGA3D_P8;
++ bytes_pp = 1;
+ break;
+
+ default:
+@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ return -EINVAL;
+ }
+
+- content_base_size.width = mode_cmd->width;
++ content_base_size.width = mode_cmd->pitch / bytes_pp;
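++ /* size the proxy surface from the pitch, not the logical
++ * width, so blits spanning the full stride stay inside it */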
+ content_base_size.height = mode_cmd->height;
+ content_base_size.depth = 1;
+
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index c4dcab0..9098f13 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
+ * on the ring. We will not signal if more data is
+ * to be placed.
+ *
++ * Based on the channel signal state, we will decide
++ * which signaling policy will be applied.
++ *
+ * If we cannot write to the ring-buffer; signal the host
+ * even if we may not have written anything. This is a rare
+ * enough condition that it should not matter.
+ */
++
++ if (channel->signal_policy)
++ signal = true;
++ else
++ kick_q = true;
++
+ if (((ret == 0) && kick_q && signal) || (ret))
+ vmbus_setevent(channel);
+
+@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+ * on the ring. We will not signal if more data is
+ * to be placed.
+ *
++ * Based on the channel signal state, we will decide
++ * which signaling policy will be applied.
++ *
+ * If we cannot write to the ring-buffer; signal the host
+ * even if we may not have written anything. This is a rare
+ * enough condition that it should not matter.
+ */
++
++ if (channel->signal_policy)
++ signal = true;
++ else
++ kick_q = true;
++
+ if (((ret == 0) && kick_q && signal) || (ret))
+ vmbus_setevent(channel);
+
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index f155b83..2b3105c 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ unsigned int pga = data->channel_data[channel].pga;
+ int fullscale = fullscale_table[pga];
+- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
++ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+
+ return DIV_ROUND_CLOSEST(reg * fullscale, mask);
+ }
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index c848789..c43318d 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ {
+ /*
++ * CPU fan speed going up and down on Dell Studio XPS 8000
++ * for unknown reasons.
++ */
++ .ident = "Dell Studio XPS 8000",
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
++ },
++ },
++ {
++ /*
+ * CPU fan speed going up and down on Dell Studio XPS 8100
+ * for unknown reasons.
+ */
+diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
+index 82de3de..685568b 100644
+--- a/drivers/hwmon/gpio-fan.c
++++ b/drivers/hwmon/gpio-fan.c
+@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+ {
+ struct gpio_fan_data *fan_data = cdev->devdata;
+- int r;
+
+ if (!fan_data)
+ return -EINVAL;
+
+- r = get_fan_speed_index(fan_data);
+- if (r < 0)
+- return r;
+-
+- *state = r;
++ *state = fan_data->speed_index;
+ return 0;
+ }
+
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index e254921..93738df 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
+ to_match = data;
+ i_csdev = to_coresight_device(dev);
+
+- if (!strcmp(to_match, dev_name(&i_csdev->dev)))
++ if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
+ return 1;
+
+ return 0;
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index f62d697..27fa0cb 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ switch (dev->device) {
+ case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
++ case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
++ case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+ case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
+ priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 0a26dd6..d6d2b35 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
+ wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
+
+ /* Check if the device started its remove_one */
+- spin_lock_irq(&cm.lock);
++ spin_lock_irqsave(&cm.lock, flags);
+ if (!cm_dev->going_down)
+ queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+ msecs_to_jiffies(wait_time));
+- spin_unlock_irq(&cm.lock);
++ spin_unlock_irqrestore(&cm.lock, flags);
+
+ cm_id_priv->timewait_info = NULL;
+ }
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 2d762a2..17a15c5 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
+ if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+ return ret;
+
+- if (dev_type == ARPHRD_ETHER)
++ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
+ ndev = dev_get_by_index(&init_net, bound_if_index);
+
+ ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index cb78b1e..f504ba7 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
+ error = l2t_send(tdev, skb, l2e);
+ if (error < 0)
+ kfree_skb(skb);
+- return error;
++ return error < 0 ? error : 0;
+ }
+
+ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+ error = cxgb3_ofld_send(tdev, skb);
+ if (error < 0)
+ kfree_skb(skb);
+- return error;
++ return error < 0 ? error : 0;
+ }
+
+ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
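
Both iwch_cm.c hunks fix the same pattern: l2t_send() and cxgb3_ofld_send() may return a positive NET_XMIT_* status that is not a failure, while their callers expect the 0/-errno convention, so a successful send could be reported as an error. The normalization idiom, isolated:

    /* Squash a "negative errno or positive status" return into the
     * 0/-errno convention the callers expect. */
    static inline int normalize_ret(int error)
    {
        return error < 0 ? error : 0;
    }
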
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 7e97cb5..c4e0915 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_sge_rd = props->max_sge;
+ props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+- props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
++ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+ props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 40f85bb..3eff35c 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
+ 32768 /* 1E */
+ };
+
+-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
++static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
++ gfp_t gfp)
+ {
+- unsigned long page = get_zeroed_page(GFP_KERNEL);
++ unsigned long page = get_zeroed_page(gfp);
+
+ /*
+ * Free the page if someone raced with us installing it.
+@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+ * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+ */
+ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+- enum ib_qp_type type, u8 port)
++ enum ib_qp_type type, u8 port, gfp_t gfp)
+ {
+ u32 i, offset, max_scan, qpn;
+ struct qpn_map *map;
+@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+- get_map_page(qpt, map);
++ get_map_page(qpt, map, gfp);
+ if (unlikely(!map->page))
+ break;
+ }
+@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret;
++ gfp_t gfp;
++
+
+ if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
+- init_attr->create_flags) {
+- ret = ERR_PTR(-EINVAL);
+- goto bail;
+- }
++ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
++ return ERR_PTR(-EINVAL);
++
++ /* GFP_NOIO is applicable in RC QPs only */
++ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
++ init_attr->qp_type != IB_QPT_RC)
++ return ERR_PTR(-EINVAL);
++
++ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
++ GFP_NOIO : GFP_KERNEL;
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ sz = sizeof(struct qib_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct qib_swqe);
+- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
++ swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
++ gfp, PAGE_KERNEL);
+ if (swq == NULL) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
++ qp = kzalloc(sz + sg_list_sz, gfp);
+ if (!qp) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_swq;
+ }
+ RCU_INIT_POINTER(qp->next, NULL);
+- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
++ qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
+ if (!qp->s_hdr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct qib_rwqe);
+- qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+- qp->r_rq.size * sz);
++ if (gfp != GFP_NOIO)
++ qp->r_rq.wq = vmalloc_user(
++ sizeof(struct qib_rwq) +
++ qp->r_rq.size * sz);
++ else
++ qp->r_rq.wq = __vmalloc(
++ sizeof(struct qib_rwq) +
++ qp->r_rq.size * sz,
++ gfp, PAGE_KERNEL);
++
+ if (!qp->r_rq.wq) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qp;
+@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ dev = to_idev(ibpd->device);
+ dd = dd_from_dev(dev);
+ err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+- init_attr->port_num);
++ init_attr->port_num, gfp);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ vfree(qp->r_rq.wq);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+index f8ea069..b2fb528 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ struct qib_mcast *mcast = NULL;
+- struct qib_mcast_qp *p, *tmp;
++ struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ struct rb_node *n;
+ int last = 0;
+ int ret;
+
+- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+- ret = -EINVAL;
+- goto bail;
+- }
++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
++ return -EINVAL;
+
+ spin_lock_irq(&ibp->lock);
+
+@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ while (1) {
+ if (n == NULL) {
+ spin_unlock_irq(&ibp->lock);
+- ret = -EINVAL;
+- goto bail;
++ return -EINVAL;
+ }
+
+ mcast = rb_entry(n, struct qib_mcast, rb_node);
+@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ */
+ list_del_rcu(&p->list);
+ mcast->n_attached--;
++ delp = p;
+
+ /* If this was the last attached QP, remove the GID too. */
+ if (list_empty(&mcast->qp_list)) {
+@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ }
+
+ spin_unlock_irq(&ibp->lock);
++ /* QP not attached */
++ if (!delp)
++ return -EINVAL;
++ /*
++ * Wait for any list walkers to finish before freeing the
++ * list element.
++ */
++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
++ qib_mcast_qp_free(delp);
+
+- if (p) {
+- /*
+- * Wait for any list walkers to finish before freeing the
+- * list element.
+- */
+- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+- qib_mcast_qp_free(p);
+- }
+ if (last) {
+ atomic_dec(&mcast->refcount);
+ wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ dev->n_mcast_grps_allocated--;
+ spin_unlock_irq(&dev->n_mcast_grps_lock);
+ }
+-
+- ret = 0;
+-
+-bail:
+- return ret;
++ return 0;
+ }
+
+ int qib_mcast_tree_empty(struct qib_ibport *ibp)
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index b12a5d5..37199b9 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
+ priority > AT91_AIC_IRQ_MAX_PRIORITY)
+ return -EINVAL;
+
+- *val &= AT91_AIC_PRIOR;
++ *val &= ~AT91_AIC_PRIOR;
+ *val |= priority;
+
+ return 0;
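
The aic_common_set_priority() fix is the classic read-modify-write inversion: clearing a field requires AND-ing with the complement of its mask; AND-ing with the mask itself keeps the stale field (which then gets OR-merged with the new priority) and wipes every other bit in the register. The correct idiom in isolation:

    #include <stdint.h>

    #define PRIOR_MASK 0x7u   /* stand-in for AT91_AIC_PRIOR */

    /* Replace one field of a register value, preserving all other bits. */
    uint32_t set_priority(uint32_t reg, uint32_t prio)
    {
        reg &= ~PRIOR_MASK;          /* clear only the field */
        reg |= prio & PRIOR_MASK;    /* install the new value */
        return reg;
    }
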
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index e23d1d1..a159529f 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
+ lpi_set_config(d, true);
+ }
+
+-static void its_eoi_irq(struct irq_data *d)
+-{
+- gic_write_eoir(d->hwirq);
+-}
+-
+ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+ {
+@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
+ .name = "ITS",
+ .irq_mask = its_mask_irq,
+ .irq_unmask = its_unmask_irq,
+- .irq_eoi = its_eoi_irq,
++ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_set_affinity,
+ .irq_compose_msi_msg = its_irq_compose_msi_msg,
+ };
+diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
+index c22e2d4..efe5084 100644
+--- a/drivers/irqchip/irq-mxs.c
++++ b/drivers/irqchip/irq-mxs.c
+@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
+ writel(0, icoll_priv.intr + i);
+
+ icoll_add_domain(np, ASM9260_NUM_IRQS);
++ set_handle_irq(icoll_handle_irq);
+
+ return 0;
+ }
+diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
+index 8587d0f..f6cb1b8 100644
+--- a/drivers/irqchip/irq-omap-intc.c
++++ b/drivers/irqchip/irq-omap-intc.c
+@@ -47,6 +47,7 @@
+ #define INTC_ILR0 0x0100
+
+ #define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
++#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
+ #define INTCPS_NR_ILR_REGS 128
+ #define INTCPS_NR_MIR_REGS 4
+
+@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
+ static asmlinkage void __exception_irq_entry
+ omap_intc_handle_irq(struct pt_regs *regs)
+ {
++ extern unsigned long irq_err_count;
+ u32 irqnr;
+
+ irqnr = intc_readl(INTC_SIR);
++
++ /*
++	 * A spurious IRQ can result if the interrupt that triggered the
++ * sorting is no longer active during the sorting (10 INTC
++ * functional clock cycles after interrupt assertion). Or a
++ * change in interrupt mask affected the result during sorting
++ * time. There is no special handling required except ignoring
++ * the SIR register value just read and retrying.
++ * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
++ *
++	 * Often, a spurious interrupt situation has been fixed
++	 * by adding a flush for the posted write acking the IRQ in
++	 * the device driver. Typically, this is going to be the device
++	 * driver whose interrupt was handled just before the spurious
++	 * IRQ occurred. Pay attention to those device drivers if you
++	 * run into the spurious IRQ condition below.
++ */
++ if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
++ pr_err_once("%s: spurious irq!\n", __func__);
++ irq_err_count++;
++ omap_ack_irq(NULL);
++ return;
++ }
++
+ irqnr &= ACTIVEIRQ_MASK;
+- WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
+ handle_domain_irq(domain, irqnr, regs);
+ }
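
On the spurious-IRQ detection just added: per the comment's TRM reference, the INTC signals a failed priority sort by driving all SIR bits above the 7-bit active-IRQ field high, so the test compares the whole upper field against the mask rather than checking for any nonzero bit. A sketch of the check, assuming the register layout from the hunk:

    #include <stdbool.h>
    #include <stdint.h>

    #define ACTIVEIRQ_MASK   0x7fu               /* bits 6..0: IRQ number */
    #define SPURIOUSIRQ_MASK (0x1ffffffu << 7)   /* bits 31..7 */

    bool sir_is_spurious(uint32_t sir)
    {
        /* all upper bits set -> the sorter produced no valid result */
        return (sir & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK;
    }
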
+
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 83392f8..22b9e34 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
+ do {
+ ret = btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&writes);
++ cond_resched();
+
+ if (ret && ret != -EAGAIN)
+ pr_warn("gc failed!");
+@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ rw_lock(true, b, b->level);
+
+ if (b->key.ptr[0] != btree_ptr ||
+- b->seq != seq + 1)
++ b->seq != seq + 1) {
++ op->lock = b->level;
+ goto out;
++ }
+ }
+
+ SET_KEY_PTRS(check_key, 1);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 679a093..8d0ead9 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
+ sysfs_create_link(&c->kobj, &d->kobj, d->name),
+ "Couldn't create device <-> cache set symlinks");
++
++ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
+ }
+
+ static void bcache_device_detach(struct bcache_device *d)
+@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ buf[SB_LABEL_SIZE] = '\0';
+ env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+
+- if (atomic_xchg(&dc->running, 1))
++ if (atomic_xchg(&dc->running, 1)) {
++ kfree(env[1]);
++ kfree(env[2]);
+ return;
++ }
+
+ if (!d->c &&
+ BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+@@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ else
+ err = "device busy";
+ mutex_unlock(&bch_register_lock);
++ if (attr == &ksysfs_register_quiet)
++ goto out;
+ }
+ goto err;
+ }
+@@ -1971,8 +1978,7 @@ out:
+ err_close:
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ err:
+- if (attr != &ksysfs_register_quiet)
+- pr_info("error opening %s: %s", path, err);
++ pr_info("error opening %s: %s", path, err);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -2066,8 +2072,10 @@ static int __init bcache_init(void)
+ closure_debug_init();
+
+ bcache_major = register_blkdev(0, "bcache");
+- if (bcache_major < 0)
++ if (bcache_major < 0) {
++ unregister_reboot_notifier(&reboot);
+ return bcache_major;
++ }
+
+ if (!(bcache_wq = create_workqueue("bcache")) ||
+ !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index b23f88d..b9346cd 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+
+ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ {
++ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
++
++ BUG_ON(KEY_INODE(k) != dc->disk.id);
++
+ return KEY_DIRTY(k);
+ }
+
+@@ -372,11 +376,24 @@ next:
+ }
+ }
+
++/*
++ * Returns true if we scanned the entire disk
++ */
+ static bool refill_dirty(struct cached_dev *dc)
+ {
+ struct keybuf *buf = &dc->writeback_keys;
++ struct bkey start = KEY(dc->disk.id, 0, 0);
+ struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+- bool searched_from_start = false;
++ struct bkey start_pos;
++
++ /*
++ * make sure keybuf pos is inside the range for this disk - at bringup
++ * we might not be attached yet so this disk's inode nr isn't
++ * initialized then
++ */
++ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
++ bkey_cmp(&buf->last_scanned, &end) > 0)
++ buf->last_scanned = start;
+
+ if (dc->partial_stripes_expensive) {
+ refill_full_stripes(dc);
+@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
+ return false;
+ }
+
+- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+- buf->last_scanned = KEY(dc->disk.id, 0, 0);
+- searched_from_start = true;
+- }
+-
++ start_pos = buf->last_scanned;
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
++ if (bkey_cmp(&buf->last_scanned, &end) < 0)
++ return false;
++
++ /*
++ * If we get to the end start scanning again from the beginning, and
++ * only scan up to where we initially started scanning from:
++ */
++ buf->last_scanned = start;
++ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
++
++ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
+ }
+
+ static int bch_writeback_thread(void *arg)
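
The reworked refill_dirty() above turns a single forward scan into a circular one: resume from wherever the last pass stopped, and if the end of the device is reached, wrap to the start and continue only up to the original resume point, so "scanned the entire disk" is reported exactly when one full lap completed. The control flow in miniature, with a hypothetical visit callback standing in for bch_refill_keybuf():

    #include <stdbool.h>
    #include <stddef.h>

    /* Circular scan of [0, n): resume from *pos; stop early if the
     * consumer fills up (visit returns false); otherwise wrap around
     * and finish the lap.  Returns true only on a complete lap. */
    bool scan_all(size_t *pos, size_t n, bool (*visit)(size_t, void *),
                  void *arg)
    {
        size_t start = *pos, i;

        for (i = start; i < n; i++)
            if (!visit(i, arg)) {
                *pos = i;
                return false;
            }
        for (i = 0; i < start; i++)        /* wrapped portion */
            if (!visit(i, arg)) {
                *pos = i;
                return false;
            }
        *pos = start;
        return true;
    }
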
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 0a9dab1..073a042 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+
+ static inline void bch_writeback_queue(struct cached_dev *dc)
+ {
+- wake_up_process(dc->writeback_thread);
++ if (!IS_ERR_OR_NULL(dc->writeback_thread))
++ wake_up_process(dc->writeback_thread);
+ }
+
+ static inline void bch_writeback_add(struct cached_dev *dc)
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index fae34e7..12b5216 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -69,7 +69,7 @@ struct dm_exception_store_type {
+ * Update the metadata with this exception.
+ */
+ void (*commit_exception) (struct dm_exception_store *store,
+- struct dm_exception *e,
++ struct dm_exception *e, int valid,
+ void (*callback) (void *, int success),
+ void *callback_context);
+
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 3164b8b..4d39093 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ }
+
+ static void persistent_commit_exception(struct dm_exception_store *store,
+- struct dm_exception *e,
++ struct dm_exception *e, int valid,
+ void (*callback) (void *, int success),
+ void *callback_context)
+ {
+@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
+ struct core_exception ce;
+ struct commit_callback *cb;
+
++ if (!valid)
++ ps->valid = 0;
++
+ ce.old_chunk = e->old_chunk;
+ ce.new_chunk = e->new_chunk;
+ write_exception(ps, ps->current_committed++, &ce);
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 9b7c8c8..4d50a12 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
+ }
+
+ static void transient_commit_exception(struct dm_exception_store *store,
+- struct dm_exception *e,
++ struct dm_exception *e, int valid,
+ void (*callback) (void *, int success),
+ void *callback_context)
+ {
+ /* Just succeed */
+- callback(callback_context, 1);
++ callback(callback_context, valid);
+ }
+
+ static void transient_usage(struct dm_exception_store *store,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index c06b74e..61f184a 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
+ dm_table_event(s->ti->table);
+ }
+
+-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
++static void pending_complete(void *context, int success)
+ {
++ struct dm_snap_pending_exception *pe = context;
+ struct dm_exception *e;
+ struct dm_snapshot *s = pe->snap;
+ struct bio *origin_bios = NULL;
+@@ -1509,24 +1510,13 @@ out:
+ free_pending_exception(pe);
+ }
+
+-static void commit_callback(void *context, int success)
+-{
+- struct dm_snap_pending_exception *pe = context;
+-
+- pending_complete(pe, success);
+-}
+-
+ static void complete_exception(struct dm_snap_pending_exception *pe)
+ {
+ struct dm_snapshot *s = pe->snap;
+
+- if (unlikely(pe->copy_error))
+- pending_complete(pe, 0);
+-
+- else
+- /* Update the metadata if we are persistent */
+- s->store->type->commit_exception(s->store, &pe->e,
+- commit_callback, pe);
++ /* Update the metadata if we are persistent */
++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
++ pending_complete, pe);
+ }
+
+ /*
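
The exception-store changes across dm-exception-store.h, dm-snap-persistent.c, dm-snap-transient.c and dm-snap.c are one refactor: the copy result now travels into commit_exception() as a valid flag, and both the error and success paths funnel through a single completion callback instead of a separate early exit. Stripped to its shape (names simplified):

    typedef void (*complete_fn)(void *context, int success);

    /* Every store type reports through the same callback; an invalid
     * exception simply completes with success == 0, so one routine
     * owns all of the cleanup. */
    void commit_exception(int valid, complete_fn callback, void *context)
    {
        callback(context, valid);
    }
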
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 63903a5..a1cc797 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+- cancel_delayed_work(&pool->waker);
+- cancel_delayed_work(&pool->no_space_timeout);
++ cancel_delayed_work_sync(&pool->waker);
++ cancel_delayed_work_sync(&pool->no_space_timeout);
+ flush_workqueue(pool->wq);
+ (void) commit(pool);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 5df4048..dd83492 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
+
+ if (clone)
+ free_rq_clone(clone);
++ else if (!tio->md->queue->mq_ops)
++ free_rq_tio(tio);
+ }
+
+ /*
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index fca6dbc..7e44005 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
+
+ static int brb_pop(struct bop_ring_buffer *brb)
+ {
+- struct block_op *bop;
+-
+ if (brb_empty(brb))
+ return -ENODATA;
+
+- bop = brb->bops + brb->begin;
+ brb->begin = brb_next(brb, brb->begin);
+
+ return 0;
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index c38ef1a..e2a3833 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
+ dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
+ __func__, c->delivery_system, fe->ops.info.type);
+
+- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
+- * do it, it is done for it. */
+- info->caps |= FE_CAN_INVERSION_AUTO;
++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
++ info->caps |= FE_CAN_INVERSION_AUTO;
+ err = 0;
+ break;
+ }
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index 0e209b5..c6abeb4 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
+ {
+ struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
+ struct tda1004x_state* state = fe->demodulator_priv;
++ int status;
+
+ dprintk("%s\n", __func__);
+
++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
++ if (status == -1)
++ return -EIO;
++
++ /* Only update the properties cache if device is locked */
++ if (!(status & 8))
++ return 0;
++
+ // inversion status
+ fe_params->inversion = INVERSION_OFF;
+ if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
+diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
+index 7830aef..40f7768 100644
+--- a/drivers/media/rc/sunxi-cir.c
++++ b/drivers/media/rc/sunxi-cir.c
+@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
+ if (!ir)
+ return -ENOMEM;
+
++ spin_lock_init(&ir->ir_lock);
++
+ if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
+ ir->fifo_size = 64;
+ else
+diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
+index ce157ed..0e1ca2b 100644
+--- a/drivers/media/tuners/si2157.c
++++ b/drivers/media/tuners/si2157.c
+@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
+ len = fw->data[fw->size - remaining];
+ if (len > SI2157_ARGLEN) {
+ dev_err(&client->dev, "Bad firmware length\n");
++ ret = -EINVAL;
+ goto err_release_firmware;
+ }
+ memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 146071b..bfff1d1 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ struct sd *sd = (struct sd *) gspca_dev;
+
+- /* Set requested framerate */
+- sd->frame_rate = tpf->denominator / tpf->numerator;
++ if (tpf->numerator == 0 || tpf->denominator == 0)
++ /* Set default framerate */
++ sd->frame_rate = 30;
++ else
++ /* Set requested framerate */
++ sd->frame_rate = tpf->denominator / tpf->numerator;
++
+ if (gspca_dev->streaming)
+ set_frame_rate(gspca_dev);
+
+diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
+index c70ff40..c028a5c 100644
+--- a/drivers/media/usb/gspca/topro.c
++++ b/drivers/media/usb/gspca/topro.c
+@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ int fr, i;
+
+- sd->framerate = tpf->denominator / tpf->numerator;
++ if (tpf->numerator == 0 || tpf->denominator == 0)
++ sd->framerate = 30;
++ else
++ sd->framerate = tpf->denominator / tpf->numerator;
++
+ if (gspca_dev->streaming)
+ setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 27b4b9e..502984c 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ return res | POLLERR;
+
+ /*
+- * For output streams you can write as long as there are fewer buffers
+- * queued than there are buffers available.
++ * For output streams you can call write() as long as there are fewer
++ * buffers queued than there are buffers available.
+ */
+- if (q->is_output && q->queued_count < q->num_buffers)
++ if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
+ return res | POLLOUT | POLLWRNORM;
+
+ if (list_empty(&q->done_list)) {
+diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
+index c241e15..cbd4331 100644
+--- a/drivers/misc/cxl/vphb.c
++++ b/drivers/misc/cxl/vphb.c
+@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ mask <<= shift;
+ val <<= shift;
+
+- v = (in_le32(ioaddr) & ~mask) || (val & mask);
++ v = (in_le32(ioaddr) & ~mask) | (val & mask);
+
+ out_le32(ioaddr, v);
+ return PCIBIOS_SUCCESSFUL;
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index b2f2486..80f9afc 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+ {
+ struct mei_cl *cl = file->private_data;
+
+- return mei_cl_notify_request(cl, file, request);
++ if (request != MEI_HBM_NOTIFICATION_START &&
++ request != MEI_HBM_NOTIFICATION_STOP)
++ return -EINVAL;
++
++ return mei_cl_notify_request(cl, file, (u8)request);
+ }
+
+ /**
+@@ -657,7 +661,9 @@ out:
+ * @file: pointer to file structure
+ * @band: band bitmap
+ *
+- * Return: poll mask
++ * Return: negative on error,
++ *         0 if it made no changes,
++ *         and positive if a process was added or deleted
+ */
+ static int mei_fasync(int fd, struct file *file, int band)
+ {
+@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
+ struct mei_cl *cl = file->private_data;
+
+ if (!mei_cl_is_connected(cl))
+- return POLLERR;
++ return -ENODEV;
+
+ return fasync_helper(fd, file, band, &cl->ev_async);
+ }
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 3a9a79e..3d5087b 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch card to HS mode */
+- val = EXT_CSD_TIMING_HS |
+- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
++ val = EXT_CSD_TIMING_HS;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time,
+@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch HS400 to HS DDR */
+- val = EXT_CSD_TIMING_HS |
+- card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
++ val = EXT_CSD_TIMING_HS;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ val, card->ext_csd.generic_cmd6_time,
+ true, send_status, true);
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 141eaa9..967535d 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+- (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+- card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
+- card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
++ (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
++ card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
++ card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ err = mmc_execute_tuning(card);
+
+ /*
+@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ * difference between v3.00 and 3.01 spec means that CMD19
+ * tuning is also available for DDR50 mode.
+ */
+- if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
++ if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
+ pr_warn("%s: ddr50 tuning failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 16d838e..467b3cf 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+ * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ */
+ if (!mmc_host_is_spi(card->host) &&
+- ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
+- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
++ ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
++ (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
+ err = mmc_execute_tuning(card);
+ out:
+ return err;
+@@ -630,7 +630,7 @@ try_again:
+ */
+ if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+- ocr);
++ ocr_card);
+ if (err == -EAGAIN) {
+ sdio_reset(host);
+ mmc_go_idle(host);
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index fb26674..acece32 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
+ {
+ .id = 0x00280180,
+ .mask = 0x00ffffff,
+- .data = &variant_u300,
++ .data = &variant_nomadik,
+ },
+ {
+ .id = 0x00480180,
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index ce08896..28a057f 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+ goto out;
+ } else {
+- mmc->caps |= host->pdata->gpio_card_ro_invert ?
++ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
+
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index f6047fc..a5cda92 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
+ .ops = &sdhci_acpi_ops_int,
+ };
+
++static int bxt_get_cd(struct mmc_host *mmc)
++{
++ int gpio_cd = mmc_gpio_get_cd(mmc);
++ struct sdhci_host *host = mmc_priv(mmc);
++ unsigned long flags;
++ int ret = 0;
++
++ if (!gpio_cd)
++ return 0;
++
++ pm_runtime_get_sync(mmc->parent);
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (host->flags & SDHCI_DEVICE_DEAD)
++ goto out;
++
++ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
++out:
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ pm_runtime_mark_last_busy(mmc->parent);
++ pm_runtime_put_autosuspend(mmc->parent);
++
++ return ret;
++}
++
+ static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+ {
+@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
+
+ /* Platform specific code during sd probe slot goes here */
+
++ if (hid && !strcmp(hid, "80865ACA"))
++ host->mmc_host_ops.get_cd = bxt_get_cd;
++
+ return 0;
+ }
+
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index cf7ad45..45ee07d 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
+ if (sdhci_pci_spt_drive_strength > 0)
+ drive_strength = sdhci_pci_spt_drive_strength & 0xf;
+ else
+- drive_strength = 1; /* 33-ohm */
++ drive_strength = 0; /* Default 50-ohm */
+
+ if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
+ drive_strength = 0; /* Default 50-ohm */
+@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
+ sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
+ }
+
++static int bxt_get_cd(struct mmc_host *mmc)
++{
++ int gpio_cd = mmc_gpio_get_cd(mmc);
++ struct sdhci_host *host = mmc_priv(mmc);
++ unsigned long flags;
++ int ret = 0;
++
++ if (!gpio_cd)
++ return 0;
++
++ pm_runtime_get_sync(mmc->parent);
++
++ spin_lock_irqsave(&host->lock, flags);
++
++ if (host->flags & SDHCI_DEVICE_DEAD)
++ goto out;
++
++ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
++out:
++ spin_unlock_irqrestore(&host->lock, flags);
++
++ pm_runtime_mark_last_busy(mmc->parent);
++ pm_runtime_put_autosuspend(mmc->parent);
++
++ return ret;
++}
++
+ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ slot->cd_con_id = NULL;
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
++ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
++ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
++ slot->host->mmc_host_ops.get_cd = bxt_get_cd;
++
+ return 0;
+ }
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index b48565e..8814eb6 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+
+ BUG_ON(len > 65536);
+
+- /* tran, valid */
+- sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
+- desc += host->desc_sz;
++ if (len) {
++ /* tran, valid */
++ sdhci_adma_write_desc(host, desc, addr, len,
++ ADMA2_TRAN_VALID);
++ desc += host->desc_sz;
++ }
+
+ /*
+ * If this triggers then we have a calculation bug
+@@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ sdhci_runtime_pm_get(host);
+
+ /* Firstly check card presence */
+- present = sdhci_do_get_cd(host);
++ present = mmc->ops->get_cd(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+@@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
+
+ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ {
+- if (host->runtime_suspended || host->bus_on)
++ if (host->bus_on)
+ return;
+ host->bus_on = true;
+ pm_runtime_get_noresume(host->mmc->parent);
+@@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+
+ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+ {
+- if (host->runtime_suspended || !host->bus_on)
++ if (!host->bus_on)
+ return;
+ host->bus_on = false;
+ pm_runtime_put_noidle(host->mmc->parent);
+@@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
++ host->mmc_host_ops = sdhci_ops;
++ mmc->ops = &host->mmc_host_ops;
+
+ return host;
+ }
+@@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host)
+ /*
+ * Set host parameters.
+ */
+- mmc->ops = &sdhci_ops;
+ max_clk = host->max_clk;
+
+ if (host->ops->get_min_clock)
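
The sdhci.c and sdhci.h hunks are what make the two bxt_get_cd() overrides earlier in this patch possible: instead of every controller sharing the one static sdhci_ops, each host now gets its own copy of the ops at allocation time, so a platform quirk can replace a single callback on one instance without mutating global state. The pattern in plain C (names hypothetical):

    #include <stdlib.h>

    struct host_ops { int (*get_cd)(void *priv); };

    static int default_get_cd(void *priv) { (void)priv; return 1; }
    static const struct host_ops default_ops = { .get_cd = default_get_cd };

    struct host {
        struct host_ops ops;   /* per-instance copy, safe to patch */
        void *priv;
    };

    struct host *host_alloc(void)
    {
        struct host *h = calloc(1, sizeof(*h));
        if (h)
            h->ops = default_ops;   /* copy, not a shared pointer */
        return h;
    }

    static int quirk_get_cd(void *priv) { (void)priv; return 0; }

    /* One quirky controller overrides one callback; all other hosts
     * keep the defaults. */
    void apply_quirk(struct host *h)
    {
        h->ops.get_cd = quirk_get_cd;
    }
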
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 9d4aa31..9c331ac 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -425,6 +425,7 @@ struct sdhci_host {
+
+ /* Internal data */
+ struct mmc_host *mmc; /* MMC structure */
++ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
+ u64 dma_mask; /* custom DMA mask */
+
+ #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index 4498e92..b47122d 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
+ struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq ? mrq->data : NULL;
+- struct scatterlist *sg = host->sg ?: data->sg;
++ struct scatterlist *sg;
+
+ dev_warn(mmc_dev(host->mmc),
+ "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
+@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
+ case USDHI6_WAIT_FOR_MWRITE:
+ case USDHI6_WAIT_FOR_READ:
+ case USDHI6_WAIT_FOR_WRITE:
++ sg = host->sg ?: data->sg;
+ dev_dbg(mmc_dev(host->mmc),
+ "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
+ data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f1692e4..28bbca0 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
+ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ struct rtnl_link_stats64 *stats);
+ static void bond_slave_arr_handler(struct work_struct *work);
++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
++ int mod);
+
+ /*---------------------------- General routines -----------------------------*/
+
+@@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+ {
+ struct arphdr *arp = (struct arphdr *)skb->data;
+- struct slave *curr_active_slave;
++ struct slave *curr_active_slave, *curr_arp_slave;
+ unsigned char *arp_ptr;
+ __be32 sip, tip;
+ int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+@@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ &sip, &tip);
+
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
++ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+- /* Backup slaves won't see the ARP reply, but do come through
+- * here for each ARP probe (so we swap the sip/tip to validate
+- * the probe). In a "redundant switch, common router" type of
+- * configuration, the ARP probe will (hopefully) travel from
+- * the active, through one switch, the router, then the other
+- * switch before reaching the backup.
++ /* We 'trust' the received ARP enough to validate it if:
++ *
++ * (a) the slave receiving the ARP is active (which includes the
++ * current ARP slave, if any), or
++ *
++ * (b) the receiving slave isn't active, but there is a currently
++ * active slave and it received valid arp reply(s) after it became
++ * the currently active slave, or
++ *
++ * (c) there is an ARP slave that sent an ARP during the prior ARP
++ * interval, and we receive an ARP reply on any slave. We accept
++ * these because switch FDB update delays may deliver the ARP
++ * reply to a slave other than the sender of the ARP request.
+ *
+- * We 'trust' the arp requests if there is an active slave and
+- * it received valid arp reply(s) after it became active. This
+- * is done to avoid endless looping when we can't reach the
++ * Note: for (b), backup slaves are receiving the broadcast ARP
++ * request, not a reply. This request passes from the sending
++ * slave through the L2 switch(es) to the receiving slave. Since
++ * this is checking the request, sip/tip are swapped for
++ * validation.
++ *
++ * This is done to avoid endless looping when we can't reach the
+ * arp_ip_target and fool ourselves with our own arp requests.
+ */
+-
+ if (bond_is_active_slave(slave))
+ bond_validate_arp(bond, slave, sip, tip);
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
+ bond_validate_arp(bond, slave, tip, sip);
++ else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
++ bond_time_in_interval(bond,
++ dev_trans_start(curr_arp_slave->dev), 1))
++ bond_validate_arp(bond, slave, sip, tip);
+
+ out_unlock:
+ if (arp != (struct arphdr *)skb->data)
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index fc5b756..eb7192f 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
+ */
+ #define EMS_USB_ARM7_CLOCK 8000000
+
++#define CPC_TX_QUEUE_TRIGGER_LOW 25
++#define CPC_TX_QUEUE_TRIGGER_HIGH 35
++
+ /*
+ * CAN-Message representation in a CPC_MSG. Message object type is
+ * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ switch (urb->status) {
+ case 0:
+ dev->free_slots = dev->intr_in_buffer[1];
++		if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
++			if (netif_queue_stopped(netdev)) {
++				netif_wake_queue(netdev);
++			}
++		}
+ break;
+
+ case -ECONNRESET: /* unlink */
+@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
+ /* Release context */
+ context->echo_index = MAX_TX_URBS;
+
+- if (netif_queue_stopped(netdev))
+- netif_wake_queue(netdev);
+ }
+
+ /*
+@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
+ int err, i;
+
+ dev->intr_in_buffer[0] = 0;
+- dev->free_slots = 15; /* initial size */
++ dev->free_slots = 50; /* initial size */
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
+
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+- dev->free_slots < 5) {
++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
+ netif_stop_queue(netdev);
+ }
+ }
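
The ems_usb change replaces wake-on-every-completed-URB with watermark hysteresis: the queue stops when the device's reported free slots fall below a low threshold (25) and wakes only after they recover above a distinct high threshold (35). The gap between the two keeps the queue from flapping when the slot count hovers around a single limit. The mechanism, isolated:

    #include <stdbool.h>

    #define TRIGGER_LOW  25
    #define TRIGGER_HIGH 35

    struct txq { bool stopped; };

    /* Transmit path: stop accepting work when slots run low. */
    void tx_check(struct txq *q, int free_slots)
    {
        if (free_slots < TRIGGER_LOW)
            q->stopped = true;
    }

    /* Device status path: wake only once the count has recovered past
     * the *higher* threshold, never at the stop threshold itself. */
    void slots_update(struct txq *q, int free_slots)
    {
        if (q->stopped && free_slots > TRIGGER_HIGH)
            q->stopped = false;
    }
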
+diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
+index b06dba0..2dea39b 100644
+--- a/drivers/net/dsa/mv88e6xxx.c
++++ b/drivers/net/dsa/mv88e6xxx.c
+@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+
+ /* no PVID with ranges, otherwise it's a bug */
+ if (pvid)
+- err = _mv88e6xxx_port_pvid_set(ds, port, vid);
++ err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
+ unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 79789d8..ca5ac5d 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ return ret;
+ }
+
++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
++{
++ /* Check if we will never have enough descriptors,
++ * as gso_segs can be more than current ring size
++ */
++ return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
++}
++
+ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+
+ /* Use GSO to workaround all TSO packets that meet HW bug conditions
+@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ * vlan encapsulated.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) ||
+- skb->protocol == htons(ETH_P_8021AD))
+- return tg3_tso_bug(tp, tnapi, txq, skb);
++ skb->protocol == htons(ETH_P_8021AD)) {
++ if (tg3_tso_bug_gso_check(tnapi, skb))
++ return tg3_tso_bug(tp, tnapi, txq, skb);
++ goto drop;
++ }
+
+ if (!skb_is_gso_v6(skb)) {
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+- tg3_flag(tp, TSO_BUG))
+- return tg3_tso_bug(tp, tnapi, txq, skb);
+-
++ tg3_flag(tp, TSO_BUG)) {
++ if (tg3_tso_bug_gso_check(tnapi, skb))
++ return tg3_tso_bug(tp, tnapi, txq, skb);
++ goto drop;
++ }
+ ip_csum = iph->check;
+ ip_tot_len = iph->tot_len;
+ iph->check = 0;
+@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (would_hit_hwbug) {
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+
+- if (mss) {
++ if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
+ /* If it's a TSO packet, do GSO instead of
+ * allocating and copying to a large linear SKB
+ */
+diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
+index 1671fa3..7ba6d53 100644
+--- a/drivers/net/ethernet/cisco/enic/enic.h
++++ b/drivers/net/ethernet/cisco/enic/enic.h
+@@ -33,7 +33,7 @@
+
+ #define DRV_NAME "enic"
+ #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
+-#define DRV_VERSION "2.3.0.12"
++#define DRV_VERSION "2.3.0.20"
+ #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
+
+ #define ENIC_BARS_MAX 6
+diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
+index 1ffd105..1fdf5fe 100644
+--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
++++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
+@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+ {
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+- struct devcmd2_result *result = dc2c->result + dc2c->next_result;
++ struct devcmd2_result *result;
++ u8 color;
+ unsigned int i;
+ int delay, err;
+ u32 fetch_index, new_posted;
+@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return 0;
+
++ result = dc2c->result + dc2c->next_result;
++ color = dc2c->color;
++
++ dc2c->next_result++;
++ if (dc2c->next_result == dc2c->result_size) {
++ dc2c->next_result = 0;
++ dc2c->color = dc2c->color ? 0 : 1;
++ }
++
+ for (delay = 0; delay < wait; delay++) {
+- if (result->color == dc2c->color) {
+- dc2c->next_result++;
+- if (dc2c->next_result == dc2c->result_size) {
+- dc2c->next_result = 0;
+- dc2c->color = dc2c->color ? 0 : 1;
+- }
++ if (result->color == color) {
+ if (result->error) {
+ err = result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index 038f9ce..1494997 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .enable = mlx4_en_phc_enable,
+ };
+
++#define MLX4_EN_WRAP_AROUND_SEC 10ULL
++
++/* This function calculates the max shift that enables the user range
++ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
++ */
++static u32 freq_to_shift(u16 freq)
++{
++ u32 freq_khz = freq * 1000;
++ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
++ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
++ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
++ /* calculate max possible multiplier in order to fit in 64bit */
++ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
++
++ /* This comes from the reverse of clocksource_khz2mult */
++ return ilog2(div_u64(max_mul * freq_khz, 1000000));
++}
++
+ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ struct mlx4_dev *dev = mdev->dev;
+@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ mdev->cycles.read = mlx4_en_read_clock;
+ mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+- /* Using shift to make calculation more accurate. Since current HW
+- * clock frequency is 427 MHz, and cycles are given using a 48 bits
+- * register, the biggest shift when calculating using u64, is 14
+- * (max_cycles * multiplier < 2^64)
+- */
+- mdev->cycles.shift = 14;
++ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
+ mdev->cycles.mult =
+ clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ mdev->nominal_c_mult = mdev->cycles.mult;
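
freq_to_shift() above replaces the hard-coded shift of 14 with one computed from the actual core clock: pick the largest multiplier whose product with the worst-case cycle count over a 10-second window still fits in 64 bits, then recover the shift by inverting clocksource_khz2mult() (shift = log2(mult * khz / 10^6)). A userspace rendering of the arithmetic (the rounding helper is simplified):

    #include <stdint.h>
    #include <stdio.h>

    #define WRAP_AROUND_SEC 10ULL

    static uint32_t freq_to_shift(uint16_t freq_mhz)
    {
        uint64_t freq_khz = freq_mhz * 1000ULL;
        uint64_t max_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
        uint64_t rounded = max_cycles;
        uint64_t max_mul, mult_freq;
        uint32_t shift = 0;

        while (rounded & (rounded + 1))    /* round up to 2^k - 1 */
            rounded |= rounded >> 1;
        max_mul = UINT64_MAX / rounded;    /* biggest overflow-safe mult */
        mult_freq = max_mul * freq_khz / 1000000;
        while (mult_freq >>= 1)            /* ilog2 */
            shift++;
        return shift;
    }

    int main(void)
    {
        /* 427 MHz (the frequency the removed comment assumed) yields
         * shift 30: far finer resolution than the fixed 14, yet still
         * overflow-free for any delta under 10 seconds. */
        printf("shift = %u\n", freq_to_shift(427));
        return 0;
    }
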
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 7869f97..67e9633 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -2381,8 +2381,6 @@ out:
+ /* set offloads */
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
+ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ /* unset offloads */
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
+
+ ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ VXLAN_STEER_BY_OUTER_MAC, 0);
+@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ }
+
++ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
++ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
++ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
++ }
++
+ mdev->pndev[port] = dev;
+ mdev->upper[port] = NULL;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+index ee99e67..3904b5f 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+ stats->collisions = 0;
+ stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++ stats->rx_over_errors = 0;
+ stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ stats->rx_frame_errors = 0;
+ stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++ stats->rx_missed_errors = 0;
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = 0;
+ stats->tx_fifo_errors = 0;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 617fb22..7dbeafa 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -45,6 +45,7 @@
+ #include <linux/if_bridge.h>
+ #include <linux/workqueue.h>
+ #include <linux/jiffies.h>
++#include <linux/rtnetlink.h>
+ #include <net/switchdev.h>
+
+ #include "spectrum.h"
+@@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+
+ mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+
++ rtnl_lock();
+ do {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+@@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+
+ } while (num_rec);
++ rtnl_unlock();
+
+ kfree(sfn_pl);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index e9f2349..52ec3d6 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
+ info.addr = lw->addr;
+ info.vid = lw->vid;
+
++ rtnl_lock();
+ if (learned && removing)
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ lw->rocker_port->dev, &info.info);
+ else if (learned && !removing)
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ lw->rocker_port->dev, &info.info);
++ rtnl_unlock();
+
+ rocker_port_kfree(lw->trans, work);
+ }
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 47b7117..e6cefd0 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct sk_buff *skb;
+ unsigned long flags;
++ u8 overflow;
++
++ overflow = (phy_rxts->ns_hi >> 14) & 0x3;
++ if (overflow)
++ pr_debug("rx timestamp queue overflow, count %d\n", overflow);
+
+ spin_lock_irqsave(&dp83640->rx_lock, flags);
+
+@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb;
+ u64 ns;
++ u8 overflow;
+
+ /* We must already have the skb that triggered this. */
+
+@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
+ pr_debug("have timestamp but tx_queue empty\n");
+ return;
+ }
++
++ overflow = (phy_txts->ns_hi >> 14) & 0x3;
++ if (overflow) {
++ pr_debug("tx timestamp queue overflow, count %d\n", overflow);
++ while (skb) {
++ skb_complete_tx_timestamp(skb, NULL);
++ skb = skb_dequeue(&dp83640->tx_queue);
++ }
++ return;
++ }
++
+ ns = phy2txts(phy_txts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 0a37f84..4e0068e7 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+
+ if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+ goto abort_put;
++
++ sock_put(sk_pppox(relay_po));
+ } else {
+ if (sock_queue_rcv_skb(sk, skb))
+ goto abort_kfree;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 597c53e..f7e8c79 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+ return i < MAX_CALLID;
+ }
+
+-static int add_chan(struct pppox_sock *sock)
++static int add_chan(struct pppox_sock *sock,
++ struct pptp_addr *sa)
+ {
+ static int call_id;
+
+ spin_lock(&chan_lock);
+- if (!sock->proto.pptp.src_addr.call_id) {
++ if (!sa->call_id) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ if (call_id == MAX_CALLID) {
+ call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ if (call_id == MAX_CALLID)
+ goto out_err;
+ }
+- sock->proto.pptp.src_addr.call_id = call_id;
+- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
++ sa->call_id = call_id;
++ } else if (test_bit(sa->call_id, callid_bitmap)) {
+ goto out_err;
++ }
+
+- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
++ sock->proto.pptp.src_addr = *sa;
++ set_bit(sa->call_id, callid_bitmap);
++ rcu_assign_pointer(callid_sock[sa->call_id], sock);
+ spin_unlock(&chan_lock);
+
+ return 0;
+@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+- struct pptp_opt *opt = &po->proto.pptp;
+ int error = 0;
+
+ if (sockaddr_len < sizeof(struct sockaddr_pppox))
+@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+
+ lock_sock(sk);
+
+- opt->src_addr = sp->sa_addr.pptp;
+- if (add_chan(po))
++ if (sk->sk_state & PPPOX_DEAD) {
++ error = -EALREADY;
++ goto out;
++ }
++
++ if (sk->sk_state & PPPOX_BOUND) {
+ error = -EBUSY;
++ goto out;
++ }
++
++ if (add_chan(po, &sp->sa_addr.pptp))
++ error = -EBUSY;
++ else
++ sk->sk_state |= PPPOX_BOUND;
+
++out:
+ release_sock(sk);
+ return error;
+ }
+@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ }
+
+ opt->dst_addr = sp->sa_addr.pptp;
+- sk->sk_state = PPPOX_CONNECTED;
++ sk->sk_state |= PPPOX_CONNECTED;
+
+ end:
+ release_sock(sk);
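
The pptp_bind() rework does two things: it rejects sockets that are already dead or bound, and it moves the assignment of the source address into add_chan() so the address is committed only after the call-id reservation succeeds inside the lock; a losing racer no longer leaves a half-installed address behind. The commit-on-success shape, reduced to userspace locking:

    #include <pthread.h>
    #include <stdbool.h>

    #define MAX_ID 64

    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool callid_used[MAX_ID];

    struct chan { int call_id; };

    /* Reserve a call id and publish it into the channel atomically:
     * the test and both writes happen under one lock, so a failed
     * attempt leaves the channel untouched. */
    int add_chan(struct chan *c, int requested)
    {
        int err = 0;

        pthread_mutex_lock(&chan_lock);
        if (callid_used[requested]) {
            err = -1;                    /* -EBUSY in the driver */
        } else {
            callid_used[requested] = true;
            c->call_id = requested;      /* commit only on success */
        }
        pthread_mutex_unlock(&chan_lock);
        return err;
    }
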
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 5fccc5a..982e0acd 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
+
+ /* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
++ {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
+ {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 405a7b6..e0fcda4 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ vxlan->cfg.port_max, true);
+
+ if (info) {
+- if (info->key.tun_flags & TUNNEL_CSUM)
+- flags |= VXLAN_F_UDP_CSUM;
+- else
+- flags &= ~VXLAN_F_UDP_CSUM;
+-
+ ttl = info->key.ttl;
+ tos = info->key.tos;
+
+@@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ goto drop;
+ sk = vxlan->vn4_sock->sock->sk;
+
+- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+- df = htons(IP_DF);
++ if (info) {
++ if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
++ df = htons(IP_DF);
++
++ if (info->key.tun_flags & TUNNEL_CSUM)
++ flags |= VXLAN_F_UDP_CSUM;
++ else
++ flags &= ~VXLAN_F_UDP_CSUM;
++ }
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
+@@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ return;
+ }
+
++ if (info) {
++ if (info->key.tun_flags & TUNNEL_CSUM)
++ flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
++ else
++ flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
++ }
++
+ ttl = ttl ? : ip6_dst_hoplimit(ndst);
+ err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
+ 0, ttl, src_port, dst_port, htonl(vni << 8), md,
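
The vxlan fix moves TUNNEL_CSUM handling into the per-family branches because the same request maps onto different flag bits: IPv4 sets VXLAN_F_UDP_CSUM, while IPv6 clears VXLAN_F_UDP_ZERO_CSUM6_TX. A standalone sketch of the inversion; the bit values here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define TUNNEL_CSUM            0x01    /* illustrative bit values */
#define VXLAN_F_UDP_CSUM       0x01
#define VXLAN_F_ZERO_CSUM6_TX  0x02

/* For IPv4 the flag enables checksumming; for IPv6 the same
 * request means "do NOT transmit a zero checksum". */
static uint32_t resolve_flags(uint32_t flags, uint32_t tun_flags, int ipv6)
{
        if (!ipv6) {
                if (tun_flags & TUNNEL_CSUM)
                        flags |= VXLAN_F_UDP_CSUM;
                else
                        flags &= ~VXLAN_F_UDP_CSUM;
        } else {
                if (tun_flags & TUNNEL_CSUM)
                        flags &= ~VXLAN_F_ZERO_CSUM6_TX;
                else
                        flags |= VXLAN_F_ZERO_CSUM6_TX;
        }
        return flags;
}

int main(void)
{
        printf("v4: %x v6: %x\n",
               resolve_flags(0, TUNNEL_CSUM, 0),
               resolve_flags(0, TUNNEL_CSUM, 1));
        return 0;
}
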
+diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
+index e18629a..0961f33 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
+@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
+
+ priv->ucode_loaded = false;
+ iwl_trans_stop_device(priv->trans);
++ ret = iwl_trans_start_hw(priv->trans);
++ if (ret)
++ goto out;
+
+ priv->wowlan = true;
+
+diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
+index d6e0c1b..8215d74 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
+@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ return -EBUSY;
+ }
+
++ /* we don't support "match all" in the firmware */
++ if (!req->n_match_sets)
++ return -EOPNOTSUPP;
++
+ ret = iwl_mvm_check_running_scans(mvm, type);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 639761f..d58c094 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
++ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 9028345..8c72047 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -7,6 +7,7 @@
+ *
+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -33,6 +34,7 @@
+ *
+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -924,9 +926,16 @@ monitor:
+ if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ trans_pcie->fw_mon_phys >> dest->base_shift);
+- iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+- (trans_pcie->fw_mon_phys +
+- trans_pcie->fw_mon_size) >> dest->end_shift);
++ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size - 256) >>
++ dest->end_shift);
++ else
++ iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++ (trans_pcie->fw_mon_phys +
++ trans_pcie->fw_mon_size) >>
++ dest->end_shift);
+ }
+ }
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index f46c9d7..7f471bf 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ hw_queue);
+ if (rx_remained_cnt == 0)
+ return;
+-
++ buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
++ rtlpci->rx_ring[rxring_idx].idx];
++ pdesc = (struct rtl_rx_desc *)skb->data;
+ } else { /* rx descriptor */
+ pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+ rtlpci->rx_ring[rxring_idx].idx];
+@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb))
+ goto no_new;
+- if (rtlpriv->use_new_trx_flow) {
+- buffer_desc =
+- &rtlpci->rx_ring[rxring_idx].buffer_desc
+- [rtlpci->rx_ring[rxring_idx].idx];
+- /*means rx wifi info*/
+- pdesc = (struct rtl_rx_desc *)skb->data;
+- }
+ memset(&rx_status , 0 , sizeof(rx_status));
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
+ &rx_status, (u8 *)pdesc, skb);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+index 1134412..47e32cb 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ u8 tid;
+
+ rtl8188ee_bt_reg_init(hw);
+- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+-
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
++ rtlpriv->cfg->mod_params->disable_watchdog =
++ rtlpriv->cfg->mod_params->disable_watchdog;
+ if (rtlpriv->cfg->mod_params->disable_watchdog)
+ pr_info("watchdog disabled\n");
+ if (!rtlpriv->psc.inactiveps)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+index de6cb6c..4780bdc 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("rtl8192ce: Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+index fd4a535..7c6f7f0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->dm.disable_framebursting = false;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+index b19d039..c6e09a1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+index e1fd27c..31baca41 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
+ if (!rtlpriv->psc.inactiveps)
+ pr_info("Power Save off (module option)\n");
+ if (!rtlpriv->psc.fwctrl_lps)
+@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+index 3859b3e..ff49a8c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
++ rtlpriv->cfg->mod_params->disable_watchdog =
++ rtlpriv->cfg->mod_params->disable_watchdog;
+ if (rtlpriv->cfg->mod_params->disable_watchdog)
+ pr_info("watchdog disabled\n");
+ rtlpriv->psc.reg_fwctrl_lps = 3;
+@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
+ .swctrl_lps = false,
+ .fwctrl_lps = true,
+ .debug = DBG_EMERG,
++ .msi_support = false,
++ .disable_watchdog = false,
+ };
+
+ static struct rtl_hal_cfg rtl8723e_hal_cfg = {
+@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
+ module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
+ module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
++module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
+ bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+index d091f1d..a78eaed 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ rtl8723be_bt_reg_init(hw);
+- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+ rtlpriv->dm.dm_initialgain_enable = 1;
+@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++ rtlpriv->cfg->mod_params->sw_crypto =
++ rtlpriv->cfg->mod_params->sw_crypto;
++ rtlpriv->cfg->mod_params->disable_watchdog =
++ rtlpriv->cfg->mod_params->disable_watchdog;
+ if (rtlpriv->cfg->mod_params->disable_watchdog)
+ pr_info("watchdog disabled\n");
+ rtlpriv->psc.reg_fwctrl_lps = 3;
+@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
+ .inactiveps = true,
+ .swctrl_lps = false,
+ .fwctrl_lps = true,
++ .msi_support = false,
++ .disable_watchdog = false,
++ .debug = DBG_EMERG,
+ };
+
+ static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 4fa916d..72a2c19 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
+ msi_base = be32_to_cpup(msi_map + 2);
+ rid_len = be32_to_cpup(msi_map + 3);
+
++ if (rid_base & ~map_mask) {
++ dev_err(parent_dev,
++ "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
++ map_mask, rid_base);
++ return rid_out;
++ }
++
+ msi_controller_node = of_find_node_by_phandle(phandle);
+
+ matched = (masked_rid >= rid_base &&
+@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
+ if (!matched)
+ return rid_out;
+
+- rid_out = masked_rid + msi_base;
++ rid_out = masked_rid - rid_base + msi_base;
+ dev_dbg(dev,
+ "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ dev_name(parent_dev), map_mask, rid_base, msi_base,
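
The arithmetic fix here matters more than the added validation: an RID inside an msi-map window has to be rebased into the output space, masked_rid - rid_base + msi_base, whereas the old code simply added msi_base and produced wrong IDs for any window with a non-zero rid-base. A worked standalone example with invented window values:

#include <assert.h>
#include <stdint.h>

/* msi-map entry: RIDs [rid_base, rid_base + len) map onto
 * [msi_base, msi_base + len) after masking. */
static uint32_t msi_map_rid(uint32_t rid, uint32_t mask,
                            uint32_t rid_base, uint32_t msi_base,
                            uint32_t len)
{
        uint32_t masked = rid & mask;

        if (masked < rid_base || masked >= rid_base + len)
                return rid;                     /* no translation */
        return masked - rid_base + msi_base;    /* rebase, not just add */
}

int main(void)
{
        /* window 0x100..0x1ff maps onto 0x8000..0x80ff */
        assert(msi_map_rid(0x0142, 0xffff, 0x100, 0x8000, 0x100) == 0x8042);
        /* the old code computed 0x142 + 0x8000 = 0x8142, off by rid_base */
        return 0;
}
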
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index ff53856..0b3e0bf 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
+ {
+ pci_lock_rescan_remove();
+
+- if (slot->flags & SLOT_IS_GOING_AWAY)
++ if (slot->flags & SLOT_IS_GOING_AWAY) {
++ pci_unlock_rescan_remove();
+ return -ENODEV;
++ }
+
+ /* configure all functions */
+ if (!(slot->flags & SLOT_ENABLED))
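
The acpiphp hunk plugs a lock leak: the SLOT_IS_GOING_AWAY early return left pci_lock_rescan_remove() held, deadlocking the next rescan. A minimal model of the rule, using a pthread mutex in place of the kernel lock:

#include <pthread.h>

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every early return between lock and unlock must release the
 * lock, or the next caller blocks forever; the bug this fixes. */
static int enable_slot(int going_away)
{
        pthread_mutex_lock(&rescan_lock);
        if (going_away) {
                pthread_mutex_unlock(&rescan_lock); /* the added unlock */
                return -1;
        }
        /* ... configure all functions ... */
        pthread_mutex_unlock(&rescan_lock);
        return 0;
}

int main(void)
{
        enable_slot(1);
        return enable_slot(0);  /* would hang here without the fix */
}
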
+diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
+index 0bf82a2..48d21e0 100644
+--- a/drivers/pci/pcie/aer/aerdrv.c
++++ b/drivers/pci/pcie/aer/aerdrv.c
+@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+ rpc->rpd = dev;
+ INIT_WORK(&rpc->dpc_handler, aer_isr);
+ mutex_init(&rpc->rpc_mutex);
+- init_waitqueue_head(&rpc->wait_release);
+
+ /* Use PCIe bus function to store rpc into PCIe device */
+ set_service_data(dev, rpc);
+@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
+ if (rpc->isr)
+ free_irq(dev->irq, dev);
+
+- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+-
++ flush_work(&rpc->dpc_handler);
+ aer_disable_rootport(rpc);
+ kfree(rpc);
+ set_service_data(dev, NULL);
+diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
+index 84420b7..945c939 100644
+--- a/drivers/pci/pcie/aer/aerdrv.h
++++ b/drivers/pci/pcie/aer/aerdrv.h
+@@ -72,7 +72,6 @@ struct aer_rpc {
+ * recovery on the same
+ * root port hierarchy
+ */
+- wait_queue_head_t wait_release;
+ };
+
+ struct aer_broadcast_data {
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index fba785e..4e14de0 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
+ while (get_e_source(rpc, &e_src))
+ aer_isr_one_error(p_device, &e_src);
+ mutex_unlock(&rpc->rpc_mutex);
+-
+- wake_up(&rpc->wait_release);
+ }
+
+ /**
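
The AER rework drops the hand-rolled wait_event() on prod_idx == cons_idx and relies on flush_work(), which already guarantees the handler is neither pending nor running when it returns. A loose userspace model of what "flush" must provide before teardown may proceed; this is an analogy, not the workqueue implementation:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static bool running = true;

static void *handler(void *arg)
{
        /* ... drain queued errors ... */
        pthread_mutex_lock(&m);
        running = false;                /* handler finished */
        pthread_cond_signal(&c);
        pthread_mutex_unlock(&m);
        return arg;
}

/* Wait until the handler has completed, with no custom
 * producer/consumer counters to get out of sync. */
static void flush(void)
{
        pthread_mutex_lock(&m);
        while (running)
                pthread_cond_wait(&c, &m);
        pthread_mutex_unlock(&m);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, 0, handler, 0);
        flush();                        /* safe to free state after this */
        pthread_join(t, 0);
        return 0;
}
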
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index c777b97..5f70fee 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -53,7 +53,7 @@ struct pcifront_device {
+ };
+
+ struct pcifront_sd {
+- int domain;
++ struct pci_sysdata sd;
+ struct pcifront_device *pdev;
+ };
+
+@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ unsigned int domain, unsigned int bus,
+ struct pcifront_device *pdev)
+ {
+- sd->domain = domain;
++ /* Because we do not expose that information via XenBus. */
++ sd->sd.node = first_online_node;
++ sd->sd.domain = domain;
+ sd->pdev = pdev;
+ }
+
+@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
+ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
+- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
++ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!bus_entry || !sd) {
+ err = -ENOMEM;
+ goto err_out;
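
The pcifront fix embeds the generic struct pci_sysdata as the first member of struct pcifront_sd, since arch code casts bus->sysdata to the generic type, and kzalloc() keeps the fields the frontend never sets at zero. A standalone illustration of that first-member contract, with a stub pci_sysdata:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct pci_sysdata { int domain; int node; };

/* Embedding the generic sysdata as the FIRST member keeps the
 * cast (struct pci_sysdata *)bus->sysdata valid in generic code. */
struct pcifront_sd {
        struct pci_sysdata sd;          /* must stay first */
        void *pdev;
};

int main(void)
{
        struct pcifront_sd *fsd = calloc(1, sizeof(*fsd)); /* kzalloc-like */

        assert(offsetof(struct pcifront_sd, sd) == 0);
        fsd->sd.domain = 3;

        /* generic code sees the same object through the base type */
        struct pci_sysdata *sd = (struct pci_sysdata *)fsd;
        assert(sd->domain == 3 && sd->node == 0);
        free(fsd);
        return 0;
}
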
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index 8c7f27d..e7e574d 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
+
+ int phy_power_on(struct phy *phy)
+ {
+- int ret;
++ int ret = 0;
+
+ if (!phy)
+- return 0;
++ goto out;
+
+ if (phy->pwr) {
+ ret = regulator_enable(phy->pwr);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ ret = phy_pm_runtime_get_sync(phy);
+ if (ret < 0 && ret != -ENOTSUPP)
+- return ret;
++ goto err_pm_sync;
++
+ ret = 0; /* Override possible ret == -ENOTSUPP */
+
+ mutex_lock(&phy->mutex);
+@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
+ ret = phy->ops->power_on(phy);
+ if (ret < 0) {
+ dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
+- goto out;
++ goto err_pwr_on;
+ }
+ }
+ ++phy->power_count;
+ mutex_unlock(&phy->mutex);
+ return 0;
+
+-out:
++err_pwr_on:
+ mutex_unlock(&phy->mutex);
+ phy_pm_runtime_put_sync(phy);
++err_pm_sync:
+ if (phy->pwr)
+ regulator_disable(phy->pwr);
+-
++out:
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(phy_power_on);
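
phy_power_on() is restructured into a single unwind ladder so each failure releases exactly what was acquired before it: a runtime-PM failure now disables the regulator, and a power_on() failure drops the runtime-PM reference as well. A generic sketch of the goto cleanup pattern, with placeholder steps:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("%s %s\n", name, fail ? "failed" : "ok");
        return fail ? -1 : 0;
}

/* Unwind in strict reverse order of acquisition: each label
 * releases only what was already held when we jump to it. */
static int power_on(int fail_reg, int fail_pm, int fail_on)
{
        int ret;

        if ((ret = step("regulator_enable", fail_reg)))
                goto out;
        if ((ret = step("pm_runtime_get", fail_pm)))
                goto err_pm;
        if ((ret = step("ops->power_on", fail_on)))
                goto err_on;
        return 0;

err_on:
        step("pm_runtime_put", 0);
err_pm:
        step("regulator_disable", 0);
out:
        return ret;
}

int main(void)
{
        return power_on(0, 0, 1) == -1 ? 0 : 1;
}
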
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index a313dfc..d78ee15 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -865,6 +865,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ },
+ },
+ {
++ .ident = "Lenovo ideapad Y700-17ISK",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
++ },
++ },
++ {
+ .ident = "Lenovo Yoga 2 11 / 13 / Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -893,6 +900,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ },
+ },
+ {
++ .ident = "Lenovo Yoga 700",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
++ },
++ },
++ {
+ .ident = "Lenovo Yoga 900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index c013029..b0f6214 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+ brightness = __get_lcd_brightness(dev);
+ if (brightness < 0)
+ return 0;
++ /*
++ * If transflective backlight is supported and the brightness is zero
++ * (lowest brightness level), the set_lcd_brightness function will
++ * activate the transflective backlight, making the LCD appear to be
++ * turned off; simply increment the brightness level to avoid that.
++ */
++ if (dev->tr_backlight_supported && brightness == 0)
++ brightness++;
+ ret = set_lcd_brightness(dev, brightness);
+ if (ret) {
+ pr_debug("Backlight method is read-only, disabling backlight support\n");
+diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
+index 8df0b0e..0067620 100644
+--- a/drivers/regulator/Kconfig
++++ b/drivers/regulator/Kconfig
+@@ -446,6 +446,7 @@ config REGULATOR_MC13892
+ config REGULATOR_MT6311
+ tristate "MediaTek MT6311 PMIC"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6311 PMIC.
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index 35de22f..f2e1a39 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -27,8 +27,8 @@
+ #define AXP20X_IO_ENABLED 0x03
+ #define AXP20X_IO_DISABLED 0x07
+
+-#define AXP22X_IO_ENABLED 0x04
+-#define AXP22X_IO_DISABLED 0x03
++#define AXP22X_IO_ENABLED 0x03
++#define AXP22X_IO_DISABLED 0x04
+
+ #define AXP20X_WORKMODE_DCDC2_MASK BIT(2)
+ #define AXP20X_WORKMODE_DCDC3_MASK BIT(1)
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index a263c10..4abfbdb 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
+ max = block->base->discipline->max_blocks << block->s2b_shift;
+ }
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
++ block->request_queue->limits.max_dev_sectors = max;
+ blk_queue_logical_block_size(block->request_queue,
+ block->bp_block);
+ blk_queue_max_hw_sectors(block->request_queue, max);
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index 184b1db..286782c 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ cancel_work_sync(&lcu->suc_data.worker);
+ spin_lock_irqsave(&lcu->lock, flags);
+- if (device == lcu->suc_data.device)
++ if (device == lcu->suc_data.device) {
++ dasd_put_device(device);
+ lcu->suc_data.device = NULL;
++ }
+ }
+ was_pending = 0;
+ if (device == lcu->ruac_data.device) {
+@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ was_pending = 1;
+ cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ spin_lock_irqsave(&lcu->lock, flags);
+- if (device == lcu->ruac_data.device)
++ if (device == lcu->ruac_data.device) {
++ dasd_put_device(device);
+ lcu->ruac_data.device = NULL;
++ }
+ }
+ private->lcu = NULL;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
+ if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ " alias data in lcu (rc = %d), retry later", rc);
+- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
++ dasd_put_device(device);
+ } else {
++ dasd_put_device(device);
+ lcu->ruac_data.device = NULL;
+ lcu->flags &= ~UPDATE_PENDING;
+ }
+@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ */
+ if (!usedev)
+ return -EINVAL;
++ dasd_get_device(usedev);
+ lcu->ruac_data.device = usedev;
+- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
++ dasd_put_device(usedev);
+ return 0;
+ }
+
+@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
+ ASCEBC((char *) &cqr->magic, 4);
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+- ccw->flags = 0 ;
++ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 16;
+ ccw->cda = (__u32)(addr_t) cqr->data;
+ ((char *)cqr->data)[0] = reason;
+@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
+ /* 3. read new alias configuration */
+ _schedule_lcu_update(lcu, device);
+ lcu->suc_data.device = NULL;
++ dasd_put_device(device);
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+
+@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+ }
+ lcu->suc_data.reason = reason;
+ lcu->suc_data.device = device;
++ dasd_get_device(device);
+ spin_unlock(&lcu->lock);
+- schedule_work(&lcu->suc_data.worker);
++ if (!schedule_work(&lcu->suc_data.worker))
++ dasd_put_device(device);
+ };
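
The dasd_alias changes pin the device with dasd_get_device() before every worker is scheduled, and drop the reference either in the worker itself or immediately when schedule_work()/schedule_delayed_work() reports the work already queued, so a handler can never run against a freed device. A small model of that ownership hand-off, with a trivial refcount standing in for the device reference:

#include <assert.h>
#include <stdbool.h>

struct dev { int refcnt; };

static void get(struct dev *d) { d->refcnt++; }
static void put(struct dev *d) { d->refcnt--; }

/* Model of schedule_work(): returns false if already queued. */
static bool schedule(bool already_queued) { return !already_queued; }

static void queue_worker(struct dev *d, bool already_queued)
{
        get(d);                         /* pin before queueing */
        if (!schedule(already_queued))
                put(d);                 /* queue rejected it: unpin now */
        /* otherwise the worker calls put(d) when it finishes */
}

int main(void)
{
        struct dev d = { 1 };

        queue_worker(&d, true);         /* already pending: balanced here */
        assert(d.refcnt == 1);
        queue_worker(&d, false);        /* worker now owns one reference */
        assert(d.refcnt == 2);
        put(&d);                        /* what the worker would do */
        assert(d.refcnt == 1);
        return 0;
}
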
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 16a1935c..e197c6f 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ /* Clear outstanding commands array. */
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+- if (!req)
++ if (!req || !test_bit(que, ha->req_qid_map))
+ continue;
+ req->out_ptr = (void *)(req->ring + req->length);
+ *req->out_ptr = 0;
+@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+
+ for (que = 0; que < ha->max_rsp_queues; que++) {
+ rsp = ha->rsp_q_map[que];
+- if (!rsp)
++ if (!rsp || !test_bit(que, ha->rsp_qid_map))
+ continue;
+ rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ *rsp->in_ptr = 0;
+@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+
+ for (i = 1; i < ha->max_rsp_queues; i++) {
+ rsp = ha->rsp_q_map[i];
+- if (rsp) {
++ if (rsp && test_bit(i, ha->rsp_qid_map)) {
+ rsp->options &= ~BIT_0;
+ ret = qla25xx_init_rsp_que(base_vha, rsp);
+ if (ret != QLA_SUCCESS)
+@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+ }
+ for (i = 1; i < ha->max_req_queues; i++) {
+ req = ha->req_q_map[i];
+- if (req) {
+- /* Clear outstanding commands array. */
++ if (req && test_bit(i, ha->req_qid_map)) {
++ /* Clear outstanding commands array. */
+ req->options &= ~BIT_0;
+ ret = qla25xx_init_req_que(base_vha, req);
+ if (ret != QLA_SUCCESS)
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index ccf6a7f..0e59731 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ "MSI-X: Failed to enable support "
+ "-- %d/%d\n Retry with %d vectors.\n",
+ ha->msix_count, ret, ret);
++ ha->msix_count = ret;
++ ha->max_rsp_queues = ha->msix_count - 1;
+ }
+- ha->msix_count = ret;
+- ha->max_rsp_queues = ha->msix_count - 1;
+ ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+ ha->msix_count, GFP_KERNEL);
+ if (!ha->msix_entries) {
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index c5dd594..cf7ba52 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ /* Delete request queues */
+ for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ req = ha->req_q_map[cnt];
+- if (req) {
++ if (req && test_bit(cnt, ha->req_qid_map)) {
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00ea,
+@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ /* Delete response queues */
+ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+- if (rsp) {
++ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x00eb,
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index bfa9a64..fc6674d 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ int cnt;
+
+ for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
++ if (!test_bit(cnt, ha->req_qid_map))
++ continue;
++
+ req = ha->req_q_map[cnt];
+ qla2x00_free_req_que(ha, req);
+ }
+@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ ha->req_q_map = NULL;
+
+ for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
++ if (!test_bit(cnt, ha->rsp_qid_map))
++ continue;
++
+ rsp = ha->rsp_q_map[cnt];
+ qla2x00_free_rsp_que(ha, rsp);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index ddbe2e7..c3e6225 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
++
++ if (!test_bit(i, vha->hw->req_qid_map))
++ continue;
++
+ if (req || !buf) {
+ length = req ?
+ req->length : REQUEST_ENTRY_CNT_24XX;
+@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++ if (!test_bit(i, vha->hw->rsp_qid_map))
++ continue;
++
+ if (rsp || !buf) {
+ length = rsp ?
+ rsp->length : RESPONSE_ENTRY_CNT_MQ;
+@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ for (i = 0; i < vha->hw->max_req_queues; i++) {
+ struct req_que *req = vha->hw->req_q_map[i];
++
++ if (!test_bit(i, vha->hw->req_qid_map))
++ continue;
++
+ if (req || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
+@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++ if (!test_bit(i, vha->hw->rsp_qid_map))
++ continue;
++
+ if (rsp || !buf) {
+ qla27xx_insert16(i, buf, len);
+ qla27xx_insert16(1, buf, len);
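
All of the qla2xxx loops over req_q_map/rsp_q_map gain a test_bit() guard against the qid bitmaps, because a slot can still hold a stale non-NULL pointer after its queue has been deleted. A compact sketch of bitmap-guarded iteration:

#include <stdio.h>

#define MAX_QUEUES 8

/* Trust the allocation bitmap, not pointer NULL-ness: a slot can
 * hold a stale pointer after its queue is torn down. */
int main(void)
{
        void *q_map[MAX_QUEUES] = { 0 };
        unsigned long qid_map = 0;
        int dummy = 0;

        q_map[1] = &dummy;
        qid_map |= 1UL << 1;            /* live queue */
        q_map[2] = &dummy;              /* stale: bit not set */

        for (int i = 0; i < MAX_QUEUES; i++) {
                if (!(qid_map & (1UL << i)))
                        continue;       /* the added guard */
                if (q_map[i])
                        printf("processing queue %d\n", i);
        }
        return 0;
}
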
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 84fa4c4..bb669d3 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+ rw_max = q->limits.io_opt =
+- logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
++ sdkp->opt_xfer_blocks * sdp->sector_size;
+ else
+ rw_max = BLK_DEF_MAX_SECTORS;
+
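
The sd change fixes a unit mismatch: q->limits.io_opt is expressed in bytes, and deriving it via logical_to_sectors() (which yields 512-byte sectors) under-reports the optimal transfer size whenever the logical block exceeds 512 bytes. A worked example with invented disk parameters:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int sector_size     = 4096;    /* logical block size */
        unsigned int opt_xfer_blocks = 256;     /* from VPD page 0xB0 */

        unsigned int io_opt_bytes = opt_xfer_blocks * sector_size;
        /* logical_to_sectors() yields 512-byte sectors instead: */
        unsigned int in_sectors = opt_xfer_blocks * (sector_size / 512);

        printf("io_opt = %u bytes (not %u)\n", io_opt_bytes, in_sectors);
        assert(io_opt_bytes == 1048576 && in_sectors == 2048);
        return 0;
}
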
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index aebad36..8feac59 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
+
+ as->use_cs_gpios = true;
+ if (atmel_spi_is_v2(as) &&
++ pdev->dev.of_node &&
+ !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
+ as->use_cs_gpios = false;
+ master->num_chipselect = 4;
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 1f8903d..ed8283e 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ spi->controller_state = cs;
+ /* Link this to context save list */
+ list_add_tail(&cs->node, &ctx->cs);
++
++ if (gpio_is_valid(spi->cs_gpio)) {
++ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
++ if (ret) {
++ dev_err(&spi->dev, "failed to request gpio\n");
++ return ret;
++ }
++ gpio_direction_output(spi->cs_gpio,
++ !(spi->mode & SPI_CS_HIGH));
++ }
+ }
+
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
+@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ return ret;
+ }
+
+- if (gpio_is_valid(spi->cs_gpio)) {
+- ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+- if (ret) {
+- dev_err(&spi->dev, "failed to request gpio\n");
+- return ret;
+- }
+- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+- }
+-
+ ret = pm_runtime_get_sync(mcspi->dev);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index 79ac192..70b8f4f 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
+ lcd_send_serial(0x1F); /* R/W=W, RS=0 */
+ lcd_send_serial(cmd & 0x0F);
+ lcd_send_serial((cmd >> 4) & 0x0F);
+- /* the shortest command takes at least 40 us */
+- usleep_range(40, 100);
++ udelay(40); /* the shortest command takes at least 40 us */
+ spin_unlock_irq(&pprt_lock);
+ }
+
+@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
+ lcd_send_serial(0x5F); /* R/W=W, RS=1 */
+ lcd_send_serial(data & 0x0F);
+ lcd_send_serial((data >> 4) & 0x0F);
+- /* the shortest data takes at least 40 us */
+- usleep_range(40, 100);
++ udelay(40); /* the shortest data takes at least 40 us */
+ spin_unlock_irq(&pprt_lock);
+ }
+
+@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the data port */
+ w_dtr(pprt, cmd);
+- /* maintain the data during 20 us before the strobe */
+- usleep_range(20, 100);
++ udelay(20); /* maintain the data during 20 us before the strobe */
+
+ bits.e = BIT_SET;
+ bits.rs = BIT_CLR;
+ bits.rw = BIT_CLR;
+ set_ctrl_bits();
+
+- usleep_range(40, 100); /* maintain the strobe during 40 us */
++ udelay(40); /* maintain the strobe during 40 us */
+
+ bits.e = BIT_CLR;
+ set_ctrl_bits();
+
+- usleep_range(120, 500); /* the shortest command takes at least 120 us */
++ udelay(120); /* the shortest command takes at least 120 us */
+ spin_unlock_irq(&pprt_lock);
+ }
+
+@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the data port */
+ w_dtr(pprt, data);
+- /* maintain the data during 20 us before the strobe */
+- usleep_range(20, 100);
++ udelay(20); /* maintain the data during 20 us before the strobe */
+
+ bits.e = BIT_SET;
+ bits.rs = BIT_SET;
+ bits.rw = BIT_CLR;
+ set_ctrl_bits();
+
+- usleep_range(40, 100); /* maintain the strobe during 40 us */
++ udelay(40); /* maintain the strobe during 40 us */
+
+ bits.e = BIT_CLR;
+ set_ctrl_bits();
+
+- usleep_range(45, 100); /* the shortest data takes at least 45 us */
++ udelay(45); /* the shortest data takes at least 45 us */
+ spin_unlock_irq(&pprt_lock);
+ }
+
+@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the control port */
+ w_ctr(pprt, cmd);
+- usleep_range(60, 120);
++ udelay(60);
+ spin_unlock_irq(&pprt_lock);
+ }
+
+@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the data port */
+ w_dtr(pprt, data);
+- usleep_range(60, 120);
++ udelay(60);
+ spin_unlock_irq(&pprt_lock);
+ }
+
+@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
+ lcd_send_serial(0x5F); /* R/W=W, RS=1 */
+ lcd_send_serial(' ' & 0x0F);
+ lcd_send_serial((' ' >> 4) & 0x0F);
+- usleep_range(40, 100); /* the shortest data takes at least 40 us */
++ udelay(40); /* the shortest data takes at least 40 us */
+ }
+ spin_unlock_irq(&pprt_lock);
+
+@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
+ w_dtr(pprt, ' ');
+
+ /* maintain the data during 20 us before the strobe */
+- usleep_range(20, 100);
++ udelay(20);
+
+ bits.e = BIT_SET;
+ bits.rs = BIT_SET;
+@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
+ set_ctrl_bits();
+
+ /* maintain the strobe during 40 us */
+- usleep_range(40, 100);
++ udelay(40);
+
+ bits.e = BIT_CLR;
+ set_ctrl_bits();
+
+ /* the shortest data takes at least 45 us */
+- usleep_range(45, 100);
++ udelay(45);
+ }
+ spin_unlock_irq(&pprt_lock);
+
+@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
+ for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
+ /* present the data to the data port */
+ w_dtr(pprt, ' ');
+- usleep_range(60, 120);
++ udelay(60);
+ }
+
+ spin_unlock_irq(&pprt_lock);
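
All of these panel helpers execute with pprt_lock held via spin_lock_irq(), i.e. in atomic context where usleep_range() may not be called, so the delays revert to busy-waiting udelay(). A userspace model of the rule the patch restores; the assert stands in for the kernel's "scheduling while atomic" splat:

#include <assert.h>
#include <stdbool.h>

static bool in_atomic;                  /* models "spinlock held, IRQs off" */

static void spin_lock_irq(void)   { in_atomic = true; }
static void spin_unlock_irq(void) { in_atomic = false; }

static void udelay(int us)        { (void)us; /* busy-wait: always legal */ }

static void usleep_range(int lo, int hi)
{
        (void)lo; (void)hi;
        assert(!in_atomic);             /* sleeping in atomic context is a bug */
}

int main(void)
{
        spin_lock_irq();
        udelay(40);                     /* what the patch switches to */
        spin_unlock_irq();
        usleep_range(40, 100);          /* fine outside the lock */
        return 0;
}
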
+diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
+index 3b5835b..a5bbb33 100644
+--- a/drivers/staging/speakup/serialio.c
++++ b/drivers/staging/speakup/serialio.c
+@@ -6,6 +6,11 @@
+ #include "spk_priv.h"
+ #include "serialio.h"
+
++#include <linux/serial_core.h>
++/* WARNING: Do not change this to <linux/serial.h> without testing that
++ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
++#include <asm/serial.h>
++
+ #ifndef SERIAL_PORT_DFNS
+ #define SERIAL_PORT_DFNS
+ #endif
+@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
+ int baud = 9600, quot = 0;
+ unsigned int cval = 0;
+ int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
+- const struct old_serial_port *ser = rs_table + index;
++ const struct old_serial_port *ser;
+ int err;
+
++ if (index >= ARRAY_SIZE(rs_table)) {
++ pr_info("no port info for ttyS%d\n", index);
++ return NULL;
++ }
++ ser = rs_table + index;
++
+ /* Divisor, bytesize and parity */
+ quot = ser->baud_base / baud;
+ cval = cflag & (CSIZE | CSTOPB);
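
The speakup fix is a bounds check: the user-supplied index previously selected an entry of rs_table without any limit, reading past the end of the array. A minimal standalone version of the guard:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct port { int baud_base; };

static const struct port rs_table[] = { { 115200 }, { 115200 } };

/* Reject indexes past the table instead of reading off its end. */
static const struct port *port_init(unsigned int index)
{
        if (index >= ARRAY_SIZE(rs_table)) {
                fprintf(stderr, "no port info for ttyS%u\n", index);
                return NULL;
        }
        return &rs_table[index];
}

int main(void)
{
        return port_init(1) && !port_init(7) ? 0 : 1;
}
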
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 28fb301..88029cc 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+
+ if (dev) {
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
+- list_del(&tmr->tmr_list);
++ list_del_init(&tmr->tmr_list);
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+ }
+
+ kfree(tmr);
+ }
+
+-static void core_tmr_handle_tas_abort(
+- struct se_node_acl *tmr_nacl,
+- struct se_cmd *cmd,
+- int tas)
++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ {
+- bool remove = true;
++ unsigned long flags;
++ bool remove = true, send_tas;
+ /*
+ * TASK ABORTED status (TAS) bit support
+ */
+- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ send_tas = (cmd->transport_state & CMD_T_TAS);
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
++ if (send_tas) {
+ remove = false;
+ transport_send_task_abort(cmd);
+ }
+@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
+ return 1;
+ }
+
++static bool __target_check_io_state(struct se_cmd *se_cmd,
++ struct se_session *tmr_sess, int tas)
++{
++ struct se_session *sess = se_cmd->se_sess;
++
++ assert_spin_locked(&sess->sess_cmd_lock);
++ WARN_ON_ONCE(!irqs_disabled());
++ /*
++ * If command already reached CMD_T_COMPLETE state within
++ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
++ * this se_cmd has been passed to fabric driver and will
++ * not be aborted.
++ *
++ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
++ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
++ * long as se_cmd->cmd_kref is still active unless zero.
++ */
++ spin_lock(&se_cmd->t_state_lock);
++ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
++ pr_debug("Attempted to abort io tag: %llu already complete or"
++ " fabric stop, skipping\n", se_cmd->tag);
++ spin_unlock(&se_cmd->t_state_lock);
++ return false;
++ }
++ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
++ pr_debug("Attempted to abort io tag: %llu already shutdown,"
++ " skipping\n", se_cmd->tag);
++ spin_unlock(&se_cmd->t_state_lock);
++ return false;
++ }
++ se_cmd->transport_state |= CMD_T_ABORTED;
++
++ if ((tmr_sess != se_cmd->se_sess) && tas)
++ se_cmd->transport_state |= CMD_T_TAS;
++
++ spin_unlock(&se_cmd->t_state_lock);
++
++ return kref_get_unless_zero(&se_cmd->cmd_kref);
++}
++
+ void core_tmr_abort_task(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+@@ -130,34 +172,22 @@ void core_tmr_abort_task(
+ if (tmr->ref_task_tag != ref_tag)
+ continue;
+
+- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+- continue;
+-
+ printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+ se_cmd->se_tfo->get_fabric_name(), ref_tag);
+
+- spin_lock(&se_cmd->t_state_lock);
+- if (se_cmd->transport_state & CMD_T_COMPLETE) {
+- printk("ABORT_TASK: ref_tag: %llu already complete,"
+- " skipping\n", ref_tag);
+- spin_unlock(&se_cmd->t_state_lock);
++ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+-
+ target_put_sess_cmd(se_cmd);
+-
+ goto out;
+ }
+- se_cmd->transport_state |= CMD_T_ABORTED;
+- spin_unlock(&se_cmd->t_state_lock);
+-
+ list_del_init(&se_cmd->se_cmd_list);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+ cancel_work_sync(&se_cmd->work);
+ transport_wait_for_tasks(se_cmd);
+
+- target_put_sess_cmd(se_cmd);
+ transport_cmd_finish_abort(se_cmd, true);
++ target_put_sess_cmd(se_cmd);
+
+ printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+ " ref_tag: %llu\n", ref_tag);
+@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
+ struct list_head *preempt_and_abort_list)
+ {
+ LIST_HEAD(drain_tmr_list);
++ struct se_session *sess;
+ struct se_tmr_req *tmr_p, *tmr_pp;
+ struct se_cmd *cmd;
+ unsigned long flags;
++ bool rc;
+ /*
+ * Release all pending and outgoing TMRs aside from the received
+ * LUN_RESET tmr..
+@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+ continue;
+
++ sess = cmd->se_sess;
++ if (WARN_ON_ONCE(!sess))
++ continue;
++
++ spin_lock(&sess->sess_cmd_lock);
+ spin_lock(&cmd->t_state_lock);
+- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
++ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
++ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
+ spin_unlock(&cmd->t_state_lock);
++ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+ spin_unlock(&cmd->t_state_lock);
++ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
++ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
++ spin_unlock(&cmd->t_state_lock);
++ spin_unlock(&sess->sess_cmd_lock);
++ continue;
++ }
++ cmd->transport_state |= CMD_T_ABORTED;
+ spin_unlock(&cmd->t_state_lock);
+
++ rc = kref_get_unless_zero(&cmd->cmd_kref);
++ if (!rc) {
++ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++ spin_unlock(&sess->sess_cmd_lock);
++ continue;
++ }
++ spin_unlock(&sess->sess_cmd_lock);
++
+ list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ }
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
+ (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ tmr_p->function, tmr_p->response, cmd->t_state);
+
++ cancel_work_sync(&cmd->work);
++ transport_wait_for_tasks(cmd);
++
+ transport_cmd_finish_abort(cmd, 1);
++ target_put_sess_cmd(cmd);
+ }
+ }
+
+ static void core_tmr_drain_state_list(
+ struct se_device *dev,
+ struct se_cmd *prout_cmd,
+- struct se_node_acl *tmr_nacl,
++ struct se_session *tmr_sess,
+ int tas,
+ struct list_head *preempt_and_abort_list)
+ {
+ LIST_HEAD(drain_task_list);
++ struct se_session *sess;
+ struct se_cmd *cmd, *next;
+ unsigned long flags;
++ int rc;
+
+ /*
+ * Complete outstanding commands with TASK_ABORTED SAM status.
+@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
+ if (prout_cmd == cmd)
+ continue;
+
++ sess = cmd->se_sess;
++ if (WARN_ON_ONCE(!sess))
++ continue;
++
++ spin_lock(&sess->sess_cmd_lock);
++ rc = __target_check_io_state(cmd, tmr_sess, tas);
++ spin_unlock(&sess->sess_cmd_lock);
++ if (!rc)
++ continue;
++
+ list_move_tail(&cmd->state_list, &drain_task_list);
+ cmd->state_active = false;
+ }
+@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
+
+ while (!list_empty(&drain_task_list)) {
+ cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+- list_del(&cmd->state_list);
++ list_del_init(&cmd->state_list);
+
+ pr_debug("LUN_RESET: %s cmd: %p"
+ " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
+@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
+ * loop above, but we do it down here given that
+ * cancel_work_sync may block.
+ */
+- if (cmd->t_state == TRANSPORT_COMPLETE)
+- cancel_work_sync(&cmd->work);
+-
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
+- target_stop_cmd(cmd, &flags);
+-
+- cmd->transport_state |= CMD_T_ABORTED;
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ cancel_work_sync(&cmd->work);
++ transport_wait_for_tasks(cmd);
+
+- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
++ core_tmr_handle_tas_abort(cmd, tas);
++ target_put_sess_cmd(cmd);
+ }
+ }
+
+@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
+ {
+ struct se_node_acl *tmr_nacl = NULL;
+ struct se_portal_group *tmr_tpg = NULL;
++ struct se_session *tmr_sess = NULL;
+ int tas;
+ /*
+ * TASK_ABORTED status bit, this is configurable via ConfigFS
+@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
+ * or struct se_device passthrough..
+ */
+ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
++ tmr_sess = tmr->task_cmd->se_sess;
++ tmr_nacl = tmr_sess->se_node_acl;
++ tmr_tpg = tmr_sess->se_tpg;
+ if (tmr_nacl && tmr_tpg) {
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
+ " initiator port %s\n",
+@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
+ dev->transport->name, tas);
+
+ core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
++ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+ preempt_and_abort_list);
+
+ /*
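
The heart of the target-core hardening, visible in the transport.c hunks that follow, is deciding abort eligibility, setting CMD_T_ABORTED (and CMD_T_TAS where TAS applies), and taking the kref inside one locked section, so an abort and a normal completion can no longer race to the final put of the same command. A condensed model of that check-then-pin step, with simplified state bits in place of the driver's:

#include <pthread.h>
#include <stdbool.h>

enum { T_COMPLETE = 1, T_FABRIC_STOP = 2, T_ABORTED = 4 };

struct cmd {
        pthread_mutex_t lock;
        int state;
        int kref;
};

/* Decide, mark, and pin in one critical section; an aborter that
 * loses the race sees T_COMPLETE or T_FABRIC_STOP and backs off
 * instead of racing the completion path to the last reference. */
static bool check_io_state(struct cmd *c)
{
        bool ok = false;

        pthread_mutex_lock(&c->lock);
        if (!(c->state & (T_COMPLETE | T_FABRIC_STOP))) {
                c->state |= T_ABORTED;
                if (c->kref > 0) {      /* kref_get_unless_zero() */
                        c->kref++;
                        ok = true;
                }
        }
        pthread_mutex_unlock(&c->lock);
        return ok;
}

int main(void)
{
        struct cmd c = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

        if (!check_io_state(&c))
                return 1;               /* first abort attempt pins it */
        c.state |= T_COMPLETE;
        return check_io_state(&c);      /* completed: must return false */
}
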
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 4fdcee2..94f4ffa 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
+ }
+ EXPORT_SYMBOL(transport_deregister_session);
+
+-/*
+- * Called with cmd->t_state_lock held.
+- */
+ static void target_remove_from_state_list(struct se_cmd *cmd)
+ {
+ struct se_device *dev = cmd->se_dev;
+@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
+- if (write_pending)
+- cmd->t_state = TRANSPORT_WRITE_PENDING;
+-
+ if (remove_from_lists) {
+ target_remove_from_state_list(cmd);
+
+@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ cmd->se_lun = NULL;
+ }
+
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (write_pending)
++ cmd->t_state = TRANSPORT_WRITE_PENDING;
++
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
++ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
++
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_lun_remove_cmd(cmd);
+ /*
+@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+
+ if (transport_cmd_check_stop_to_fabric(cmd))
+ return;
+- if (remove)
++ if (remove && ack_kref)
+ transport_put_cmd(cmd);
+ }
+
+@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ * Check for case where an explicit ABORT_TASK has been received
+ * and transport_wait_for_tasks() will be waiting for completion..
+ */
+- if (cmd->transport_state & CMD_T_ABORTED &&
++ if (cmd->transport_state & CMD_T_ABORTED ||
+ cmd->transport_state & CMD_T_STOP) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ complete_all(&cmd->t_transport_stop_comp);
+@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
+ return true;
+ }
+
++static int __transport_check_aborted_status(struct se_cmd *, int);
++
+ void target_execute_cmd(struct se_cmd *cmd)
+ {
+ /*
+- * If the received CDB has aleady been aborted stop processing it here.
+- */
+- if (transport_check_aborted_status(cmd, 1))
+- return;
+-
+- /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
++ *
++ * If the received CDB has already been aborted, stop processing it here.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
++ if (__transport_check_aborted_status(cmd, 1)) {
++ spin_unlock_irq(&cmd->t_state_lock);
++ return;
++ }
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
+ }
+
+ /**
+- * transport_release_cmd - free a command
+- * @cmd: command to free
++ * transport_put_cmd - release a reference to a command
++ * @cmd: command to release
+ *
+- * This routine unconditionally frees a command, and reference counting
+- * or list removal must be done in the caller.
++ * This routine releases our reference to the command and frees it if possible.
+ */
+-static int transport_release_cmd(struct se_cmd *cmd)
++static int transport_put_cmd(struct se_cmd *cmd)
+ {
+ BUG_ON(!cmd->se_tfo);
+-
+- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+- core_tmr_release_req(cmd->se_tmr_req);
+- if (cmd->t_task_cdb != cmd->__t_task_cdb)
+- kfree(cmd->t_task_cdb);
+ /*
+ * If this cmd has been setup with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
+ return target_put_sess_cmd(cmd);
+ }
+
+-/**
+- * transport_put_cmd - release a reference to a command
+- * @cmd: command to release
+- *
+- * This routine releases our reference to the command and frees it if possible.
+- */
+-static int transport_put_cmd(struct se_cmd *cmd)
+-{
+- transport_free_pages(cmd);
+- return transport_release_cmd(cmd);
+-}
+-
+ void *transport_kmap_data_sg(struct se_cmd *cmd)
+ {
+ struct scatterlist *sg = cmd->t_data_sg;
+@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
+ }
+ }
+
+-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++static bool
++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
++ unsigned long *flags);
++
++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+ {
+ unsigned long flags;
++
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++}
++
++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++{
+ int ret = 0;
++ bool aborted = false, tas = false;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+- transport_wait_for_tasks(cmd);
++ target_wait_free_cmd(cmd, &aborted, &tas);
+
+- ret = transport_release_cmd(cmd);
++ if (!aborted || tas)
++ ret = transport_put_cmd(cmd);
+ } else {
+ if (wait_for_tasks)
+- transport_wait_for_tasks(cmd);
++ target_wait_free_cmd(cmd, &aborted, &tas);
+ /*
+ * Handle WRITE failure case where transport_generic_new_cmd()
+ * has already added se_cmd to state_list, but fabric has
+ * failed command before I/O submission.
+ */
+- if (cmd->state_active) {
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->state_active)
+ target_remove_from_state_list(cmd);
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+- }
+
+ if (cmd->se_lun)
+ transport_lun_remove_cmd(cmd);
+
+- ret = transport_put_cmd(cmd);
++ if (!aborted || tas)
++ ret = transport_put_cmd(cmd);
++ }
++ /*
++ * If the task has been internally aborted due to TMR ABORT_TASK
++ * or LUN_RESET, target_core_tmr.c is responsible for performing
++ * the remaining calls to target_put_sess_cmd(), and not the
++ * callers of this function.
++ */
++ if (aborted) {
++ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
++ wait_for_completion(&cmd->cmd_wait_comp);
++ cmd->se_tfo->release_cmd(cmd);
++ ret = 1;
+ }
+ return ret;
+ }
+@@ -2508,26 +2515,46 @@ out:
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+
++static void target_free_cmd_mem(struct se_cmd *cmd)
++{
++ transport_free_pages(cmd);
++
++ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
++ core_tmr_release_req(cmd->se_tmr_req);
++ if (cmd->t_task_cdb != cmd->__t_task_cdb)
++ kfree(cmd->t_task_cdb);
++}
++
+ static void target_release_cmd_kref(struct kref *kref)
+ {
+ struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+ struct se_session *se_sess = se_cmd->se_sess;
+ unsigned long flags;
++ bool fabric_stop;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (list_empty(&se_cmd->se_cmd_list)) {
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++ target_free_cmd_mem(se_cmd);
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return;
+ }
+- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
++
++ spin_lock(&se_cmd->t_state_lock);
++ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++ spin_unlock(&se_cmd->t_state_lock);
++
++ if (se_cmd->cmd_wait_set || fabric_stop) {
++ list_del_init(&se_cmd->se_cmd_list);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++ target_free_cmd_mem(se_cmd);
+ complete(&se_cmd->cmd_wait_comp);
+ return;
+ }
+- list_del(&se_cmd->se_cmd_list);
++ list_del_init(&se_cmd->se_cmd_list);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
++ target_free_cmd_mem(se_cmd);
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+
+@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
+ struct se_session *se_sess = se_cmd->se_sess;
+
+ if (!se_sess) {
++ target_free_cmd_mem(se_cmd);
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ return 1;
+ }
+@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ {
+ struct se_cmd *se_cmd;
+ unsigned long flags;
++ int rc;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ se_sess->sess_tearing_down = 1;
+ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+- se_cmd->cmd_wait_set = 1;
++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
++ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
++ if (rc) {
++ se_cmd->cmd_wait_set = 1;
++ spin_lock(&se_cmd->t_state_lock);
++ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++ spin_unlock(&se_cmd->t_state_lock);
++ }
++ }
+
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ }
+@@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ {
+ struct se_cmd *se_cmd, *tmp_cmd;
+ unsigned long flags;
++ bool tas;
+
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
+- list_del(&se_cmd->se_cmd_list);
++ list_del_init(&se_cmd->se_cmd_list);
+
+ pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ " %d\n", se_cmd, se_cmd->t_state,
+ se_cmd->se_tfo->get_cmd_state(se_cmd));
+
++ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
++ tas = (se_cmd->transport_state & CMD_T_TAS);
++ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
++
++ if (!target_put_sess_cmd(se_cmd)) {
++ if (tas)
++ target_put_sess_cmd(se_cmd);
++ }
++
+ wait_for_completion(&se_cmd->cmd_wait_comp);
+ pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ " fabric state: %d\n", se_cmd, se_cmd->t_state,
+@@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
+ wait_for_completion(&lun->lun_ref_comp);
+ }
+
+-/**
+- * transport_wait_for_tasks - wait for completion to occur
+- * @cmd: command to wait
+- *
+- * Called from frontend fabric context to wait for storage engine
+- * to pause and/or release frontend generated struct se_cmd.
+- */
+-bool transport_wait_for_tasks(struct se_cmd *cmd)
++static bool
++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
++ bool *aborted, bool *tas, unsigned long *flags)
++ __releases(&cmd->t_state_lock)
++ __acquires(&cmd->t_state_lock)
+ {
+- unsigned long flags;
+
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
++ assert_spin_locked(&cmd->t_state_lock);
++ WARN_ON_ONCE(!irqs_disabled());
++
++ if (fabric_stop)
++ cmd->transport_state |= CMD_T_FABRIC_STOP;
++
++ if (cmd->transport_state & CMD_T_ABORTED)
++ *aborted = true;
++
++ if (cmd->transport_state & CMD_T_TAS)
++ *tas = true;
++
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+- }
+
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+- }
+
+- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ if (!(cmd->transport_state & CMD_T_ACTIVE))
++ return false;
++
++ if (fabric_stop && *aborted)
+ return false;
+- }
+
+ cmd->transport_state |= CMD_T_STOP;
+
+- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
+- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
++ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
++ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
++ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ wait_for_completion(&cmd->t_transport_stop_comp);
+
+- spin_lock_irqsave(&cmd->t_state_lock, flags);
++ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
+- cmd->tag);
++ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
++ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
++
++ return true;
++}
+
++/**
++ * transport_wait_for_tasks - wait for completion to occur
++ * @cmd: command to wait
++ *
++ * Called from frontend fabric context to wait for storage engine
++ * to pause and/or release frontend generated struct se_cmd.
++ */
++bool transport_wait_for_tasks(struct se_cmd *cmd)
++{
++ unsigned long flags;
++ bool ret, aborted = false, tas = false;
++
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+- return true;
++ return ret;
+ }
+ EXPORT_SYMBOL(transport_wait_for_tasks);
+
+@@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ }
+ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+
+-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++ __releases(&cmd->t_state_lock)
++ __acquires(&cmd->t_state_lock)
+ {
++ assert_spin_locked(&cmd->t_state_lock);
++ WARN_ON_ONCE(!irqs_disabled());
++
+ if (!(cmd->transport_state & CMD_T_ABORTED))
+ return 0;
+-
+ /*
+ * If cmd has been aborted but either no status is to be sent or it has
+ * already been sent, just return
+ */
+- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
++ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
++ if (send_status)
++ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ return 1;
++ }
+
+- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
+- cmd->t_task_cdb[0], cmd->tag);
++ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
++ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
+
+ cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ trace_target_cmd_complete(cmd);
++
++ spin_unlock_irq(&cmd->t_state_lock);
+ cmd->se_tfo->queue_status(cmd);
++ spin_lock_irq(&cmd->t_state_lock);
+
+ return 1;
+ }
++
++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++{
++ int ret;
++
++ spin_lock_irq(&cmd->t_state_lock);
++ ret = __transport_check_aborted_status(cmd, send_status);
++ spin_unlock_irq(&cmd->t_state_lock);
++
++ return ret;
++}
+ EXPORT_SYMBOL(transport_check_aborted_status);
+
+ void transport_send_task_abort(struct se_cmd *cmd)
+@@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ */
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+- cmd->transport_state |= CMD_T_ABORTED;
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ goto send_abort;
++ }
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ }
++send_abort:
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+
+ transport_lun_remove_cmd(cmd);
+@@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work)
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+ struct se_device *dev = cmd->se_dev;
+ struct se_tmr_req *tmr = cmd->se_tmr_req;
++ unsigned long flags;
+ int ret;
+
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->transport_state & CMD_T_ABORTED) {
++ tmr->response = TMR_FUNCTION_REJECTED;
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ goto check_stop;
++ }
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ switch (tmr->function) {
+ case TMR_ABORT_TASK:
+ core_tmr_abort_task(dev, tmr, cmd->se_sess);
+@@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work)
+ break;
+ }
+
++ spin_lock_irqsave(&cmd->t_state_lock, flags);
++ if (cmd->transport_state & CMD_T_ABORTED) {
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++ goto check_stop;
++ }
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
++ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ cmd->se_tfo->queue_tm_rsp(cmd);
+
++check_stop:
+ transport_cmd_check_stop_to_fabric(cmd);
+ }
+
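The teardown hunks above lean on kref_get_unless_zero(): each command is pinned only while its refcount is still nonzero, so a command whose final put is already in flight is skipped instead of being resurrected. A minimal sketch of that pattern, on a hypothetical refcounted object (struct obj and its helpers are illustrative, not target-core code):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
};

static void obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

/* Return the object with a fresh reference held, or NULL if it is dying. */
static struct obj *obj_tryget(struct obj *o)
{
	/* kref_get_unless_zero() fails once the final kref_put() has begun */
	return kref_get_unless_zero(&o->ref) ? o : NULL;
}

static void obj_put(struct obj *o)
{
	kref_put(&o->ref, obj_release);
}

The list walk in target_sess_cmd_list_set_waiting() is exactly obj_tryget() applied under the session lock: only commands it manages to pin get cmd_wait_set and CMD_T_FABRIC_STOP.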
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index 2f9f708..ea9366a 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ next_target = instance->target;
+ dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
+
++ if (!instance->initialized) {
++ if (throttle) {
++ next_target = (cur_state + 1) >= instance->upper ?
++ instance->upper :
++ ((cur_state + 1) < instance->lower ?
++ instance->lower : (cur_state + 1));
++ } else {
++ next_target = THERMAL_NO_TARGET;
++ }
++
++ return next_target;
++ }
++
+ switch (trend) {
+ case THERMAL_TREND_RAISING:
+ if (throttle) {
+@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ old_target, (int)instance->target);
+
+- if (old_target == instance->target)
++ if (instance->initialized && old_target == instance->target)
+ continue;
+
+ /* Activate a passive thermal instance */
+@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ instance->target == THERMAL_NO_TARGET)
+ update_passive_instance(tz, trip_type, -1);
+
+-
++ instance->initialized = true;
+ instance->cdev->updated = false; /* cdev needs update */
+ }
+
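The !initialized branch added above amounts to clamping cur_state + 1 into the instance's [lower, upper] window when throttling, and parking the instance otherwise. A flattened sketch of the same decision (first_target() is a made-up name, not a driver function):

#include <linux/kernel.h>
#include <linux/thermal.h>

static unsigned long first_target(unsigned long cur_state,
				  unsigned long lower, unsigned long upper,
				  bool throttle)
{
	if (!throttle)
		return THERMAL_NO_TARGET;
	/* clamp_val() bounds cur_state + 1 to [lower, upper] */
	return clamp_val(cur_state + 1, lower, upper);
}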
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index d9e525c..ba08b55 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -37,6 +37,7 @@
+ #include <linux/of.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
++#include <linux/suspend.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/thermal.h>
+@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
+ static DEFINE_MUTEX(thermal_list_lock);
+ static DEFINE_MUTEX(thermal_governor_lock);
+
++static atomic_t in_suspend;
++
+ static struct thermal_governor *def_governor;
+
+ static struct thermal_governor *__find_governor(const char *name)
+@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
+ mutex_unlock(&tz->lock);
+
+ trace_thermal_temperature(tz);
+- dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+- tz->last_temperature, tz->temperature);
++ if (tz->last_temperature == THERMAL_TEMP_INVALID)
++ dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
++ tz->temperature);
++ else
++ dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
++ tz->last_temperature, tz->temperature);
++}
++
++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
++{
++ struct thermal_instance *pos;
++
++ tz->temperature = THERMAL_TEMP_INVALID;
++ tz->passive = 0;
++ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
++ pos->initialized = false;
+ }
+
+ void thermal_zone_device_update(struct thermal_zone_device *tz)
+ {
+ int count;
+
++ if (atomic_read(&in_suspend))
++ return;
++
+ if (!tz->ops->get_temp)
+ return;
+
+@@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ if (!result) {
+ list_add_tail(&dev->tz_node, &tz->thermal_instances);
+ list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
++ atomic_set(&tz->need_update, 1);
+ }
+ mutex_unlock(&cdev->lock);
+ mutex_unlock(&tz->lock);
+@@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np,
+ const struct thermal_cooling_device_ops *ops)
+ {
+ struct thermal_cooling_device *cdev;
++ struct thermal_zone_device *pos = NULL;
+ int result;
+
+ if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+@@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np,
+ /* Update binding information for 'this' new cdev */
+ bind_cdev(cdev);
+
++ mutex_lock(&thermal_list_lock);
++ list_for_each_entry(pos, &thermal_tz_list, node)
++ if (atomic_cmpxchg(&pos->need_update, 1, 0))
++ thermal_zone_device_update(pos);
++ mutex_unlock(&thermal_list_lock);
++
+ return cdev;
+ }
+
+@@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ tz->trips = trips;
+ tz->passive_delay = passive_delay;
+ tz->polling_delay = polling_delay;
++ /* A new thermal zone needs to be updated anyway. */
++ atomic_set(&tz->need_update, 1);
+
+ dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ result = device_register(&tz->device);
+@@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+
+ INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+
+- thermal_zone_device_update(tz);
++ thermal_zone_device_reset(tz);
++ /* Update the new thermal zone and mark it as already updated. */
++ if (atomic_cmpxchg(&tz->need_update, 1, 0))
++ thermal_zone_device_update(tz);
+
+ return tz;
+
+@@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void)
+ thermal_gov_power_allocator_unregister();
+ }
+
++static int thermal_pm_notify(struct notifier_block *nb,
++ unsigned long mode, void *_unused)
++{
++ struct thermal_zone_device *tz;
++
++ switch (mode) {
++ case PM_HIBERNATION_PREPARE:
++ case PM_RESTORE_PREPARE:
++ case PM_SUSPEND_PREPARE:
++ atomic_set(&in_suspend, 1);
++ break;
++ case PM_POST_HIBERNATION:
++ case PM_POST_RESTORE:
++ case PM_POST_SUSPEND:
++ atomic_set(&in_suspend, 0);
++ list_for_each_entry(tz, &thermal_tz_list, node) {
++ thermal_zone_device_reset(tz);
++ thermal_zone_device_update(tz);
++ }
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static struct notifier_block thermal_pm_nb = {
++ .notifier_call = thermal_pm_notify,
++};
++
+ static int __init thermal_init(void)
+ {
+ int result;
+@@ -2160,6 +2223,11 @@ static int __init thermal_init(void)
+ if (result)
+ goto exit_netlink;
+
++ result = register_pm_notifier(&thermal_pm_nb);
++ if (result)
++ pr_warn("Thermal: Can not register suspend notifier, return %d\n",
++ result);
++
+ return 0;
+
+ exit_netlink:
+@@ -2179,6 +2247,7 @@ error:
+
+ static void __exit thermal_exit(void)
+ {
++ unregister_pm_notifier(&thermal_pm_nb);
+ of_thermal_destroy_zones();
+ genetlink_exit();
+ class_unregister(&thermal_class);
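The suspend plumbing above is the stock PM-notifier pattern: register one callback at init, raise a flag on the *_PREPARE events so periodic work goes quiet, and rescan state on the matching POST events. A stripped-down, self-contained sketch (the demo_* names are hypothetical):

#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/atomic.h>

static atomic_t demo_suspended;

static int demo_pm_notify(struct notifier_block *nb,
			  unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_SUSPEND_PREPARE:
		atomic_set(&demo_suspended, 1);	/* quiesce periodic updates */
		break;
	case PM_POST_SUSPEND:
		atomic_set(&demo_suspended, 0);	/* state may be stale: rescan */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notify,
};

static int __init demo_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}

static void __exit demo_exit(void)
{
	unregister_pm_notifier(&demo_pm_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");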
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index d7ac1fc..749d41a 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -41,6 +41,7 @@ struct thermal_instance {
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+ int trip;
++ bool initialized;
+ unsigned long upper; /* Highest cooling state for this trip point */
+ unsigned long lower; /* Lowest cooling state for this trip point */
+ unsigned long target; /* expected cooling state */
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e4c70dc..fa4e239 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
+ },
+ #endif
+
++	/* Samsung phone in firmware update mode */
++ { USB_DEVICE(0x04e8, 0x685d),
++ .driver_info = IGNORE_DEVICE,
++ },
++
+ /* Exclude Infineon Flash Loader utility */
+ { USB_DEVICE(0x058b, 0x0041),
+ .driver_info = IGNORE_DEVICE,
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 36f1cb7..78be201 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -853,7 +853,6 @@ struct dwc3 {
+ unsigned pullups_connected:1;
+ unsigned resize_fifos:1;
+ unsigned setup_packet_pending:1;
+- unsigned start_config_issued:1;
+ unsigned three_stage_setup:1;
+ unsigned usb3_lpm_capable:1;
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 5320e93..b13912d 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ int ret;
+ u32 reg;
+
+- dwc->start_config_issued = false;
+ cfg = le16_to_cpu(ctrl->wValue);
+
+ switch (state) {
+@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
+ ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ break;
+- case USB_REQ_SET_INTERFACE:
+- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+- dwc->start_config_issued = false;
+- /* Fall through */
+ default:
+ dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a58376f..69ffe6e 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
+ dep->trb_pool_dma = 0;
+ }
+
++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
++
++/**
++ * dwc3_gadget_start_config - Configure EP resources
++ * @dwc: pointer to our controller context structure
++ * @dep: endpoint that is being enabled
++ *
++ * The assignment of transfer resources cannot perfectly follow the
++ * databook, because the controller driver does not have all knowledge
++ * of the configuration in advance. It is given this
++ * information piecemeal by the composite gadget framework after every
++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
++ * programming model in this scenario can cause errors. For two
++ * reasons:
++ *
++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
++ * multiple interfaces.
++ *
++ * 2) The databook does not mention doing more DEPXFERCFG for new
++ * endpoint on alt setting (8.1.6).
++ *
++ * The following simplified method is used instead:
++ *
++ * All hardware endpoints can be assigned a transfer resource and this
++ * setting will stay persistent until either a core reset or
++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
++ * do DEPXFERCFG for every hardware endpoint as well. We are
++ * guaranteed that there are as many transfer resources as endpoints.
++ *
++ * This function is called for each endpoint when it is being enabled
++ * but is triggered only when called for EP0-out, which always happens
++ * first, and which should only happen in one of the above conditions.
++ */
+ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
+ {
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
++ int i;
++ int ret;
++
++ if (dep->number)
++ return 0;
+
+ memset(&params, 0x00, sizeof(params));
++ cmd = DWC3_DEPCMD_DEPSTARTCFG;
+
+- if (dep->number != 1) {
+- cmd = DWC3_DEPCMD_DEPSTARTCFG;
+- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
+- if (dep->number > 1) {
+- if (dwc->start_config_issued)
+- return 0;
+- dwc->start_config_issued = true;
+- cmd |= DWC3_DEPCMD_PARAM(2);
+- }
++ ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++ if (ret)
++ return ret;
+
+- return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
++ struct dwc3_ep *dep = dwc->eps[i];
++
++ if (!dep)
++ continue;
++
++ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
++ if (ret)
++ return ret;
+ }
+
+ return 0;
+@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ struct dwc3_trb *trb_st_hw;
+ struct dwc3_trb *trb_link;
+
+- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+- if (ret)
+- return ret;
+-
+ dep->endpoint.desc = desc;
+ dep->comp_desc = comp_desc;
+ dep->type = usb_endpoint_type(desc);
+@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
+ }
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+- dwc->start_config_issued = false;
+-
+ /* Start with SuperSpeed Default */
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc3_disconnect_gadget(dwc);
+- dwc->start_config_issued = false;
+
+ dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->setup_packet_pending = false;
+@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+
+ dwc3_stop_active_transfers(dwc);
+ dwc3_clear_stall_all_ep(dwc);
+- dwc->start_config_issued = false;
+
+ /* Reset device address to zero */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 1dd9919..a7caf53 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index db86e51..8849439a 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TOSHIBA_PRODUCT_G450 0x0d45
+
+ #define ALINK_VENDOR_ID 0x1e0e
++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
+ #define ALINK_PRODUCT_PH300 0x9100
+ #define ALINK_PRODUCT_3GU 0x9200
+
+@@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
++ .reserved = BIT(5) | BIT(6),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(2),
+@@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 7efc329..7d3e5d0 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ */
+ if (vb->num_pfns != 0)
+ tell_host(vb, vb->deflate_vq);
+- mutex_unlock(&vb->balloon_lock);
+ release_pages_balloon(vb);
++ mutex_unlock(&vb->balloon_lock);
+ return num_freed_pages;
+ }
+
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 78f804a..2046a68 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -545,6 +545,7 @@ err_enable_device:
+ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ {
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
++ struct device *dev = get_device(&vp_dev->vdev.dev);
+
+ unregister_virtio_device(&vp_dev->vdev);
+
+@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ virtio_pci_modern_remove(vp_dev);
+
+ pci_disable_device(pci_dev);
++ put_device(dev);
+ }
+
+ static struct pci_driver virtio_pci_driver = {
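The virtio-pci fix takes its own reference on the struct device before unregistering, because unregister_virtio_device() may drop what was the last reference while the remaining teardown still dereferences vp_dev. The shape of that bracketing, with a hypothetical context type and callbacks:

#include <linux/device.h>

struct my_ctx {
	struct device dev;	/* refcounted; its release frees my_ctx */
};

static void my_ctx_teardown(struct my_ctx *ctx,
			    void (*do_unregister)(struct my_ctx *),
			    void (*do_late_cleanup)(struct my_ctx *))
{
	/* Pin the embedded device: unregister may drop the last reference. */
	struct device *dev = get_device(&ctx->dev);

	do_unregister(ctx);	/* ctx could otherwise be freed in here */
	do_late_cleanup(ctx);	/* still safe: we hold our own reference */
	put_device(dev);	/* frees ctx if ours was the last reference */
}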
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index 73dafdc..fb02214 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ /*
+ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+ * to access the BARs where the MSI-X entries reside.
++ * For VF devices, however, it is the PF that needs to be checked.
+ */
+- pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
+ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+ return -ENXIO;
+
+@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
+ struct xen_pcibk_dev_data *dev_data = NULL;
+ struct xen_pci_op *op = &pdev->op;
+ int test_intx = 0;
++#ifdef CONFIG_PCI_MSI
++ unsigned int nr = 0;
++#endif
+
+ *op = pdev->sh_info->op;
+ barrier();
+@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_enable_msix:
++ nr = op->value;
+ op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_disable_msix:
+@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+ unsigned int i;
+
+- for (i = 0; i < op->value; i++)
++ for (i = 0; i < nr; i++)
+ pdev->sh_info->op.msix_entries[i].vector =
+ op->msix_entries[i].vector;
+ }
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index ad4eb10..51387d7 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -939,12 +939,12 @@ out:
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+ out_free:
+- mutex_lock(&tpg->tv_tpg_mutex);
+- tpg->tv_tpg_fe_count--;
+- mutex_unlock(&tpg->tv_tpg_mutex);
+-
+- if (err)
++ if (err) {
++ mutex_lock(&tpg->tv_tpg_mutex);
++ tpg->tv_tpg_fe_count--;
++ mutex_unlock(&tpg->tv_tpg_mutex);
+ kfree(new);
++ }
+
+ return err;
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 0ddca67..4958360 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
+ ret = get_anon_bdev(&root->anon_dev);
+ if (ret)
+ goto free_writers;
++
++ mutex_lock(&root->objectid_mutex);
++ ret = btrfs_find_highest_objectid(root,
++ &root->highest_objectid);
++ if (ret) {
++ mutex_unlock(&root->objectid_mutex);
++ goto free_root_dev;
++ }
++
++ ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
++
++ mutex_unlock(&root->objectid_mutex);
++
+ return 0;
+
++free_root_dev:
++ free_anon_bdev(root->anon_dev);
+ free_writers:
+ btrfs_free_subvolume_writers(root->subv_writers);
+ fail:
+@@ -2667,6 +2682,7 @@ int open_ctree(struct super_block *sb,
+ if (btrfs_check_super_csum(bh->b_data)) {
+ printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
+ err = -EINVAL;
++ brelse(bh);
+ goto fail_alloc;
+ }
+
+@@ -2899,6 +2915,18 @@ retry_root_backup:
+ tree_root->commit_root = btrfs_root_node(tree_root);
+ btrfs_set_root_refs(&tree_root->root_item, 1);
+
++ mutex_lock(&tree_root->objectid_mutex);
++ ret = btrfs_find_highest_objectid(tree_root,
++ &tree_root->highest_objectid);
++ if (ret) {
++ mutex_unlock(&tree_root->objectid_mutex);
++ goto recovery_tree_root;
++ }
++
++ ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
++
++ mutex_unlock(&tree_root->objectid_mutex);
++
+ ret = btrfs_read_roots(fs_info, tree_root);
+ if (ret)
+ goto recovery_tree_root;
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index 767a605..07573dc 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -515,7 +515,7 @@ out:
+ return ret;
+ }
+
+-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+ {
+ struct btrfs_path *path;
+ int ret;
+@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
+ int ret;
+ mutex_lock(&root->objectid_mutex);
+
+- if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
+- ret = btrfs_find_highest_objectid(root,
+- &root->highest_objectid);
+- if (ret)
+- goto out;
+- }
+-
+ if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
+ ret = -ENOSPC;
+ goto out;
+diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
+index ddb347b..c8e864b 100644
+--- a/fs/btrfs/inode-map.h
++++ b/fs/btrfs/inode-map.h
+@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans);
+
+ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
+
+ #endif
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 54b5f0d..52fc1b5 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6493,7 +6493,7 @@ out_unlock_inode:
+ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+ {
+- struct btrfs_trans_handle *trans;
++ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct inode *inode = d_inode(old_dentry);
+ u64 index;
+@@ -6519,6 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ trans = btrfs_start_transaction(root, 5);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
++ trans = NULL;
+ goto fail;
+ }
+
+@@ -6552,9 +6553,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ btrfs_log_new_name(trans, inode, NULL, parent);
+ }
+
+- btrfs_end_transaction(trans, root);
+ btrfs_balance_delayed_items(root);
+ fail:
++ if (trans)
++ btrfs_end_transaction(trans, root);
+ if (drop_inode) {
+ inode_dec_link_count(inode);
+ iput(inode);
+@@ -8548,15 +8550,28 @@ int btrfs_readpage(struct file *file, struct page *page)
+ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ struct extent_io_tree *tree;
+-
++ struct inode *inode = page->mapping->host;
++ int ret;
+
+ if (current->flags & PF_MEMALLOC) {
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+ }
++
++ /*
++ * If we are under memory pressure we will call this directly from the
++ * VM, we need to make sure we have the inode referenced for the ordered
++ * extent. If not just return like we didn't do anything.
++ * extent. If not, just return as if we didn't do anything.
++ if (!igrab(inode)) {
++ redirty_page_for_writepage(wbc, page);
++ return AOP_WRITEPAGE_ACTIVATE;
++ }
+ tree = &BTRFS_I(page->mapping->host)->io_tree;
+- return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++ btrfs_add_delayed_iput(inode);
++ return ret;
+ }
+
+ static int btrfs_writepages(struct address_space *mapping,
+@@ -9650,9 +9665,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ /*
+ * 2 items for inode item and ref
+ * 2 items for dir items
++ * 1 item for updating parent inode item
++ * 1 item for the inline extent item
+ * 1 item for xattr if selinux is on
+ */
+- trans = btrfs_start_transaction(root, 5);
++ trans = btrfs_start_transaction(root, 7);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 08fd3f0..f07d01b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
+ goto fail;
+ }
+
++ mutex_lock(&new_root->objectid_mutex);
++ new_root->highest_objectid = new_dirid;
++ mutex_unlock(&new_root->objectid_mutex);
++
+ /*
+ * insert the directory item
+ */
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 355a458..63a6152 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+- BUG_ON(ret);
++ if (ret) {
++ /*
++ * An empty symlink inode. Can happen in rare error paths when
++ * creating a symlink (transaction committed before the inode
++ * eviction handler removed the symlink inode items and a crash
++ * happened in between or the subvol was snapshoted in between).
++ * happened in between, or the subvol was snapshotted in between).
++ * can delete the symlink.
++ */
++ btrfs_err(root->fs_info,
++ "Found empty symlink inode %llu at root %llu",
++ ino, root->root_key.objectid);
++ ret = -EIO;
++ goto out;
++ }
+
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 24154e4..fe609b8 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
+ * there are other factors that may change the result (like a new metadata
+ * chunk).
+ *
++ * If metadata is exhausted, f_bavail will be 0.
++ *
+ * FIXME: not accurate for mixed block groups, total and free/used are ok,
+ * available appears slightly larger.
+ */
+@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ struct btrfs_space_info *found;
+ u64 total_used = 0;
+ u64 total_free_data = 0;
++ u64 total_free_meta = 0;
+ int bits = dentry->d_sb->s_blocksize_bits;
+ __be32 *fsid = (__be32 *)fs_info->fsid;
+ unsigned factor = 1;
+ struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+ int ret;
++ u64 thresh = 0;
+
+ /*
+ * holding chunk_muext to avoid allocating new chunks, holding
+@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ }
+ }
+ }
++ if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
++ total_free_meta += found->disk_total - found->disk_used;
+
+ total_used += found->disk_used;
+ }
+@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_bavail += div_u64(total_free_data, factor);
+ buf->f_bavail = buf->f_bavail >> bits;
+
++ /*
++ * We calculate the remaining metadata space minus global reserve. If
++ * this is (supposedly) smaller than zero, there's no space. But this
++ * does not hold in practice; the exhausted state is reached while there's still
++ * some positive delta. So we apply some guesswork and compare the
++ * delta to a 4M threshold. (Practically observed delta was ~2M.)
++ *
++ * We probably cannot calculate the exact threshold value because this
++ * depends on the internal reservations requested by various
++ * operations, so some operations that consume only a little metadata will
++ * succeed even if the Avail is zero. But this is better than the other
++ * way around.
++ */
++ thresh = 4 * 1024 * 1024;
++
++ if (total_free_meta - thresh < block_rsv->size)
++ buf->f_bavail = 0;
++
+ buf->f_type = BTRFS_SUPER_MAGIC;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
+ buf->f_namelen = BTRFS_NAME_LEN;
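The guesswork above reduces to: report zero available space once free metadata, less the global reserve, drops under a ~4 MiB margin. A compact restatement (metadata_exhausted() is illustrative, not a kernel helper):

#include <linux/types.h>

/* ~2M delta observed in practice; 4M leaves some headroom */
static inline bool metadata_exhausted(u64 total_free_meta, u64 global_rsv)
{
	const u64 thresh = 4 * 1024 * 1024;

	return total_free_meta < global_rsv + thresh;
}

Written as an addition, the comparison also avoids the unsigned underflow that total_free_meta - thresh would produce when free metadata is already below the threshold.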
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 9e08447..9c62a6f 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
+ spin_lock_init(&dev->reada_lock);
+ atomic_set(&dev->reada_in_flight, 0);
+ atomic_set(&dev->dev_stats_ccnt, 0);
++ btrfs_device_data_ordered_init(dev);
+ INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+ INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 602e844..01171d8 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
+ dio->io_error = -EIO;
+
+ if (dio->is_async && dio->rw == READ && dio->should_dirty) {
+- bio_check_pages_dirty(bio); /* transfers ownership */
+ err = bio->bi_error;
++ bio_check_pages_dirty(bio); /* transfers ownership */
+ } else {
+ bio_for_each_segment_all(bvec, bio, i) {
+ struct page *page = bvec->bv_page;
+diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
+index 90001da..66842e5 100644
+--- a/fs/efivarfs/file.c
++++ b/fs/efivarfs/file.c
+@@ -10,6 +10,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/slab.h>
++#include <linux/mount.h>
+
+ #include "internal.h"
+
+@@ -103,9 +104,78 @@ out_free:
+ return size;
+ }
+
++static int
++efivarfs_ioc_getxflags(struct file *file, void __user *arg)
++{
++ struct inode *inode = file->f_mapping->host;
++ unsigned int i_flags;
++ unsigned int flags = 0;
++
++ i_flags = inode->i_flags;
++ if (i_flags & S_IMMUTABLE)
++ flags |= FS_IMMUTABLE_FL;
++
++ if (copy_to_user(arg, &flags, sizeof(flags)))
++ return -EFAULT;
++ return 0;
++}
++
++static int
++efivarfs_ioc_setxflags(struct file *file, void __user *arg)
++{
++ struct inode *inode = file->f_mapping->host;
++ unsigned int flags;
++ unsigned int i_flags = 0;
++ int error;
++
++ if (!inode_owner_or_capable(inode))
++ return -EACCES;
++
++ if (copy_from_user(&flags, arg, sizeof(flags)))
++ return -EFAULT;
++
++ if (flags & ~FS_IMMUTABLE_FL)
++ return -EOPNOTSUPP;
++
++ if (!capable(CAP_LINUX_IMMUTABLE))
++ return -EPERM;
++
++ if (flags & FS_IMMUTABLE_FL)
++ i_flags |= S_IMMUTABLE;
++
++
++ error = mnt_want_write_file(file);
++ if (error)
++ return error;
++
++ mutex_lock(&inode->i_mutex);
++ inode_set_flags(inode, i_flags, S_IMMUTABLE);
++ mutex_unlock(&inode->i_mutex);
++
++ mnt_drop_write_file(file);
++
++ return 0;
++}
++
++long
++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
++{
++ void __user *arg = (void __user *)p;
++
++ switch (cmd) {
++ case FS_IOC_GETFLAGS:
++ return efivarfs_ioc_getxflags(file, arg);
++ case FS_IOC_SETFLAGS:
++ return efivarfs_ioc_setxflags(file, arg);
++ }
++
++ return -ENOTTY;
++}
++
+ const struct file_operations efivarfs_file_operations = {
+ .open = simple_open,
+ .read = efivarfs_file_read,
+ .write = efivarfs_file_write,
+ .llseek = no_llseek,
++ .unlocked_ioctl = efivarfs_file_ioctl,
+ };
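With these ioctls wired up, clearing the immutable bit from userspace (what "chattr -i" does) is a plain FS_IOC_GETFLAGS/FS_IOC_SETFLAGS round trip. A sketch; the variable path is only an example, and the caller needs CAP_LINUX_IMMUTABLE:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	const char *path =
		"/sys/firmware/efi/efivars/Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	unsigned int flags;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		return 1;
	}
	flags &= ~FS_IMMUTABLE_FL;	/* needs CAP_LINUX_IMMUTABLE */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0) {
		perror("FS_IOC_SETFLAGS");
		return 1;
	}
	return 0;
}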
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 3381b9d..e2ab6d0 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -15,7 +15,8 @@
+ #include "internal.h"
+
+ struct inode *efivarfs_get_inode(struct super_block *sb,
+- const struct inode *dir, int mode, dev_t dev)
++ const struct inode *dir, int mode,
++ dev_t dev, bool is_removable)
+ {
+ struct inode *inode = new_inode(sb);
+
+@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
++ inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_fop = &efivarfs_file_operations;
+@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
+ {
+- struct inode *inode;
++ struct inode *inode = NULL;
+ struct efivar_entry *var;
+ int namelen, i = 0, err = 0;
++ bool is_removable = false;
+
+ if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
+ return -EINVAL;
+
+- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+- if (!inode)
+- return -ENOMEM;
+-
+ var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+- if (!var) {
+- err = -ENOMEM;
+- goto out;
+- }
++ if (!var)
++ return -ENOMEM;
+
+ /* length of the variable name itself: remove GUID and separator */
+ namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
+@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ &var->var.VendorGuid);
+
++ if (efivar_variable_is_removable(var->var.VendorGuid,
++ dentry->d_name.name, namelen))
++ is_removable = true;
++
++ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
++ if (!inode) {
++ err = -ENOMEM;
++ goto out;
++ }
++
+ for (i = 0; i < namelen; i++)
+ var->var.VariableName[i] = dentry->d_name.name[i];
+
+@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ out:
+ if (err) {
+ kfree(var);
+- iput(inode);
++ if (inode)
++ iput(inode);
+ }
+ return err;
+ }
+diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
+index b5ff16a..b450518 100644
+--- a/fs/efivarfs/internal.h
++++ b/fs/efivarfs/internal.h
+@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
+ extern const struct inode_operations efivarfs_dir_inode_operations;
+ extern bool efivarfs_valid_name(const char *str, int len);
+ extern struct inode *efivarfs_get_inode(struct super_block *sb,
+- const struct inode *dir, int mode, dev_t dev);
++ const struct inode *dir, int mode, dev_t dev,
++ bool is_removable);
+
+ extern struct list_head efivarfs_list;
+
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 86a2121..abb244b 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ struct dentry *dentry, *root = sb->s_root;
+ unsigned long size = 0;
+ char *name;
+- int len, i;
++ int len;
+ int err = -ENOMEM;
++ bool is_removable = false;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ memcpy(entry->var.VariableName, name16, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+- len = ucs2_strlen(entry->var.VariableName);
++ len = ucs2_utf8size(entry->var.VariableName);
+
+ /* name, plus '-', plus GUID, plus NUL*/
+ name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ if (!name)
+ goto fail;
+
+- for (i = 0; i < len; i++)
+- name[i] = entry->var.VariableName[i] & 0xFF;
++ ucs2_as_utf8(name, entry->var.VariableName, len);
++
++ if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
++ is_removable = true;
+
+ name[len] = '-';
+
+@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+
+ name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+
+- inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
++ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
++ is_removable);
+ if (!inode)
+ goto fail_name;
+
+@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_d_op = &efivarfs_d_ops;
+ sb->s_time_gran = 1;
+
+- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
++ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
+ if (!inode)
+ return -ENOMEM;
+ inode->i_op = &efivarfs_dir_inode_operations;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ea433a7..06bda03 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -657,6 +657,34 @@ has_zeroout:
+ return retval;
+ }
+
++/*
++ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
++ * we have to be careful as someone else may be manipulating b_state as well.
++ */
++static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
++{
++ unsigned long old_state;
++ unsigned long new_state;
++
++ flags &= EXT4_MAP_FLAGS;
++
++ /* Dummy buffer_head? Set non-atomically. */
++ if (!bh->b_page) {
++ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
++ return;
++ }
++ /*
++ * Someone else may be modifying b_state. Be careful! This is ugly but
++ * once we get rid of using bh as a container for mapping information
++ * to pass to / from get_block functions, this can go away.
++ */
++ do {
++ old_state = READ_ONCE(bh->b_state);
++ new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
++ } while (unlikely(
++ cmpxchg(&bh->b_state, old_state, new_state) != old_state));
++}
++
+ /* Maximum number of blocks we map for direct IO at once. */
+ #define DIO_MAX_BLOCKS 4096
+
+@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
+ ext4_io_end_t *io_end = ext4_inode_aio(inode);
+
+ map_bh(bh, inode->i_sb, map.m_pblk);
+- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++ ext4_update_bh_state(bh, map.m_flags);
+ if (IS_DAX(inode) && buffer_unwritten(bh)) {
+ /*
+ * dgc: I suspect unwritten conversion on ext4+DAX is
+@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+ return ret;
+
+ map_bh(bh, inode->i_sb, map.m_pblk);
+- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++ ext4_update_bh_state(bh, map.m_flags);
+
+ if (buffer_unwritten(bh)) {
+ /* A delayed write to unwritten bh should be marked
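ext4_update_bh_state() above is the canonical lock-free read-modify-write loop: snapshot the word with READ_ONCE(), compute the new value, and retry whenever cmpxchg() observes that someone else changed the word in between. The same skeleton on an arbitrary flags word (update_flags() is illustrative):

#include <linux/atomic.h>
#include <linux/compiler.h>

static void update_flags(unsigned long *word, unsigned long mask,
			 unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	do {
		old_state = READ_ONCE(*word);
		new_state = (old_state & ~mask) | (flags & mask);
		/* cmpxchg() stores new_state only if *word still equals old_state */
	} while (cmpxchg(word, old_state, new_state) != old_state);
}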
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 023f6a1..e5232bb 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ struct inode_switch_wbs_context *isw =
+ container_of(work, struct inode_switch_wbs_context, work);
+ struct inode *inode = isw->inode;
++ struct super_block *sb = inode->i_sb;
+ struct address_space *mapping = inode->i_mapping;
+ struct bdi_writeback *old_wb = inode->i_wb;
+ struct bdi_writeback *new_wb = isw->new_wb;
+@@ -423,6 +424,7 @@ skip_switch:
+ wb_put(new_wb);
+
+ iput(inode);
++ deactivate_super(sb);
+ kfree(isw);
+ }
+
+@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+
+ /* while holding I_WB_SWITCH, no one else can update the association */
+ spin_lock(&inode->i_lock);
++
+ if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
+- inode_to_wb(inode) == isw->new_wb) {
+- spin_unlock(&inode->i_lock);
+- goto out_free;
+- }
++ inode_to_wb(inode) == isw->new_wb)
++ goto out_unlock;
++
++ if (!atomic_inc_not_zero(&inode->i_sb->s_active))
++ goto out_unlock;
++
+ inode->i_state |= I_WB_SWITCH;
+ spin_unlock(&inode->i_lock);
+
+@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+ call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+ return;
+
++out_unlock:
++ spin_unlock(&inode->i_lock);
+ out_free:
+ if (isw->new_wb)
+ wb_put(isw->new_wb);
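The writeback fix pins the superblock with atomic_inc_not_zero(&sb->s_active) before queueing the asynchronous wb switch and releases it with deactivate_super() when the work item finishes, so the sb cannot be torn down mid-switch. Roughly (switch_ctx and the worker body are hypothetical):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct switch_ctx {
	struct work_struct item;
	struct super_block *sb;
};

/* producer: queue the async switch only while the sb is still active */
static bool queue_switch(struct switch_ctx *c, struct workqueue_struct *wq)
{
	if (!atomic_inc_not_zero(&c->sb->s_active))
		return false;	/* sb already going away; do nothing */
	return queue_work(wq, &c->item);
}

/* work item: the pinned reference keeps the sb valid until we are done */
static void switch_work_fn(struct work_struct *w)
{
	struct switch_ctx *c = container_of(w, struct switch_ctx, item);

	/* ... perform the actual switch against c->sb ... */
	deactivate_super(c->sb);	/* pairs with atomic_inc_not_zero() */
	kfree(c);
}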
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 2ac99db..5a7b322 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+
+ init_special_inode(inode, mode, dev);
+ err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
+- if (!err)
++ if (err)
+ goto out_free;
+
+ err = read_name(inode, name);
+ __putname(name);
+ if (err)
+ goto out_put;
+- if (err)
+- goto out_put;
+
+ d_instantiate(dentry, inode);
+ return 0;
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index ae4d5a1..bffb908 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
+ struct inode *inode = d_inode(dentry);
+ dnode_secno dno;
+ int r;
+- int rep = 0;
+ int err;
+
+ hpfs_lock(dir->i_sb);
+ hpfs_adjust_length(name, &len);
+-again:
++
+ err = -ENOENT;
+ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
+ if (!de)
+@@ -400,33 +399,9 @@ again:
+ hpfs_error(dir->i_sb, "there was error when removing dirent");
+ err = -EFSERROR;
+ break;
+- case 2: /* no space for deleting, try to truncate file */
+-
++ case 2: /* no space for deleting */
+ err = -ENOSPC;
+- if (rep++)
+- break;
+-
+- dentry_unhash(dentry);
+- if (!d_unhashed(dentry)) {
+- hpfs_unlock(dir->i_sb);
+- return -ENOSPC;
+- }
+- if (generic_permission(inode, MAY_WRITE) ||
+- !S_ISREG(inode->i_mode) ||
+- get_write_access(inode)) {
+- d_rehash(dentry);
+- } else {
+- struct iattr newattrs;
+- /*pr_info("truncating file before delete.\n");*/
+- newattrs.ia_size = 0;
+- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+- err = notify_change(dentry, &newattrs, NULL);
+- put_write_access(inode);
+- if (!err)
+- goto again;
+- }
+- hpfs_unlock(dir->i_sb);
+- return -ENOSPC;
++ break;
+ default:
+ drop_nlink(inode);
+ err = 0;
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d2b326..6333263 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ goto out;
+ }
+
+-again:
+ error = flock_to_posix_lock(filp, file_lock, &flock);
+ if (error)
+ goto out;
+@@ -2224,19 +2223,22 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- /*
+- * we need that spin_lock here - it prevents reordering between
+- * update of i_flctx->flc_posix and check for it done in close().
+- * rcu_read_lock() wouldn't do.
+- */
+- spin_lock(&current->files->file_lock);
+- f = fcheck(fd);
+- spin_unlock(&current->files->file_lock);
+- if (!error && f != filp && flock.l_type != F_UNLCK) {
+- flock.l_type = F_UNLCK;
+- goto again;
++ if (!error && file_lock->fl_type != F_UNLCK) {
++ /*
++ * We need that spin_lock here - it prevents reordering between
++ * update of i_flctx->flc_posix and check for it done in
++ * close(). rcu_read_lock() wouldn't do.
++ */
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (f != filp) {
++ file_lock->fl_type = F_UNLCK;
++ error = do_lock_file_wait(filp, cmd, file_lock);
++ WARN_ON_ONCE(error);
++ error = -EBADF;
++ }
+ }
+-
+ out:
+ locks_free_lock(file_lock);
+ return error;
+@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ goto out;
+ }
+
+-again:
+ error = flock64_to_posix_lock(filp, file_lock, &flock);
+ if (error)
+ goto out;
+@@ -2364,14 +2365,22 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- spin_lock(&current->files->file_lock);
+- f = fcheck(fd);
+- spin_unlock(&current->files->file_lock);
+- if (!error && f != filp && flock.l_type != F_UNLCK) {
+- flock.l_type = F_UNLCK;
+- goto again;
++ if (!error && file_lock->fl_type != F_UNLCK) {
++ /*
++ * We need that spin_lock here - it prevents reordering between
++ * update of i_flctx->flc_posix and check for it done in
++ * close(). rcu_read_lock() wouldn't do.
++ */
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (f != filp) {
++ file_lock->fl_type = F_UNLCK;
++ error = do_lock_file_wait(filp, cmd, file_lock);
++ WARN_ON_ONCE(error);
++ error = -EBADF;
++ }
+ }
+-
+ out:
+ locks_free_lock(file_lock);
+ return error;
+diff --git a/fs/namei.c b/fs/namei.c
+index 0c3974c..d8ee4da 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
+ return 0;
+ if (!follow)
+ return 0;
++ /* make sure that d_is_symlink above matches inode */
++ if (nd->flags & LOOKUP_RCU) {
++ if (read_seqcount_retry(&link->dentry->d_seq, seq))
++ return -ECHILD;
++ }
+ return pick_link(nd, link, inode, seq);
+ }
+
+@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
+ if (err < 0)
+ return err;
+
+- inode = d_backing_inode(path.dentry);
+ seq = 0; /* we are already out of RCU mode */
+ err = -ENOENT;
+ if (d_is_negative(path.dentry))
+ goto out_path_put;
++ inode = d_backing_inode(path.dentry);
+ }
+
+ if (flags & WALK_PUT)
+@@ -3130,12 +3135,12 @@ retry_lookup:
+ return error;
+
+ BUG_ON(nd->flags & LOOKUP_RCU);
+- inode = d_backing_inode(path.dentry);
+ seq = 0; /* out of RCU mode, so the value doesn't matter */
+ if (unlikely(d_is_negative(path.dentry))) {
+ path_to_nameidata(&path, nd);
+ return -ENOENT;
+ }
++ inode = d_backing_inode(path.dentry);
+ finish_lookup:
+ if (nd->depth)
+ put_link(nd);
+@@ -3144,11 +3149,6 @@ finish_lookup:
+ if (unlikely(error))
+ return error;
+
+- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
+- path_to_nameidata(&path, nd);
+- return -ELOOP;
+- }
+-
+ if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
+ path_to_nameidata(&path, nd);
+ } else {
+@@ -3167,6 +3167,10 @@ finish_open:
+ return error;
+ }
+ audit_inode(nd->name, nd->path.dentry, 0);
++ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
++ error = -ELOOP;
++ goto out;
++ }
+ error = -EISDIR;
+ if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
+ goto out;
+@@ -3210,6 +3214,10 @@ opened:
+ goto exit_fput;
+ }
+ out:
++ if (unlikely(error > 0)) {
++ WARN_ON(1);
++ error = -EINVAL;
++ }
+ if (got_write)
+ mnt_drop_write(nd->path.mnt);
+ path_put(&save_parent);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index f496ed7..98a4415 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ dentry = d_add_unique(dentry, igrab(state->inode));
+ if (dentry == NULL) {
+ dentry = opendata->dentry;
+- } else if (dentry != ctx->dentry) {
++ } else {
+ dput(ctx->dentry);
+- ctx->dentry = dget(dentry);
++ ctx->dentry = dentry;
+ }
+ nfs_set_verifier(dentry,
+ nfs_save_change_attribute(d_inode(opendata->dir)));
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 7f60472..e6795c7 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -956,6 +956,7 @@ clean_orphan:
+ tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
+ update_isize, end);
+ if (tmp_ret < 0) {
++ ocfs2_inode_unlock(inode, 1);
+ ret = tmp_ret;
+ mlog_errno(ret);
+ brelse(di_bh);
+diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
+index 0419485..0f1c6f3 100644
+--- a/include/asm-generic/cputime_nsecs.h
++++ b/include/asm-generic/cputime_nsecs.h
+@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
+ */
+ static inline cputime_t timespec_to_cputime(const struct timespec *val)
+ {
+- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+ */
+ static inline cputime_t timeval_to_cputime(const struct timeval *val)
+ {
+- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
++ val->tv_usec * NSEC_PER_USEC;
+ return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
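The (u64) casts matter on 32-bit builds, where tv_sec and NSEC_PER_SEC are both 32-bit longs: the multiplication is then performed in 32 bits and wraps before the result is widened to u64. A standalone illustration (assumes a 32-bit unsigned long):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	long sec = 5;
	uint64_t wrong = sec * NSEC_PER_SEC;		/* 32-bit multiply, wraps */
	uint64_t right = (uint64_t)sec * NSEC_PER_SEC;	/* 64-bit multiply */

	/* on a 32-bit target: 705032704 vs 5000000000 */
	printf("%llu vs %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}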
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 7bfb063..461a055 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -35,4 +35,13 @@
+
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+
++static inline bool drm_arch_can_wc_memory(void)
++{
++#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
++ return false;
++#else
++ return true;
++#endif
++}
++
+ #endif
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index 5340099..f356f97 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
+ /**
+ * struct drm_dp_mst_port - MST port
+ * @kref: reference count for this port.
+- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
+- * @guid: guid for DP 1.2 device on this port.
+ * @port_num: port number
+ * @input: if this port is an input port.
+ * @mcs: message capability status - DP 1.2 spec.
+@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
+ struct drm_dp_mst_port {
+ struct kref kref;
+
+- /* if dpcd 1.2 device is on this port - its GUID info */
+- bool guid_valid;
+- u8 guid[16];
+-
+ u8 port_num;
+ bool input;
+ bool mcs;
+@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
+ * @tx_slots: transmission slots for this device.
+ * @last_seqno: last sequence number used to talk to this.
+ * @link_address_sent: if a link address message has been sent to this device yet.
++ * @guid: guid for DP 1.2 branch device. Ports under this branch can be
++ * identified by port number.
+ *
+ * This structure represents an MST branch device, there is one
+- * primary branch device at the root, along with any others connected
+- * to downstream ports
++ * primary branch device at the root, along with any other branches connected
++ * to downstream ports of parent branches.
+ */
+ struct drm_dp_mst_branch {
+ struct kref kref;
+@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
+ struct drm_dp_sideband_msg_tx *tx_slots[2];
+ int last_seqno;
+ bool link_address_sent;
++
++ /* globally unique identifier to identify branch devices */
++ u8 guid[16];
+ };
+
+
+@@ -405,11 +404,9 @@ struct drm_dp_payload {
+ * @conn_base_id: DRM connector ID this mgr is connected to.
+ * @down_rep_recv: msg receiver state for down replies.
+ * @up_req_recv: msg receiver state for up requests.
+- * @lock: protects mst state, primary, guid, dpcd.
++ * @lock: protects mst state, primary, dpcd.
+ * @mst_state: if this manager is enabled for an MST capable port.
+ * @mst_primary: pointer to the primary branch device.
+- * @guid_valid: GUID valid for the primary branch device.
+- * @guid: GUID for primary port.
+ * @dpcd: cache of DPCD for primary port.
+ * @pbn_div: PBN to slots divisor.
+ *
+@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
+ struct drm_dp_sideband_msg_rx up_req_recv;
+
+ /* pointer to info about the initial MST device */
+- struct mutex lock; /* protects mst_state + primary + guid + dpcd */
++ struct mutex lock; /* protects mst_state + primary + dpcd */
+
+ bool mst_state;
+ struct drm_dp_mst_branch *mst_primary;
+- /* primary MST device GUID */
+- bool guid_valid;
+- u8 guid[16];
++
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 sink_count;
+ int pbn_div;
+@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
+ the mstb tx_slots and txmsg->state once they are queued */
+ struct mutex qlock;
+ struct list_head tx_msg_downq;
+- struct list_head tx_msg_upq;
+ bool tx_down_in_progress;
+- bool tx_up_in_progress;
+
+ /* payload info + lock for it */
+ struct mutex payload_lock;
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index d639049..553210c 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
++#define DRM_FIXED_EPSILON 1LL
++#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
+
+ static inline s64 drm_int2fixp(int a)
+ {
+ return ((s64)a) << DRM_FIXED_POINT;
+ }
+
+-static inline int drm_fixp2int(int64_t a)
++static inline int drm_fixp2int(s64 a)
+ {
+ return ((s64)a) >> DRM_FIXED_POINT;
+ }
+
+-static inline unsigned drm_fixp_msbset(int64_t a)
++static inline int drm_fixp2int_ceil(s64 a)
++{
++ if (a > 0)
++ return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
++ else
++ return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
++}
++
++static inline unsigned drm_fixp_msbset(s64 a)
+ {
+ unsigned shift, sign = (a >> 63) & 1;
+
+@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
+ return result;
+ }
+
++static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
++{
++ s64 res;
++ bool a_neg = a < 0;
++ bool b_neg = b < 0;
++ u64 a_abs = a_neg ? -a : a;
++ u64 b_abs = b_neg ? -b : b;
++ u64 rem;
++
++ /* determine integer part */
++ u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
++
++ /* determine fractional part */
++ {
++ u32 i = DRM_FIXED_POINT;
++
++ do {
++ rem <<= 1;
++ res_abs <<= 1;
++ if (rem >= b_abs) {
++ res_abs |= 1;
++ rem -= b_abs;
++ }
++ } while (--i != 0);
++ }
++
++ /* round up LSB */
++ {
++ u64 summand = (rem << 1) >= b_abs;
++
++ res_abs += summand;
++ }
++
++ res = (s64) res_abs;
++ if (a_neg ^ b_neg)
++ res = -res;
++ return res;
++}
++
+ static inline s64 drm_fixp_exp(s64 x)
+ {
+ s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
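drm_fixp_from_fraction() above computes a signed 32.32 fixed-point quotient
by binary long division: one fractional bit per loop iteration, then a
round-to-nearest adjustment of the last bit. A userspace sketch of the same
algorithm, assuming DRM_FIXED_POINT is 32 (as in drm_fixed.h) and with plain
C division standing in for div64_u64_rem():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIXED_POINT 32

static int64_t fixp_from_fraction(int64_t a, int64_t b)
{
	bool a_neg = a < 0, b_neg = b < 0;
	uint64_t a_abs = a_neg ? -a : a;
	uint64_t b_abs = b_neg ? -b : b;
	uint64_t res_abs = a_abs / b_abs;	/* integer part */
	uint64_t rem = a_abs % b_abs;

	for (unsigned int i = 0; i < FIXED_POINT; i++) {
		/* shift in one fractional quotient bit per step */
		rem <<= 1;
		res_abs <<= 1;
		if (rem >= b_abs) {
			res_abs |= 1;
			rem -= b_abs;
		}
	}
	res_abs += (rem << 1) >= b_abs;		/* round the last bit */

	return (a_neg ^ b_neg) ? -(int64_t)res_abs : (int64_t)res_abs;
}

int main(void)
{
	/* 1/3 in 32.32 fixed point is 0x0000000055555555 */
	printf("1/3 -> %#018llx\n",
	       (unsigned long long)fixp_from_fraction(1, 3));
	return 0;
}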
+diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
+index 71b1d6c..8dbd787 100644
+--- a/include/linux/ceph/messenger.h
++++ b/include/linux/ceph/messenger.h
+@@ -220,6 +220,7 @@ struct ceph_connection {
+ struct ceph_entity_addr actual_peer_addr;
+
+ /* message out temps */
++ struct ceph_msg_header out_hdr;
+ struct ceph_msg *out_msg; /* sending message (== tail of
+ out_sent) */
+ bool out_msg_done;
+@@ -229,7 +230,6 @@ struct ceph_connection {
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+- bool out_kvec_is_msg; /* kvec refers to out_msg */
+ int out_more; /* there is more data after the kvecs */
+ __le64 out_temp_ack; /* for writing an ack */
+ struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 06b77f9d..8e30fae 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -133,6 +133,12 @@ struct cgroup_subsys_state {
+ */
+ u64 serial_nr;
+
++ /*
++ * Incremented by online self and children. Used to guarantee that
++ * parents are not offlined before their children.
++ */
++ atomic_t online_cnt;
++
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 85a868c..fea160e 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ task_unlock(current);
+ }
+
++extern void cpuset_post_attach_flush(void);
++
+ #else /* !CONFIG_CPUSETS */
+
+ static inline bool cpusets_enabled(void) { return false; }
+@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
+ return false;
+ }
+
++static inline void cpuset_post_attach_flush(void)
++{
++}
++
+ #endif /* !CONFIG_CPUSETS */
+
+ #endif /* _LINUX_CPUSET_H */
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 569b5a8..47be3ad 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ struct list_head *head, bool remove);
+
+-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++ unsigned long data_size);
++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
++ size_t len);
+
+ extern struct work_struct efivar_work;
+ void efivar_run_worker(void);
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 8fdc17b..ae6a711 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
+ struct hv_input_signal_event event;
+ };
+
++enum hv_signal_policy {
++ HV_SIGNAL_POLICY_DEFAULT = 0,
++ HV_SIGNAL_POLICY_EXPLICIT,
++};
++
+ struct vmbus_channel {
+ /* Unique channel id */
+ int id;
+@@ -757,8 +762,21 @@ struct vmbus_channel {
+ * link up channels based on their CPU affinity.
+ */
+ struct list_head percpu_list;
++ /*
++ * Host signaling policy: The default policy will be
++ * based on the ring buffer state. We will also support
++ * a policy where the client driver can have explicit
++ * signaling control.
++ */
++ enum hv_signal_policy signal_policy;
+ };
+
++static inline void set_channel_signal_state(struct vmbus_channel *c,
++ enum hv_signal_policy policy)
++{
++ c->signal_policy = policy;
++}
++
+ static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+ {
+ c->batched_reading = state;
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index c0e9614..5455b66 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -544,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
+
+ static inline loff_t nfs_size_to_loff_t(__u64 size)
+ {
+- if (size > (__u64) OFFSET_MAX - 1)
+- return OFFSET_MAX - 1;
+- return (loff_t) size;
++ return min_t(u64, size, OFFSET_MAX);
+ }
+
+ static inline ino_t
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 50777b5..92d112a 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -15,10 +15,7 @@ struct shmem_inode_info {
+ unsigned int seals; /* shmem seals */
+ unsigned long flags;
+ unsigned long alloced; /* data pages alloced to file */
+- union {
+- unsigned long swapped; /* subtotal assigned to swap */
+- char *symlink; /* unswappable short symlink */
+- };
++ unsigned long swapped; /* subtotal assigned to swap */
+ struct shared_policy policy; /* NUMA memory alloc policy */
+ struct list_head swaplist; /* chain of maybes on swap */
+ struct simple_xattrs xattrs; /* list of xattrs */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 9147f9f..75f136a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -219,6 +219,7 @@ struct sk_buff;
+ #else
+ #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+ #endif
++extern int sysctl_max_skb_frags;
+
+ typedef struct skb_frag_struct skb_frag_t;
+
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 613c29b..e13a1ac 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -43,6 +43,9 @@
+ /* Default weight of a bound cooling device */
+ #define THERMAL_WEIGHT_DEFAULT 0
+
++/* use a value below 0 K to indicate an invalid/uninitialized temperature */
++#define THERMAL_TEMP_INVALID -274000
++
+ /* Unit conversion macros */
+ #define DECI_KELVIN_TO_CELSIUS(t) ({ \
+ long _t = (t); \
+@@ -167,6 +170,7 @@ struct thermal_attr {
+ * @forced_passive: If > 0, temperature at which to switch on all ACPI
+ * processor cooling devices. Currently only used by the
+ * step-wise governor.
++ * @need_update: if equal to 1, thermal_zone_device_update needs to be invoked.
+ * @ops: operations this &thermal_zone_device supports
+ * @tzp: thermal zone parameters
+ * @governor: pointer to the governor for this thermal zone
+@@ -194,6 +198,7 @@ struct thermal_zone_device {
+ int emul_temperature;
+ int passive;
+ unsigned int forced_passive;
++ atomic_t need_update;
+ struct thermal_zone_device_ops *ops;
+ struct thermal_zone_params *tzp;
+ struct thermal_governor *governor;
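THERMAL_TEMP_INVALID works because the subsystem expresses temperatures in
millidegrees Celsius, so -274000 lies below absolute zero (-273150) and can
never be a real reading. A one-line validity check in the same spirit
(temp_is_valid() is illustrative, not a kernel helper):

#include <stdio.h>

#define THERMAL_TEMP_INVALID	-274000		/* millidegrees Celsius */

static int temp_is_valid(int temp)
{
	return temp > THERMAL_TEMP_INVALID;	/* sentinel reads fail */
}

int main(void)
{
	printf("42000 millicelsius valid: %d\n", temp_is_valid(42000));
	printf("sentinel valid:           %d\n",
	       temp_is_valid(THERMAL_TEMP_INVALID));
	return 0;
}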
+diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
+index cbb20af..bb679b4 100644
+--- a/include/linux/ucs2_string.h
++++ b/include/linux/ucs2_string.h
+@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
+ unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+
++unsigned long ucs2_utf8size(const ucs2_char_t *src);
++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
++ unsigned long maxlength);
++
+ #endif /* _LINUX_UCS2_STRING_H_ */
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 2a91a05..9b4c418 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -6,8 +6,8 @@
+ #include <linux/mutex.h>
+ #include <net/sock.h>
+
+-void unix_inflight(struct file *fp);
+-void unix_notinflight(struct file *fp);
++void unix_inflight(struct user_struct *user, struct file *fp);
++void unix_notinflight(struct user_struct *user, struct file *fp);
+ void unix_gc(void);
+ void wait_for_unix_gc(void);
+ struct sock *unix_get_socket(struct file *filp);
+diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
+index 6816f0f..30a56ab 100644
+--- a/include/net/dst_metadata.h
++++ b/include/net/dst_metadata.h
+@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
+ return dst && !(dst->flags & DST_METADATA);
+ }
+
++static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
++ const struct sk_buff *skb_b)
++{
++ const struct metadata_dst *a, *b;
++
++ if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
++ return 0;
++
++ a = (const struct metadata_dst *) skb_dst(skb_a);
++ b = (const struct metadata_dst *) skb_dst(skb_b);
++
++ if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
++ return 1;
++
++ return memcmp(&a->u.tun_info, &b->u.tun_info,
++ sizeof(a->u.tun_info) + a->u.tun_info.options_len);
++}
++
+ struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+ struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 481fe1c..49dcad4 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ struct sock *newsk,
+ const struct request_sock *req);
+
+-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+- struct sock *child);
++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
++ struct request_sock *req,
++ struct sock *child);
+ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ unsigned long timeout);
+ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 877f682..295d291 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
+
+ void ip6_route_input(struct sk_buff *skb);
+
+-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+- struct flowi6 *fl6);
++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
++ struct flowi6 *fl6, int flags);
++
++static inline struct dst_entry *ip6_route_output(struct net *net,
++ const struct sock *sk,
++ struct flowi6 *fl6)
++{
++ return ip6_route_output_flags(net, sk, fl6, 0);
++}
++
+ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ int flags);
+
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 9f4df68..3f98233 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -61,6 +61,7 @@ struct fib_nh_exception {
+ struct rtable __rcu *fnhe_rth_input;
+ struct rtable __rcu *fnhe_rth_output;
+ unsigned long fnhe_stamp;
++ struct rcu_head rcu;
+ };
+
+ struct fnhe_hash_bucket {
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 262532d..59fa93c 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -21,6 +21,7 @@ struct scm_creds {
+ struct scm_fp_list {
+ short count;
+ short max;
++ struct user_struct *user;
+ struct file *fp[SCM_MAX_FD];
+ };
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index f80e74c..414d822 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+
+ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+ void tcp_v4_mtu_reduced(struct sock *sk);
+-void tcp_req_err(struct sock *sk, u32 seq);
++void tcp_req_err(struct sock *sk, u32 seq, bool abort);
+ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+ struct sock *tcp_create_openreq_child(const struct sock *sk,
+ struct request_sock *req,
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index aabf0ac..689f4d2 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -138,6 +138,7 @@ enum se_cmd_flags_table {
+ SCF_COMPARE_AND_WRITE = 0x00080000,
+ SCF_COMPARE_AND_WRITE_POST = 0x00100000,
+ SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
++ SCF_ACK_KREF = 0x00400000,
+ };
+
+ /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
+@@ -490,6 +491,8 @@ struct se_cmd {
+ #define CMD_T_DEV_ACTIVE (1 << 7)
+ #define CMD_T_REQUEST_STOP (1 << 8)
+ #define CMD_T_BUSY (1 << 9)
++#define CMD_T_TAS (1 << 10)
++#define CMD_T_FABRIC_STOP (1 << 11)
+ spinlock_t t_state_lock;
+ struct kref cmd_kref;
+ struct completion t_transport_stop_comp;
+diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
+index c2e5d6c..ebd10e6 100644
+--- a/include/uapi/linux/Kbuild
++++ b/include/uapi/linux/Kbuild
+@@ -307,7 +307,7 @@ header-y += nfs_mount.h
+ header-y += nl80211.h
+ header-y += n_r3964.h
+ header-y += nubus.h
+-header-y += nvme.h
++header-y += nvme_ioctl.h
+ header-y += nvram.h
+ header-y += omap3isp.h
+ header-y += omapfb.h
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d1d3e8f..2e7f7ab 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
+ /* adjust offset of jmps if necessary */
+ if (i < pos && i + insn->off + 1 > pos)
+ insn->off += delta;
+- else if (i > pos && i + insn->off + 1 < pos)
++ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+ insn->off -= delta;
+ }
+ }
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 470f653..fb1ecfd 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -57,7 +57,7 @@
+ #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
+ #include <linux/kthread.h>
+ #include <linux/delay.h>
+-
++#include <linux/cpuset.h>
+ #include <linux/atomic.h>
+
+ /*
+@@ -2764,6 +2764,7 @@ out_unlock_rcu:
+ out_unlock_threadgroup:
+ percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_kn_unlock(of->kn);
++ cpuset_post_attach_flush();
+ return ret ?: nbytes;
+ }
+
+@@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
+ INIT_LIST_HEAD(&css->sibling);
+ INIT_LIST_HEAD(&css->children);
+ css->serial_nr = css_serial_nr_next++;
++ atomic_set(&css->online_cnt, 0);
+
+ if (cgroup_parent(cgrp)) {
+ css->parent = cgroup_css(cgroup_parent(cgrp), ss);
+@@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css)
+ if (!ret) {
+ css->flags |= CSS_ONLINE;
+ rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
++
++ atomic_inc(&css->online_cnt);
++ if (css->parent)
++ atomic_inc(&css->parent->online_cnt);
+ }
+ return ret;
+ }
+@@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work)
+ container_of(work, struct cgroup_subsys_state, destroy_work);
+
+ mutex_lock(&cgroup_mutex);
+- offline_css(css);
+- mutex_unlock(&cgroup_mutex);
+
+- css_put(css);
++ do {
++ offline_css(css);
++ css_put(css);
++ /* @css can't go away while we're holding cgroup_mutex */
++ css = css->parent;
++ } while (css && atomic_dec_and_test(&css->online_cnt));
++
++ mutex_unlock(&cgroup_mutex);
+ }
+
+ /* css kill confirmation processing requires process context, bounce */
+@@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+ struct cgroup_subsys_state *css =
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+- INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ if (atomic_dec_and_test(&css->online_cnt)) {
++ INIT_WORK(&css->destroy_work, css_killed_work_fn);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
++ }
+ }
+
+ /**
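The online_cnt scheme introduced above has every css count itself plus each
of its online children, so a parent's teardown work cannot run until all of
its children have gone offline; dropping the last count on one css walks up
and drops a count on its parent. A toy userspace model of that walk, with
C11 atomics standing in for atomic_t and all names purely illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct css {
	const char *name;
	struct css *parent;
	atomic_int online_cnt;
};

static void css_online(struct css *c)
{
	atomic_fetch_add(&c->online_cnt, 1);		/* count self */
	if (c->parent)
		atomic_fetch_add(&c->parent->online_cnt, 1);
}

static void css_killed(struct css *c)
{
	/* mirrors css_killed_ref_fn() + css_killed_work_fn(): offline and
	 * propagate upward only when the count drops to zero */
	while (c && atomic_fetch_sub(&c->online_cnt, 1) == 1) {
		printf("offlining %s\n", c->name);
		c = c->parent;
	}
}

int main(void)
{
	struct css root = { "root", NULL, 0 };
	struct css child = { "child", &root, 0 };

	css_online(&root);
	css_online(&child);
	css_killed(&root);	/* root survives: child still holds a count */
	css_killed(&child);	/* offlines child, then root */
	return 0;
}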
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 02a8ea5..2ade632 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
+ static DEFINE_MUTEX(cpuset_mutex);
+ static DEFINE_SPINLOCK(callback_lock);
+
++static struct workqueue_struct *cpuset_migrate_mm_wq;
++
+ /*
+ * CPU / memory hotplug is handled asynchronously.
+ */
+@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ }
+
+ /*
+- * cpuset_migrate_mm
+- *
+- * Migrate memory region from one set of nodes to another.
+- *
+- * Temporarilly set tasks mems_allowed to target nodes of migration,
+- * so that the migration code can allocate pages on these nodes.
+- *
+- * While the mm_struct we are migrating is typically from some
+- * other task, the task_struct mems_allowed that we are hacking
+- * is for our current task, which must allocate new pages for that
+- * migrating memory region.
++ * Migrate memory region from one set of nodes to another. This is
++ * performed asynchronously, as it can be called from the process migration
++ * path while holding locks involved in process management. All mm migrations are
++ * performed in the queued order and can be waited for by flushing
++ * cpuset_migrate_mm_wq.
+ */
+
++struct cpuset_migrate_mm_work {
++ struct work_struct work;
++ struct mm_struct *mm;
++ nodemask_t from;
++ nodemask_t to;
++};
++
++static void cpuset_migrate_mm_workfn(struct work_struct *work)
++{
++ struct cpuset_migrate_mm_work *mwork =
++ container_of(work, struct cpuset_migrate_mm_work, work);
++
++ /* on a wq worker, no need to worry about %current's mems_allowed */
++ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
++ mmput(mwork->mm);
++ kfree(mwork);
++}
++
+ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to)
+ {
+- struct task_struct *tsk = current;
+-
+- tsk->mems_allowed = *to;
++ struct cpuset_migrate_mm_work *mwork;
+
+- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
++ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
++ if (mwork) {
++ mwork->mm = mm;
++ mwork->from = *from;
++ mwork->to = *to;
++ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
++ queue_work(cpuset_migrate_mm_wq, &mwork->work);
++ } else {
++ mmput(mm);
++ }
++}
+
+- rcu_read_lock();
+- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
+- rcu_read_unlock();
++void cpuset_post_attach_flush(void)
++{
++ flush_workqueue(cpuset_migrate_mm_wq);
+ }
+
+ /*
+@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
+ mpol_rebind_mm(mm, &cs->mems_allowed);
+ if (migrate)
+ cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
+- mmput(mm);
++ else
++ mmput(mm);
+ }
+ css_task_iter_end(&it);
+
+@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ * @old_mems_allowed is the right nodesets that we
+ * migrate mm from.
+ */
+- if (is_memory_migrate(cs)) {
++ if (is_memory_migrate(cs))
+ cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
+ &cpuset_attach_nodemask_to);
+- }
+- mmput(mm);
++ else
++ mmput(mm);
+ }
+ }
+
+@@ -1710,6 +1733,7 @@ out_unlock:
+ mutex_unlock(&cpuset_mutex);
+ kernfs_unbreak_active_protection(of->kn);
+ css_put(&cs->css);
++ flush_workqueue(cpuset_migrate_mm_wq);
+ return retval ?: nbytes;
+ }
+
+@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
+ top_cpuset.effective_mems = node_states[N_MEMORY];
+
+ register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
++
++ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
++ BUG_ON(!cpuset_migrate_mm_wq);
+ }
+
+ /**
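The cpuset change above converts synchronous mm migration into queued work:
the caller packages its arguments into a heap-allocated item, hands ownership
to the workqueue, and the work function frees the item when done, with
flush_workqueue() as the synchronization point. A minimal pthread-based
analogue of that ownership-transfer pattern (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct migrate_work {
	int from;
	int to;
};

static void *migrate_workfn(void *arg)
{
	struct migrate_work *w = arg;

	printf("migrating pages: node %d -> node %d\n", w->from, w->to);
	free(w);			/* the work item owns itself */
	return NULL;
}

int main(void)
{
	struct migrate_work *w = malloc(sizeof(*w));
	pthread_t worker;

	if (!w)
		return 1;
	w->from = 0;
	w->to = 1;
	if (pthread_create(&worker, NULL, migrate_workfn, w)) {
		free(w);
		return 1;
	}
	pthread_join(worker, NULL);	/* analogue of flush_workqueue() */
	return 0;
}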
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index a302cf9..57bff78 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ unsigned int flags = 0, irq = desc->irq_data.irq;
+ struct irqaction *action = desc->action;
+
+- do {
++ /* action might have become NULL since we dropped the lock */
++ while (action) {
+ irqreturn_t res;
+
+ trace_irq_handler_entry(irq, action);
+@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+
+ retval |= res;
+ action = action->next;
+- } while (action);
++ }
+
+ add_interrupt_randomness(irq, flags);
+
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 7a4e473..25ced16 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+- } else
++ } else {
+ devres_free(ptr);
++ return ERR_PTR(-ENXIO);
++ }
+
+ return addr;
+ }
+diff --git a/kernel/resource.c b/kernel/resource.c
+index f150dbb..249b1eb 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
+ if (!conflict)
+ break;
+ if (conflict != parent) {
+- parent = conflict;
+- if (!(conflict->flags & IORESOURCE_BUSY))
++ if (!(conflict->flags & IORESOURCE_BUSY)) {
++ parent = conflict;
+ continue;
++ }
+ }
+ if (conflict->flags & flags & IORESOURCE_MUXED) {
+ add_wait_queue(&muxed_resource_wait, &wait);
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 580ac2d..15a1795 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
+ put_seccomp_filter(thread);
+ smp_store_release(&thread->seccomp.filter,
+ caller->seccomp.filter);
++
++ /*
++ * Don't let an unprivileged task work around
++ * the no_new_privs restriction by creating
++ * a thread that sets it up, enters seccomp,
++ * then dies.
++ */
++ if (task_no_new_privs(caller))
++ task_set_no_new_privs(thread);
++
+ /*
+ * Opt the other thread into seccomp if needed.
+ * As threads are considered to be trust-realm
+ * equivalent (see ptrace_may_access), it is safe to
+ * allow one thread to transition the other.
+ */
+- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+- /*
+- * Don't let an unprivileged task work around
+- * the no_new_privs restriction by creating
+- * a thread that sets it up, enters seccomp,
+- * then dies.
+- */
+- if (task_no_new_privs(caller))
+- task_set_no_new_privs(thread);
+-
++ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+- }
+ }
+ }
+
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index ce033c7..9cff0ab 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+ {
+ struct posix_clock *clk = get_posix_clock(fp);
+- int result = 0;
++ unsigned int result = 0;
+
+ if (!clk)
+- return -ENODEV;
++ return POLLERR;
+
+ if (clk->ops.poll)
+ result = clk->ops.poll(clk, fp, wait);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 7c7ec45..22c57e1 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
+ /* Get the next period */
+ next = tick_init_jiffy_update();
+
+- hrtimer_forward_now(&ts->sched_timer, tick_period);
+ hrtimer_set_expires(&ts->sched_timer, next);
+- tick_program_event(next, 1);
++ hrtimer_forward_now(&ts->sched_timer, tick_period);
++ tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+ tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
+ }
+
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d563c19..99188ee 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+
+ delta = timekeeping_get_delta(tkr);
+
+- nsec = delta * tkr->mult + tkr->xtime_nsec;
+- nsec >>= tkr->shift;
++ nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
+
+ /* If arch requires, add in get_arch_timeoffset() */
+ return nsec + arch_gettimeoffset();
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 4f6ef69..debf6e8 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
+ * The ftrace subsystem is for showing formats only.
+ * They can not be enabled or disabled via the event files.
+ */
+- if (call->class && call->class->reg)
++ if (call->class && call->class->reg &&
++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ return file;
+ }
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index c579dba..450c21f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
+ int node)
+ {
+ assert_rcu_or_wq_mutex_or_pool_mutex(wq);
++
++ /*
++ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
++ * delayed item is pending. The plan is to keep CPU -> NODE
++ * mapping valid and stable across CPU on/offlines. Once that
++ * happens, this workaround can be removed.
++ */
++ if (unlikely(node == NUMA_NO_NODE))
++ return wq->dfl_pwq;
++
+ return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
+ }
+
+@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+ dwork->wq = wq;
+- /* timer isn't guaranteed to run in this cpu, record earlier */
+- if (cpu == WORK_CPU_UNBOUND)
+- cpu = raw_smp_processor_id();
+ dwork->cpu = cpu;
+ timer->expires = jiffies + delay;
+
+- add_timer_on(timer, cpu);
++ if (unlikely(cpu != WORK_CPU_UNBOUND))
++ add_timer_on(timer, cpu);
++ else
++ add_timer(timer);
+ }
+
+ /**
+diff --git a/lib/Kconfig b/lib/Kconfig
+index f0df318..1a48744 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
+ # compression support is select'ed if needed
+ #
+ config 842_COMPRESS
++ select CRC32
+ tristate
+
+ config 842_DECOMPRESS
++ select CRC32
+ tristate
+
+ config ZLIB_INFLATE
+diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
+index 6f500ef..f0b323a 100644
+--- a/lib/ucs2_string.c
++++ b/lib/ucs2_string.c
+@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+ }
+ }
+ EXPORT_SYMBOL(ucs2_strncmp);
++
++unsigned long
++ucs2_utf8size(const ucs2_char_t *src)
++{
++ unsigned long i;
++ unsigned long j = 0;
++
++ for (i = 0; i < ucs2_strlen(src); i++) {
++ u16 c = src[i];
++
++ if (c >= 0x800)
++ j += 3;
++ else if (c >= 0x80)
++ j += 2;
++ else
++ j += 1;
++ }
++
++ return j;
++}
++EXPORT_SYMBOL(ucs2_utf8size);
++
++/*
++ * Copy whole UTF-8 characters from the ucs2 string src to dest, writing at
++ * most maxlength bytes.
++ *
++ * The return value is the number of UTF-8 bytes written to dest, not
++ * including the final NUL byte.
++ */
++unsigned long
++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
++{
++ unsigned int i;
++ unsigned long j = 0;
++ unsigned long limit = ucs2_strnlen(src, maxlength);
++
++ for (i = 0; maxlength && i < limit; i++) {
++ u16 c = src[i];
++
++ if (c >= 0x800) {
++ if (maxlength < 3)
++ break;
++ maxlength -= 3;
++ dest[j++] = 0xe0 | (c & 0xf000) >> 12;
++ dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
++ dest[j++] = 0x80 | (c & 0x003f);
++ } else if (c >= 0x80) {
++ if (maxlength < 2)
++ break;
++ maxlength -= 2;
++ dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
++ dest[j++] = 0x80 | (c & 0x03f);
++ } else {
++ maxlength -= 1;
++ dest[j++] = c & 0x7f;
++ }
++ }
++ if (maxlength)
++ dest[j] = '\0';
++ return j;
++}
++EXPORT_SYMBOL(ucs2_as_utf8);
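ucs2_as_utf8() above applies the standard UTF-8 encodings for the BMP: one
byte below U+0080, two bytes below U+0800, three bytes otherwise. A
standalone check of those bit manipulations (encode_utf8() is an
illustrative extraction, not a kernel function):

#include <stdint.h>
#include <stdio.h>

static unsigned int encode_utf8(uint16_t c, uint8_t out[3])
{
	if (c >= 0x800) {			/* 3-byte form */
		out[0] = 0xe0 | (c & 0xf000) >> 12;
		out[1] = 0x80 | (c & 0x0fc0) >> 6;
		out[2] = 0x80 | (c & 0x003f);
		return 3;
	} else if (c >= 0x80) {			/* 2-byte form */
		out[0] = 0xc0 | (c & 0x7c0) >> 6;
		out[1] = 0x80 | (c & 0x03f);
		return 2;
	}
	out[0] = c;				/* plain ASCII */
	return 1;
}

int main(void)
{
	const uint16_t samples[] = { 'A', 0x00e9 /* e-acute */,
				     0x20ac /* euro sign */ };

	for (unsigned int i = 0; i < 3; i++) {
		uint8_t buf[3];
		unsigned int n = encode_utf8(samples[i], buf);

		printf("U+%04X ->", samples[i]);
		for (unsigned int j = 0; j < n; j++)
			printf(" %02x", buf[j]);
		printf("\n");		/* expect 41, c3 a9, e2 82 ac */
	}
	return 0;
}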
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index d3116be..300117f 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ bool dequeued_page;
+
+ dequeued_page = false;
++ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
+ /*
+ * Block others from accessing the 'page' while we get around
+@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ continue;
+ }
+ #endif
+- spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ balloon_page_delete(page);
+ __count_vm_event(BALLOON_DEFLATE);
+- spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ unlock_page(page);
+ dequeued_page = true;
+ break;
+ }
+ }
++ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+ if (!dequeued_page) {
+ /*
+diff --git a/mm/memory.c b/mm/memory.c
+index c387430..b80bf47 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(pmd_none(*pmd)) &&
+ unlikely(__pte_alloc(mm, vma, pmd, address)))
+ return VM_FAULT_OOM;
+- /* if an huge pmd materialized from under us just retry later */
+- if (unlikely(pmd_trans_huge(*pmd)))
++ /*
++ * If a huge pmd materialized under us, just retry later. Use
++ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
++ * didn't become pmd_trans_huge under us and then back to pmd_none, as
++ * a result of MADV_DONTNEED running immediately after a huge pmd fault
++ * in a different thread of this mm, in turn leading to a misleading
++ * pmd_trans_huge() retval. All we have to ensure is that it is a
++ * regular pmd that we can walk with pte_offset_map() and we can do that
++ * through an atomic read in C, which is what pmd_trans_unstable()
++ * provides.
++ */
++ if (unlikely(pmd_trans_unstable(pmd)))
+ return 0;
+ /*
+ * A regular pmd is established and it can't morph into a huge pmd
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 7890d0b..6d17e0a 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
+ (GFP_HIGHUSER_MOVABLE |
+ __GFP_THISNODE | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_NOWARN) &
+- ~(__GFP_IO | __GFP_FS), 0);
++ ~__GFP_RECLAIM, 0);
+
+ return newpage;
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 2afcdbb..ea5a70c 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
+ list_del_init(&info->swaplist);
+ mutex_unlock(&shmem_swaplist_mutex);
+ }
+- } else
+- kfree(info->symlink);
++ }
+
+ simple_xattrs_free(&info->xattrs);
+ WARN_ON(inode->i_blocks);
+@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
+ info = SHMEM_I(inode);
+ inode->i_size = len-1;
+ if (len <= SHORT_SYMLINK_LEN) {
+- info->symlink = kmemdup(symname, len, GFP_KERNEL);
+- if (!info->symlink) {
++ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
++ if (!inode->i_link) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ inode->i_op = &shmem_short_symlink_operations;
+- inode->i_link = info->symlink;
+ } else {
+ error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
+ if (error) {
+@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
+ static void shmem_destroy_callback(struct rcu_head *head)
+ {
+ struct inode *inode = container_of(head, struct inode, i_rcu);
++ kfree(inode->i_link);
+ kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+ }
+
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 9e9cca3..795ddd8 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+
+ /* check that it's our buffer */
+ if (lowpan_is_ipv6(*skb_network_header(skb))) {
++ /* Pull off the 1-byte 6lowpan header. */
++ skb_pull(skb, 1);
++
+ /* Copy the packet so that the IPv6 header is
+ * properly aligned.
+ */
+@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+
+ local_skb->protocol = htons(ETH_P_IPV6);
+ local_skb->pkt_type = PACKET_HOST;
++ local_skb->dev = dev;
+
+ skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
+
+@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ if (!local_skb)
+ goto drop;
+
++ local_skb->dev = dev;
++
+ ret = iphc_decompress(local_skb, dev, chan);
+ if (ret < 0) {
+ kfree_skb(local_skb);
+@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+
+ local_skb->protocol = htons(ETH_P_IPV6);
+ local_skb->pkt_type = PACKET_HOST;
+- local_skb->dev = dev;
+
+ if (give_skb_to_upper(local_skb, dev)
+ != NET_RX_SUCCESS) {
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 85b82f7..24e9410 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
+ if (hci_update_random_address(req, false, &own_addr_type))
+ return;
+
++ /* Set window to be the same value as the interval to enable
++ * continuous scanning.
++ */
+ cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+- cp.scan_window = cpu_to_le16(hdev->le_scan_window);
++ cp.scan_window = cp.scan_interval;
++
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.peer_addr_type = conn->dst_type;
+ cp.own_address_type = own_addr_type;
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 981f8a2..02778c5 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
+ * command to remove it from the controller.
+ */
+ list_for_each_entry(b, &hdev->le_white_list, list) {
+- struct hci_cp_le_del_from_white_list cp;
++ /* If the device is neither in pend_le_conns nor
++ * pend_le_reports then remove it from the whitelist.
++ */
++ if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
++ &b->bdaddr, b->bdaddr_type) &&
++ !hci_pend_le_action_lookup(&hdev->pend_le_reports,
++ &b->bdaddr, b->bdaddr_type)) {
++ struct hci_cp_le_del_from_white_list cp;
++
++ cp.bdaddr_type = b->bdaddr_type;
++ bacpy(&cp.bdaddr, &b->bdaddr);
+
+- if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
+- &b->bdaddr, b->bdaddr_type) ||
+- hci_pend_le_action_lookup(&hdev->pend_le_reports,
+- &b->bdaddr, b->bdaddr_type)) {
+- white_list_entries++;
++ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
++ sizeof(cp), &cp);
+ continue;
+ }
+
+- cp.bdaddr_type = b->bdaddr_type;
+- bacpy(&cp.bdaddr, &b->bdaddr);
++ if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
++ /* White list can not be used with RPAs */
++ return 0x00;
++ }
+
+- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+- sizeof(cp), &cp);
++ white_list_entries++;
+ }
+
+ /* Since all no longer valid white list entries have been
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index ffed8a1..4b175df 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ hcon->dst_type = smp->remote_irk->addr_type;
+ queue_work(hdev->workqueue, &conn->id_addr_update_work);
+ }
+-
+- /* When receiving an indentity resolving key for
+- * a remote device that does not use a resolvable
+- * private address, just remove the key so that
+- * it is possible to use the controller white
+- * list for scanning.
+- *
+- * Userspace will have been told to not store
+- * this key at this point. So it is safe to
+- * just remove it.
+- */
+- if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
+- list_del_rcu(&smp->remote_irk->list);
+- kfree_rcu(smp->remote_irk, rcu);
+- smp->remote_irk = NULL;
+- }
+ }
+
+ if (smp->csrk) {
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index a1abe49..3addc05 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
+ .notifier_call = br_device_event
+ };
+
++/* called with RTNL */
+ static int br_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+ {
+@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err = NOTIFY_DONE;
+
+- rtnl_lock();
+ p = br_port_get_rtnl(dev);
+ if (!p)
+ goto out;
+@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
+ }
+
+ out:
+- rtnl_unlock();
+ return err;
+ }
+
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 9981039..63ae5dd 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
+ }
+ con->in_seq = 0;
+ con->in_seq_acked = 0;
++
++ con->out_skip = 0;
+ }
+
+ /*
+@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
+
+ static void con_out_kvec_reset(struct ceph_connection *con)
+ {
++ BUG_ON(con->out_skip);
++
+ con->out_kvec_left = 0;
+ con->out_kvec_bytes = 0;
+ con->out_kvec_cur = &con->out_kvec[0];
+@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
+ static void con_out_kvec_add(struct ceph_connection *con,
+ size_t size, void *data)
+ {
+- int index;
++ int index = con->out_kvec_left;
+
+- index = con->out_kvec_left;
++ BUG_ON(con->out_skip);
+ BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
+
+ con->out_kvec[index].iov_len = size;
+@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
+ con->out_kvec_bytes += size;
+ }
+
++/*
++ * Chop off a kvec from the end. Return residual number of bytes for
++ * that kvec, i.e. how many bytes would have been written if the kvec
++ * hadn't been nuked.
++ */
++static int con_out_kvec_skip(struct ceph_connection *con)
++{
++ int off = con->out_kvec_cur - con->out_kvec;
++ int skip = 0;
++
++ if (con->out_kvec_bytes > 0) {
++ skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
++ BUG_ON(con->out_kvec_bytes < skip);
++ BUG_ON(!con->out_kvec_left);
++ con->out_kvec_bytes -= skip;
++ con->out_kvec_left--;
++ }
++
++ return skip;
++}
++
+ #ifdef CONFIG_BLOCK
+
+ /*
+@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
+ return new_piece;
+ }
+
++static size_t sizeof_footer(struct ceph_connection *con)
++{
++ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
++ sizeof(struct ceph_msg_footer) :
++ sizeof(struct ceph_msg_footer_old);
++}
++
+ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
+ {
+ BUG_ON(!msg);
+@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
+ m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
+
+ dout("prepare_write_message_footer %p\n", con);
+- con->out_kvec_is_msg = true;
+ con->out_kvec[v].iov_base = &m->footer;
+ if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ if (con->ops->sign_message)
+@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
+ u32 crc;
+
+ con_out_kvec_reset(con);
+- con->out_kvec_is_msg = true;
+ con->out_msg_done = false;
+
+ /* Sneak an ack in there first? If we can get it into the same
+@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
+
+ /* tag + hdr + front + middle */
+ con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
+- con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
++ con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
+ con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
+
+ if (m->middle)
+ con_out_kvec_add(con, m->middle->vec.iov_len,
+ m->middle->vec.iov_base);
+
+- /* fill in crc (except data pages), footer */
++ /* fill in hdr crc and finalize hdr */
+ crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
+ con->out_msg->hdr.crc = cpu_to_le32(crc);
+- con->out_msg->footer.flags = 0;
++ memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
+
++ /* fill in front and middle crc, footer */
+ crc = crc32c(0, m->front.iov_base, m->front.iov_len);
+ con->out_msg->footer.front_crc = cpu_to_le32(crc);
+ if (m->middle) {
+@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
+ dout("%s front_crc %u middle_crc %u\n", __func__,
+ le32_to_cpu(con->out_msg->footer.front_crc),
+ le32_to_cpu(con->out_msg->footer.middle_crc));
++ con->out_msg->footer.flags = 0;
+
+ /* is there a data payload? */
+ con->out_msg->footer.data_crc = 0;
+@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
+ }
+ }
+ con->out_kvec_left = 0;
+- con->out_kvec_is_msg = false;
+ ret = 1;
+ out:
+ dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
+@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
+ {
+ int ret;
+
++ dout("%s %p %d left\n", __func__, con, con->out_skip);
+ while (con->out_skip > 0) {
+ size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+
+@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
+ ceph_pr_addr(&con->peer_addr.in_addr),
+ seq, con->in_seq + 1);
+ con->in_base_pos = -front_len - middle_len - data_len -
+- sizeof(m->footer);
++ sizeof_footer(con);
+ con->in_tag = CEPH_MSGR_TAG_READY;
+- return 0;
++ return 1;
+ } else if ((s64)seq - (s64)con->in_seq > 1) {
+ pr_err("read_partial_message bad seq %lld expected %lld\n",
+ seq, con->in_seq + 1);
+@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
+ /* skip this message */
+ dout("alloc_msg said skip message\n");
+ con->in_base_pos = -front_len - middle_len - data_len -
+- sizeof(m->footer);
++ sizeof_footer(con);
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ con->in_seq++;
+- return 0;
++ return 1;
+ }
+
+ BUG_ON(!con->in_msg);
+@@ -2506,13 +2538,13 @@ more:
+
+ more_kvec:
+ /* kvec data queued? */
+- if (con->out_skip) {
+- ret = write_partial_skip(con);
++ if (con->out_kvec_left) {
++ ret = write_partial_kvec(con);
+ if (ret <= 0)
+ goto out;
+ }
+- if (con->out_kvec_left) {
+- ret = write_partial_kvec(con);
++ if (con->out_skip) {
++ ret = write_partial_skip(con);
+ if (ret <= 0)
+ goto out;
+ }
+@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
+ ceph_msg_put(msg);
+ }
+ if (con->out_msg == msg) {
+- dout("%s %p msg %p - was sending\n", __func__, con, msg);
+- con->out_msg = NULL;
+- if (con->out_kvec_is_msg) {
+- con->out_skip = con->out_kvec_bytes;
+- con->out_kvec_is_msg = false;
++ BUG_ON(con->out_skip);
++ /* footer */
++ if (con->out_msg_done) {
++ con->out_skip += con_out_kvec_skip(con);
++ } else {
++ BUG_ON(!msg->data_length);
++ if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
++ con->out_skip += sizeof(msg->footer);
++ else
++ con->out_skip += sizeof(msg->old_footer);
+ }
++ /* data, middle, front */
++ if (msg->data_length)
++ con->out_skip += msg->cursor.total_resid;
++ if (msg->middle)
++ con->out_skip += con_out_kvec_skip(con);
++ con->out_skip += con_out_kvec_skip(con);
++
++ dout("%s %p msg %p - was sending, will write %d skip %d\n",
++ __func__, con, msg, con->out_kvec_bytes, con->out_skip);
+ msg->hdr.seq = 0;
+-
++ con->out_msg = NULL;
+ ceph_msg_put(msg);
+ }
++
+ mutex_unlock(&con->mutex);
+ }
+
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index f8f2359..a28e47f 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
+ mutex_lock(&osdc->request_mutex);
+ req = __lookup_request(osdc, tid);
+ if (!req) {
+- pr_warn("%s osd%d tid %llu unknown, skipping\n",
+- __func__, osd->o_osd, tid);
++ dout("%s osd%d tid %llu unknown, skipping\n", __func__,
++ osd->o_osd, tid);
+ m = NULL;
+ *skip = 1;
+ goto out;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7f00f24..9efbdb3 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= p->vlan_tci ^ skb->vlan_tci;
++ diffs |= skb_metadata_dst_cmp(p, skb);
+ if (maclen == ETH_HLEN)
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_mac_header(skb));
+@@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
+ break;
+
+ case GRO_MERGED_FREE:
+- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
++ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
++ skb_dst_drop(skb);
+ kmem_cache_free(skbuff_head_cache, skb);
+- else
++ } else {
+ __kfree_skb(skb);
++ }
+ break;
+
+ case GRO_HELD:
+@@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+ setup(dev);
+
+- if (!dev->tx_queue_len)
++ if (!dev->tx_queue_len) {
+ dev->priv_flags |= IFF_NO_QUEUE;
++ dev->tx_queue_len = 1;
++ }
+
+ dev->num_tx_queues = txqs;
+ dev->real_num_tx_queues = txqs;
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index d79699c..12e7003 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -208,7 +208,6 @@ ip:
+ case htons(ETH_P_IPV6): {
+ const struct ipv6hdr *iph;
+ struct ipv6hdr _iph;
+- __be32 flow_label;
+
+ ipv6:
+ iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
+@@ -230,8 +229,12 @@ ipv6:
+ key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
+- flow_label = ip6_flowlabel(iph);
+- if (flow_label) {
++ if ((dissector_uses_key(flow_dissector,
++ FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
++ (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
++ ip6_flowlabel(iph)) {
++ __be32 flow_label = ip6_flowlabel(iph);
++
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+ key_tags = skb_flow_dissector_target(flow_dissector,
+@@ -396,6 +399,13 @@ ip_proto_again:
+ goto out_bad;
+ proto = eth->h_proto;
+ nhoff += sizeof(*eth);
++
++ /* Cap headers that we access via pointers at the
++ * end of the Ethernet header as our maximum alignment
++ * at that point is only 2 bytes.
++ */
++ if (NET_IP_ALIGN)
++ hlen = nhoff;
+ }
+
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 8a1741b..dce0acb 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ *fplp = fpl;
+ fpl->count = 0;
+ fpl->max = SCM_MAX_FD;
++ fpl->user = NULL;
+ }
+ fpp = &fpl->fp[fpl->count];
+
+@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ *fpp++ = file;
+ fpl->count++;
+ }
++
++ if (!fpl->user)
++ fpl->user = get_uid(current_user());
++
+ return num;
+ }
+
+@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
+ scm->fp = NULL;
+ for (i=fpl->count-1; i>=0; i--)
+ fput(fpl->fp[i]);
++ free_uid(fpl->user);
+ kfree(fpl);
+ }
+ }
+@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
+ for (i = 0; i < fpl->count; i++)
+ get_file(fpl->fp[i]);
+ new_fpl->max = new_fpl->count;
++ new_fpl->user = get_uid(fpl->user);
+ }
+ return new_fpl;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index b2df375..5bf88f5 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -79,6 +79,8 @@
+
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
++EXPORT_SYMBOL(sysctl_max_skb_frags);
+
+ /**
+ * skb_panic - private function for out-of-line support
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 95b6139..a6beb7b 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -26,6 +26,7 @@ static int zero = 0;
+ static int one = 1;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
++static int max_skb_frags = MAX_SKB_FRAGS;
+
+ static int net_msg_warn; /* Unused, but still a sysctl */
+
+@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
++ {
++ .procname = "max_skb_frags",
++ .data = &sysctl_max_skb_frags,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &one,
++ .extra2 = &max_skb_frags,
++ },
+ { }
+ };
+
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 5684e14..902d606 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -824,26 +824,26 @@ lookup:
+
+ if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+- struct sock *nsk = NULL;
++ struct sock *nsk;
+
+ sk = req->rsk_listener;
+- if (likely(sk->sk_state == DCCP_LISTEN)) {
+- nsk = dccp_check_req(sk, skb, req);
+- } else {
++ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+ }
++ sock_hold(sk);
++ nsk = dccp_check_req(sk, skb, req);
+ if (!nsk) {
+ reqsk_put(req);
+- goto discard_it;
++ goto discard_and_relse;
+ }
+ if (nsk == sk) {
+- sock_hold(sk);
+ reqsk_put(req);
+ } else if (dccp_child_process(sk, nsk, skb)) {
+ dccp_v4_ctl_send_reset(sk, skb);
+- goto discard_it;
++ goto discard_and_relse;
+ } else {
++ sock_put(sk);
+ return 0;
+ }
+ }
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 9c6d050..b8608b7 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -691,26 +691,26 @@ lookup:
+
+ if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+- struct sock *nsk = NULL;
++ struct sock *nsk;
+
+ sk = req->rsk_listener;
+- if (likely(sk->sk_state == DCCP_LISTEN)) {
+- nsk = dccp_check_req(sk, skb, req);
+- } else {
++ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+ }
++ sock_hold(sk);
++ nsk = dccp_check_req(sk, skb, req);
+ if (!nsk) {
+ reqsk_put(req);
+- goto discard_it;
++ goto discard_and_relse;
+ }
+ if (nsk == sk) {
+- sock_hold(sk);
+ reqsk_put(req);
+ } else if (dccp_child_process(sk, nsk, skb)) {
+ dccp_v6_ctl_send_reset(sk, skb);
+- goto discard_it;
++ goto discard_and_relse;
+ } else {
++ sock_put(sk);
+ return 0;
+ }
+ }
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index cebd9d3..f6303b1 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
+ if (err < 0)
+ goto errout;
+
+- err = EINVAL;
++ err = -EINVAL;
+ if (!tb[NETCONFA_IFINDEX])
+ goto errout;
+
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 46b9c88..6414891 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
+ reqsk_put(req);
+ }
+
+-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+- struct sock *child)
++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
++ struct request_sock *req,
++ struct sock *child)
+ {
+ struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+ spin_lock(&queue->rskq_lock);
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ inet_child_forget(sk, req, child);
++ child = NULL;
+ } else {
+ req->sk = child;
+ req->dl_next = NULL;
+@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+ sk_acceptq_added(sk);
+ }
+ spin_unlock(&queue->rskq_lock);
++ return child;
+ }
+ EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
+@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ if (own_req) {
+ inet_csk_reqsk_queue_drop(sk, req);
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+- inet_csk_reqsk_queue_add(sk, req, child);
+- /* Warning: caller must not call reqsk_put(req);
+- * child stole last reference on it.
+- */
+- return child;
++ if (inet_csk_reqsk_queue_add(sk, req, child))
++ return child;
+ }
+ /* Too bad, another child took ownership of the request, undo. */
+ bh_unlock_sock(child);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 1fe55ae..b8a0607d 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
+ struct ipq *qp;
+
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
++ skb_orphan(skb);
+
+ /* Lookup (or create) queue header */
+ qp = ip_find(net, ip_hdr(skb), user, vif);
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 5f73a7c..a501242 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ switch (cmsg->cmsg_type) {
+ case IP_RETOPTS:
+ err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
++
++ /* Our caller is responsible for freeing ipc->opt */
+ err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
+ err < 40 ? err : 40);
+ if (err)
+diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
+index 6fb869f6..a04dee5 100644
+--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
+@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
+ {
+ int err;
+
+- skb_orphan(skb);
+-
+ local_bh_disable();
+ err = ip_defrag(net, skb, user);
+ local_bh_enable();
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index e89094a..aa67e0e 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ if (msg->msg_controllen) {
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+- if (err)
++ if (unlikely(err)) {
++ kfree(ipc.opt);
+ return err;
++ }
+ if (ipc.opt)
+ free = 1;
+ }
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index bc35f18..7113bae 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ if (msg->msg_controllen) {
+ err = ip_cmsg_send(net, msg, &ipc, false);
+- if (err)
++ if (unlikely(err)) {
++ kfree(ipc.opt);
+ goto out;
++ }
+ if (ipc.opt)
+ free = 1;
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 85f184e..02c6229 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
+ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly = 256;
+
++static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+ /*
+ * Interface to generic destination cache.
+ */
+@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ struct fib_nh *nh = &FIB_RES_NH(res);
+
+ update_or_create_fnhe(nh, fl4->daddr, new_gw,
+- 0, 0);
++ 0, jiffies + ip_rt_gc_timeout);
+ }
+ if (kill_route)
+ rt->dst.obsolete = DST_OBSOLETE_KILL;
+@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++ struct fnhe_hash_bucket *hash;
++ struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++ u32 hval = fnhe_hashfun(daddr);
++
++ spin_lock_bh(&fnhe_lock);
++
++ hash = rcu_dereference_protected(nh->nh_exceptions,
++ lockdep_is_held(&fnhe_lock));
++ hash += hval;
++
++ fnhe_p = &hash->chain;
++ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++ while (fnhe) {
++ if (fnhe->fnhe_daddr == daddr) {
++ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++ fnhe_flush_routes(fnhe);
++ kfree_rcu(fnhe, rcu);
++ break;
++ }
++ fnhe_p = &fnhe->fnhe_next;
++ fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++ lockdep_is_held(&fnhe_lock));
++ }
++
++ spin_unlock_bh(&fnhe_lock);
++}
++
+ /* called in rcu_read_lock() section */
+ static int __mkroute_input(struct sk_buff *skb,
+ const struct fib_result *res,
+@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
+
+ fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ if (do_cache) {
+- if (fnhe)
++ if (fnhe) {
+ rth = rcu_dereference(fnhe->fnhe_rth_input);
+- else
+- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
++ if (rth && rth->dst.expires &&
++ time_after(jiffies, rth->dst.expires)) {
++ ip_del_fnhe(&FIB_RES_NH(*res), daddr);
++ fnhe = NULL;
++ } else {
++ goto rt_cache;
++ }
++ }
++
++ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+
++rt_cache:
+ if (rt_cache_valid(rth)) {
+ skb_dst_set_noref(skb, &rth->dst);
+ goto out;
+@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ struct fib_nh *nh = &FIB_RES_NH(*res);
+
+ fnhe = find_exception(nh, fl4->daddr);
+- if (fnhe)
++ if (fnhe) {
+ prth = &fnhe->fnhe_rth_output;
+- else {
+- if (unlikely(fl4->flowi4_flags &
+- FLOWI_FLAG_KNOWN_NH &&
+- !(nh->nh_gw &&
+- nh->nh_scope == RT_SCOPE_LINK))) {
+- do_cache = false;
+- goto add;
++ rth = rcu_dereference(*prth);
++ if (rth && rth->dst.expires &&
++ time_after(jiffies, rth->dst.expires)) {
++ ip_del_fnhe(nh, fl4->daddr);
++ fnhe = NULL;
++ } else {
++ goto rt_cache;
+ }
+- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ }
++
++ if (unlikely(fl4->flowi4_flags &
++ FLOWI_FLAG_KNOWN_NH &&
++ !(nh->nh_gw &&
++ nh->nh_scope == RT_SCOPE_LINK))) {
++ do_cache = false;
++ goto add;
++ }
++ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ rth = rcu_dereference(*prth);
++
++rt_cache:
+ if (rt_cache_valid(rth)) {
+ dst_hold(&rth->dst);
+ return rth;
+@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
+ }
+
+ #ifdef CONFIG_SYSCTL
+-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+ static int ip_rt_gc_interval __read_mostly = 60 * HZ;
+ static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
+ static int ip_rt_gc_elasticity __read_mostly = 8;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c82cca1..036a76b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -279,6 +279,7 @@
+
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
++#include <asm/unaligned.h>
+ #include <net/busy_poll.h>
+
+ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
+@@ -938,7 +939,7 @@ new_segment:
+
+ i = skb_shinfo(skb)->nr_frags;
+ can_coalesce = skb_can_coalesce(skb, i, page, offset);
+- if (!can_coalesce && i >= MAX_SKB_FRAGS) {
++ if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ tcp_mark_push(tp, skb);
+ goto new_segment;
+ }
+@@ -1211,7 +1212,7 @@ new_segment:
+
+ if (!skb_can_coalesce(skb, i, pfrag->page,
+ pfrag->offset)) {
+- if (i == MAX_SKB_FRAGS || !sg) {
++ if (i == sysctl_max_skb_frags || !sg) {
+ tcp_mark_push(tp, skb);
+ goto new_segment;
+ }
+@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 now = tcp_time_stamp;
+ unsigned int start;
++ u64 rate64;
+ u32 rate;
+
+ memset(info, 0, sizeof(*info));
+@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ info->tcpi_total_retrans = tp->total_retrans;
+
+ rate = READ_ONCE(sk->sk_pacing_rate);
+- info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
++ rate64 = rate != ~0U ? rate : ~0ULL;
++ put_unaligned(rate64, &info->tcpi_pacing_rate);
+
+ rate = READ_ONCE(sk->sk_max_pacing_rate);
+- info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
++ rate64 = rate != ~0U ? rate : ~0ULL;
++ put_unaligned(rate64, &info->tcpi_max_pacing_rate);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tp->syncp);
+- info->tcpi_bytes_acked = tp->bytes_acked;
+- info->tcpi_bytes_received = tp->bytes_received;
++ put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
++ put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
+ } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
+ info->tcpi_segs_out = tp->segs_out;
+ info->tcpi_segs_in = tp->segs_in;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index d8841a2..8c7e631 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
+
+
+ /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
+-void tcp_req_err(struct sock *sk, u32 seq)
++void tcp_req_err(struct sock *sk, u32 seq, bool abort)
+ {
+ struct request_sock *req = inet_reqsk(sk);
+ struct net *net = sock_net(sk);
+@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
+
+ if (seq != tcp_rsk(req)->snt_isn) {
+ NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+- } else {
++ } else if (abort) {
+ /*
+ * Still in SYN_RECV, just remove it silently.
+ * There is no good way to pass the error to the newly
+@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ }
+ seq = ntohl(th->seq);
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+- return tcp_req_err(sk, seq);
++ return tcp_req_err(sk, seq,
++ type == ICMP_PARAMETERPROB ||
++ type == ICMP_TIME_EXCEEDED ||
++ (type == ICMP_DEST_UNREACH &&
++ (code == ICMP_NET_UNREACH ||
++ code == ICMP_HOST_UNREACH)));
+
+ bh_lock_sock(sk);
+ /* If too many ICMPs get dropped on busy
+@@ -705,7 +710,8 @@ release_sk1:
+ outside socket context is ugly, certainly. What can I do?
+ */
+
+-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v4_send_ack(struct net *net,
++ struct sk_buff *skb, u32 seq, u32 ack,
+ u32 win, u32 tsval, u32 tsecr, int oif,
+ struct tcp_md5sig_key *key,
+ int reply_flags, u8 tos)
+@@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ ];
+ } rep;
+ struct ip_reply_arg arg;
+- struct net *net = dev_net(skb_dst(skb)->dev);
+
+ memset(&rep.th, 0, sizeof(struct tcphdr));
+ memset(&arg, 0, sizeof(arg));
+@@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+ struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+
+- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++ tcp_v4_send_ack(sock_net(sk), skb,
++ tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ tcp_time_stamp + tcptw->tw_ts_offset,
+ tcptw->tw_ts_recent,
+@@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ */
+- tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+- tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
++ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
++ tcp_sk(sk)->snd_nxt;
++
++ tcp_v4_send_ack(sock_net(sk), skb, seq,
+ tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+ tcp_time_stamp,
+ req->ts_recent,
+@@ -1586,28 +1594,30 @@ process:
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+- struct sock *nsk = NULL;
++ struct sock *nsk;
+
+ sk = req->rsk_listener;
+- if (tcp_v4_inbound_md5_hash(sk, skb))
+- goto discard_and_relse;
+- if (likely(sk->sk_state == TCP_LISTEN)) {
+- nsk = tcp_check_req(sk, skb, req, false);
+- } else {
++ if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
++ reqsk_put(req);
++ goto discard_it;
++ }
++ if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+ }
++ sock_hold(sk);
++ nsk = tcp_check_req(sk, skb, req, false);
+ if (!nsk) {
+ reqsk_put(req);
+- goto discard_it;
++ goto discard_and_relse;
+ }
+ if (nsk == sk) {
+- sock_hold(sk);
+ reqsk_put(req);
+ } else if (tcp_child_process(sk, nsk, skb)) {
+ tcp_v4_send_reset(nsk, skb);
+- goto discard_it;
++ goto discard_and_relse;
+ } else {
++ sock_put(sk);
+ return 0;
+ }
+ }
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c438908..7f8ab46 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ if (msg->msg_controllen) {
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc,
+ sk->sk_family == AF_INET6);
+- if (err)
++ if (unlikely(err)) {
++ kfree(ipc.opt);
+ return err;
++ }
+ if (ipc.opt)
+ free = 1;
+ connected = 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 1f21087..e8d3da0 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ if (err < 0)
+ goto errout;
+
+- err = EINVAL;
++ err = -EINVAL;
+ if (!tb[NETCONFA_IFINDEX])
+ goto errout;
+
+@@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ {
+ struct inet6_dev *idev = ifp->idev;
+ struct net_device *dev = idev->dev;
++ bool notify = false;
+
+ addrconf_join_solict(dev, &ifp->addr);
+
+@@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ /* Because optimistic nodes can use this address,
+ * notify listeners. If DAD fails, RTM_DELADDR is sent.
+ */
+- ipv6_ifa_notify(RTM_NEWADDR, ifp);
++ notify = true;
+ }
+ }
+
+@@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ out:
+ spin_unlock(&ifp->lock);
+ read_unlock_bh(&idev->lock);
++ if (notify)
++ ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ }
+
+ static void addrconf_dad_start(struct inet6_ifaddr *ifp)
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 517c55b..4281621 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -162,6 +162,9 @@ ipv4_connected:
+ fl6.fl6_dport = inet->inet_dport;
+ fl6.fl6_sport = inet->inet_sport;
+
++ if (!fl6.flowi6_oif)
++ fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++
+ if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
+ fl6.flowi6_oif = np->mcast_oif;
+
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index 1f9ebe3..dc2db4f 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ }
+ spin_lock_bh(&ip6_sk_fl_lock);
+ for (sflp = &np->ipv6_fl_list;
+- (sfl = rcu_dereference(*sflp)) != NULL;
++ (sfl = rcu_dereference_protected(*sflp,
++ lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
+ sflp = &sfl->next) {
+ if (sfl->fl->label == freq.flr_label) {
+ if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
+ np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+- *sflp = rcu_dereference(sfl->next);
++ *sflp = sfl->next;
+ spin_unlock_bh(&ip6_sk_fl_lock);
+ fl_release(sfl->fl);
+ kfree_rcu(sfl, rcu);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 6473889..31144c4 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ struct rt6_info *rt;
+ #endif
+ int err;
++ int flags = 0;
+
+ /* The correct way to handle this would be to do
+ * ip6_route_get_saddr, and then ip6_route_output; however,
+@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ dst_release(*dst);
+ *dst = NULL;
+ }
++
++ if (fl6->flowi6_oif)
++ flags |= RT6_LOOKUP_F_IFACE;
+ }
+
+ if (!*dst)
+- *dst = ip6_route_output(net, sk, fl6);
++ *dst = ip6_route_output_flags(net, sk, fl6, flags);
+
+ err = (*dst)->error;
+ if (err)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 826e6aa..3f164d3 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
+ return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
+ }
+
+-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+- struct flowi6 *fl6)
++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
++ struct flowi6 *fl6, int flags)
+ {
+ struct dst_entry *dst;
+- int flags = 0;
+ bool any_src;
+
+ dst = l3mdev_rt6_dst_by_oif(net, fl6);
+@@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+
+ return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
+ }
+-EXPORT_SYMBOL(ip6_route_output);
++EXPORT_SYMBOL_GPL(ip6_route_output_flags);
+
+ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
+ {
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index bd100b4..b8d4056 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ struct tcp_sock *tp;
+ __u32 seq, snd_una;
+ struct sock *sk;
++ bool fatal;
+ int err;
+
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ return;
+ }
+ seq = ntohl(th->seq);
++ fatal = icmpv6_err_convert(type, code, &err);
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+- return tcp_req_err(sk, seq);
++ return tcp_req_err(sk, seq, fatal);
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
+@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ goto out;
+ }
+
+- icmpv6_err_convert(type, code, &err);
+
+ /* Might be for an request_sock */
+ switch (sk->sk_state) {
+@@ -1387,7 +1388,7 @@ process:
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+- struct sock *nsk = NULL;
++ struct sock *nsk;
+
+ sk = req->rsk_listener;
+ tcp_v6_fill_cb(skb, hdr, th);
+@@ -1395,24 +1396,24 @@ process:
+ reqsk_put(req);
+ goto discard_it;
+ }
+- if (likely(sk->sk_state == TCP_LISTEN)) {
+- nsk = tcp_check_req(sk, skb, req, false);
+- } else {
++ if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ goto lookup;
+ }
++ sock_hold(sk);
++ nsk = tcp_check_req(sk, skb, req, false);
+ if (!nsk) {
+ reqsk_put(req);
+- goto discard_it;
++ goto discard_and_relse;
+ }
+ if (nsk == sk) {
+- sock_hold(sk);
+ reqsk_put(req);
+ tcp_v6_restore_cb(skb);
+ } else if (tcp_child_process(sk, nsk, skb)) {
+ tcp_v6_send_reset(nsk, skb);
+- goto discard_it;
++ goto discard_and_relse;
+ } else {
++ sock_put(sk);
+ return 0;
+ }
+ }
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 435608c..20ab7b2 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ if (!addr || addr->sa_family != AF_IUCV)
+ return -EINVAL;
+
++ if (addr_len < sizeof(struct sockaddr_iucv))
++ return -EINVAL;
++
+ lock_sock(sk);
+ if (sk->sk_state != IUCV_OPEN) {
+ err = -EBADFD;
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index f93c5be..2caaa84 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
+ ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, tunnel, cmd);
+
+- if (ret >= 0)
+- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ if (ret >= 0) {
++ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ /* We don't care if no one is listening */
++ if (ret == -ESRCH)
++ ret = 0;
++ return ret;
++ }
+
+ nlmsg_free(msg);
+
+@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
+ ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+ NLM_F_ACK, session, cmd);
+
+- if (ret >= 0)
+- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ if (ret >= 0) {
++ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ /* We don't care if no one is listening */
++ if (ret == -ESRCH)
++ ret = 0;
++ return ret;
++ }
+
+ nlmsg_free(msg);
+
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 337bb5d..6a12b0f 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ continue;
+ sdata->u.ibss.last_scan_completed = jiffies;
+- ieee80211_queue_work(&local->hw, &sdata->work);
+ }
+ mutex_unlock(&local->iflist_mtx);
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index fa28500..6f85b6a 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1370,17 +1370,6 @@ out:
+ sdata_unlock(sdata);
+ }
+
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
+-{
+- struct ieee80211_sub_if_data *sdata;
+-
+- rcu_read_lock();
+- list_for_each_entry_rcu(sdata, &local->interfaces, list)
+- if (ieee80211_vif_is_mesh(&sdata->vif) &&
+- ieee80211_sdata_running(sdata))
+- ieee80211_queue_work(&local->hw, &sdata->work);
+- rcu_read_unlock();
+-}
+
+ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
+index a159634..4a8019f 100644
+--- a/net/mac80211/mesh.h
++++ b/net/mac80211/mesh.h
+@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
+ }
+
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
+-
+ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
+ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
+ void ieee80211s_stop(void);
+ #else
+-static inline void
+-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
+ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ { return false; }
+ static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 3aa0434..83097c3 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
+ if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.monitor_work);
+- /* and do all the other regular work too */
+- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ }
+ }
+
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index a413e52..acbe182 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ bool was_scanning = local->scanning;
+ struct cfg80211_scan_request *scan_req;
+ struct ieee80211_sub_if_data *scan_sdata;
++ struct ieee80211_sub_if_data *sdata;
+
+ lockdep_assert_held(&local->mtx);
+
+@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+
+ ieee80211_mlme_notify_scan_completed(local);
+ ieee80211_ibss_notify_scan_completed(local);
+- ieee80211_mesh_notify_scan_completed(local);
++
++ /* Requeue all the work that might have been ignored while
++ * the scan was in progress; if there was none, this will
++ * just be a no-op for that interface.
++ */
++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ if (ieee80211_sdata_running(sdata))
++ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
++ }
++
+ if (was_scanning)
+ ieee80211_start_next_roc(local);
+ }
+diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
+index 1605691..d933cb8 100644
+--- a/net/openvswitch/vport-vxlan.c
++++ b/net/openvswitch/vport-vxlan.c
+@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+ int err;
+ struct vxlan_config conf = {
+ .no_share = true,
+- .flags = VXLAN_F_COLLECT_METADATA,
++ .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
+ };
+
+ if (!options) {
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index f53bf3b6..cf5b69a 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+ return res;
+ }
+
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+- bool r;
+-
+- mutex_lock(&data->mtx);
+- r = !list_empty(&data->events);
+- mutex_unlock(&data->mtx);
+-
+- return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+ {
+@@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ goto out;
+ }
+ mutex_unlock(&data->mtx);
++ /* since we re-check and it just compares pointers,
++ * using !list_empty() without locking isn't a problem
++ */
+ ret = wait_event_interruptible(data->read_wait,
+- rfkill_readable(data));
++ !list_empty(&data->events));
+ mutex_lock(&data->mtx);
+
+ if (ret)
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index b5c2cf2..af1acf0 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1852,6 +1852,7 @@ reset:
+ }
+
+ tp = old_tp;
++ protocol = tc_skb_protocol(skb);
+ goto reclassify;
+ #endif
+ }
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 3d9ea9a..8b4ff31 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -60,6 +60,8 @@
+ #include <net/inet_common.h>
+ #include <net/inet_ecn.h>
+
++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
++
+ /* Global data structures. */
+ struct sctp_globals sctp_globals __read_mostly;
+
+@@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
+ unsigned long limit;
+ int max_share;
+ int order;
++ int num_entries;
++ int max_entry_order;
+
+ sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
+
+@@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
+
+ /* Size and allocate the association hash table.
+ * The methodology is similar to that of the tcp hash tables.
++ * Though not identical. Start by getting a goal size
+ */
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (22 - PAGE_SHIFT);
+ else
+ goal = totalram_pages >> (24 - PAGE_SHIFT);
+
+- for (order = 0; (1UL << order) < goal; order++)
+- ;
++ /* Then compute the page order for said goal */
++ order = get_order(goal);
++
++ /* Now compute the required page order for the maximum-sized table we
++ * want to create
++ */
++ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
++ sizeof(struct sctp_bind_hashbucket));
++
++ /* Limit the page order by that maximum hash table size */
++ order = min(order, max_entry_order);
+
+ do {
+ sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
+@@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
+ INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
+ }
+
+- /* Allocate and initialize the SCTP port hash table. */
++ /* Allocate and initialize the SCTP port hash table.
++ * Note that order is initialized to start at the maximum-sized
++ * table we want to support. If we can't get that many pages,
++ * reduce the order and try again.
++ */
+ do {
+- sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
+- sizeof(struct sctp_bind_hashbucket);
+- if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
+- continue;
+ sctp_port_hashtable = (struct sctp_bind_hashbucket *)
+ __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
+ } while (!sctp_port_hashtable && --order > 0);
++
+ if (!sctp_port_hashtable) {
+ pr_err("Failed bind hash alloc\n");
+ status = -ENOMEM;
+ goto err_bhash_alloc;
+ }
++
++ /* Now compute the number of entries that will fit in the
++ * port hash space we allocated
++ */
++ num_entries = (1UL << order) * PAGE_SIZE /
++ sizeof(struct sctp_bind_hashbucket);
++
++ /* And finish by rounding it down to the nearest power of two.
++ * This wastes some memory, of course, but it's needed because
++ * the hash function relies on the assumption that
++ * the number of entries is a power of two.
++ */
++ sctp_port_hashsize = rounddown_pow_of_two(num_entries);
++
+ for (i = 0; i < sctp_port_hashsize; i++) {
+ spin_lock_init(&sctp_port_hashtable[i].lock);
+ INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ef1d90f..be1489f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ struct sctp_hmac_algo_param *hmacs;
+ __u16 data_len = 0;
+ u32 num_idents;
++ int i;
+
+ if (!ep->auth_enable)
+ return -EACCES;
+@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ return -EFAULT;
+ if (put_user(num_idents, &p->shmac_num_idents))
+ return -EFAULT;
+- if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+- return -EFAULT;
++ for (i = 0; i < num_idents; i++) {
++ __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
++
++ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
++ return -EFAULT;
++ }
+ return 0;
+ }
+
+@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+
+ if (cmsgs->srinfo->sinfo_flags &
+ ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++ SCTP_SACK_IMMEDIATELY |
+ SCTP_ABORT | SCTP_EOF))
+ return -EINVAL;
+ break;
+@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+
+ if (cmsgs->sinfo->snd_flags &
+ ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++ SCTP_SACK_IMMEDIATELY |
+ SCTP_ABORT | SCTP_EOF))
+ return -EINVAL;
+ break;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 5e4f815..21e2035 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
+ if (bp[0] == '\\' && bp[1] == 'x') {
+ /* HEX STRING */
+ bp += 2;
+- while (len < bufsize) {
++ while (len < bufsize - 1) {
+ int h, l;
+
+ h = hex_to_bin(bp[0]);
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index f34e535..d5d7132 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -20,6 +20,7 @@
+ #include <linux/list.h>
+ #include <linux/workqueue.h>
+ #include <linux/if_vlan.h>
++#include <linux/rtnetlink.h>
+ #include <net/ip_fib.h>
+ #include <net/switchdev.h>
+
+@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
+ }
+ EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
+
+-static DEFINE_MUTEX(switchdev_mutex);
+ static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
+
+ /**
+@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
+ {
+ int err;
+
+- mutex_lock(&switchdev_mutex);
++ rtnl_lock();
+ err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
+- mutex_unlock(&switchdev_mutex);
++ rtnl_unlock();
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(register_switchdev_notifier);
+@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
+ {
+ int err;
+
+- mutex_lock(&switchdev_mutex);
++ rtnl_lock();
+ err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
+- mutex_unlock(&switchdev_mutex);
++ rtnl_unlock();
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
+@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
+ * Call all network notifier blocks. This should be called by driver
+ * when it needs to propagate hardware event.
+ * Return values are same as for atomic_notifier_call_chain().
++ * rtnl_lock must be held.
+ */
+ int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info)
+ {
+ int err;
+
++ ASSERT_RTNL();
++
+ info->dev = dev;
+- mutex_lock(&switchdev_mutex);
+ err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+- mutex_unlock(&switchdev_mutex);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 9dc239d..92e367a 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ NLM_F_MULTI, TIPC_NL_LINK_GET);
+- if (!hdr)
++ if (!hdr) {
++ tipc_bcast_unlock(net);
+ return -EMSGSIZE;
++ }
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+ if (!attrs)
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 20cddec..3926b56 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
+ skb_queue_head_init(&n_ptr->bc_entry.inputq1);
+ __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
+ skb_queue_head_init(&n_ptr->bc_entry.inputq2);
+- hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+- if (n_ptr->addr < temp_node->addr)
+- break;
+- }
+- list_add_tail_rcu(&n_ptr->list, &temp_node->list);
+ n_ptr->state = SELF_DOWN_PEER_LEAVING;
+ n_ptr->signature = INVALID_NODE_SIG;
+ n_ptr->active_links[0] = INVALID_BEARER_ID;
+@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
+ tipc_node_get(n_ptr);
+ setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+ n_ptr->keepalive_intv = U32_MAX;
++ hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
++ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
++ if (n_ptr->addr < temp_node->addr)
++ break;
++ }
++ list_add_tail_rcu(&n_ptr->list, &temp_node->list);
+ exit:
+ spin_unlock_bh(&tn->node_list_lock);
+ return n_ptr;
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index 350cca3..69ee2ee 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
+ struct sockaddr_tipc *addr, void *usr_data,
+ void *buf, size_t len)
+ {
+- struct tipc_subscriber *subscriber = usr_data;
++ struct tipc_subscriber *subscrb = usr_data;
+ struct tipc_subscription *sub = NULL;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+- tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
+- if (sub)
+- tipc_nametbl_subscribe(sub);
+- else
+- tipc_conn_terminate(tn->topsrv, subscriber->conid);
++ if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
++ return tipc_conn_terminate(tn->topsrv, subscrb->conid);
++
++ tipc_nametbl_subscribe(sub);
+ }
+
+ /* Handle one request to establish a new subscriber */
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index e3f85bc..898a53a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ UNIXCB(skb).fp = NULL;
+
+ for (i = scm->fp->count-1; i >= 0; i--)
+- unix_notinflight(scm->fp->fp[i]);
++ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+ }
+
+ static void unix_destruct_scm(struct sk_buff *skb)
+@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ return -ENOMEM;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+- unix_inflight(scm->fp->fp[i]);
++ unix_inflight(scm->fp->user, scm->fp->fp[i]);
+ return max_level;
+ }
+
+@@ -1781,7 +1781,12 @@ restart_locked:
+ goto out_unlock;
+ }
+
+- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++ /* other == sk && unix_peer(other) != sk if
++ * - unix_peer(sk) == NULL, destination address bound to sk
++ * - unix_peer(sk) == sk by time of get but disconnected before lock
++ */
++ if (other != sk &&
++ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ if (timeo) {
+ timeo = unix_wait_for_peer(other, timeo);
+
+@@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
+ size_t size = state->size;
+ unsigned int last_len;
+
+- err = -EINVAL;
+- if (sk->sk_state != TCP_ESTABLISHED)
++ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++ err = -EINVAL;
+ goto out;
++ }
+
+- err = -EOPNOTSUPP;
+- if (flags & MSG_OOB)
++ if (unlikely(flags & MSG_OOB)) {
++ err = -EOPNOTSUPP;
+ goto out;
++ }
+
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, noblock);
+@@ -2322,9 +2329,11 @@ again:
+ goto unlock;
+
+ unix_state_unlock(sk);
+- err = -EAGAIN;
+- if (!timeo)
++ if (!timeo) {
++ err = -EAGAIN;
+ break;
++ }
++
+ mutex_unlock(&u->readlock);
+
+ timeo = unix_stream_data_wait(sk, timeo, last,
+@@ -2332,6 +2341,7 @@ again:
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
++ scm_destroy(&scm);
+ goto out;
+ }
+
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index c512f64..4d96797 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -220,7 +220,7 @@ done:
+ return skb->len;
+ }
+
+-static struct sock *unix_lookup_by_ino(int ino)
++static struct sock *unix_lookup_by_ino(unsigned int ino)
+ {
+ int i;
+ struct sock *sk;
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 8fcdc22..6a0d485 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
+ * descriptor if it is for an AF_UNIX socket.
+ */
+
+-void unix_inflight(struct file *fp)
++void unix_inflight(struct user_struct *user, struct file *fp)
+ {
+ struct sock *s = unix_get_socket(fp);
+
+@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
+ }
+ unix_tot_inflight++;
+ }
+- fp->f_cred->user->unix_inflight++;
++ user->unix_inflight++;
+ spin_unlock(&unix_gc_lock);
+ }
+
+-void unix_notinflight(struct file *fp)
++void unix_notinflight(struct user_struct *user, struct file *fp)
+ {
+ struct sock *s = unix_get_socket(fp);
+
+@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
+ list_del_init(&u->link);
+ unix_tot_inflight--;
+ }
+- fp->f_cred->user->unix_inflight--;
++ user->unix_inflight--;
+ spin_unlock(&unix_gc_lock);
+ }
+
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index dacf71a..ba6c34e 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -62,7 +62,7 @@ vmlinux_link()
+ -Wl,--start-group \
+ ${KBUILD_VMLINUX_MAIN} \
+ -Wl,--end-group \
+- -lutil -lrt ${1}
++ -lutil -lrt -lpthread ${1}
+ rm -f linux
+ fi
+ }
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index ff81026..7c57c7f 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
+ */
+ static inline unsigned int smk_ptrace_mode(unsigned int mode)
+ {
+- switch (mode) {
+- case PTRACE_MODE_READ:
+- return MAY_READ;
+- case PTRACE_MODE_ATTACH:
++ if (mode & PTRACE_MODE_ATTACH)
+ return MAY_READWRITE;
+- }
++ if (mode & PTRACE_MODE_READ)
++ return MAY_READ;
+
+ return 0;
+ }
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index d3c19c9..cb6ed10 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ int rc = 0;
+
+ /* require ptrace target be a child of ptracer on attach */
+- if (mode == PTRACE_MODE_ATTACH) {
++ if (mode & PTRACE_MODE_ATTACH) {
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_DISABLED:
+ /* No additional restrictions. */
+@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ }
+ }
+
+- if (rc) {
++ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
+ printk_ratelimited(KERN_NOTICE
+ "ptrace of pid %d was attempted by: %s (pid %d)\n",
+ child->pid, current->comm, current->pid);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 2c13298..2ff692d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -357,7 +357,10 @@ enum {
+ ((pci)->device == 0x0d0c) || \
+ ((pci)->device == 0x160c))
+
+-#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
++#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
++#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
++#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+
+ static char *driver_short_names[] = {
+ [AZX_DRIVER_ICH] = "HDA Intel",
+@@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
+
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+ snd_hdac_set_codec_wakeup(bus, true);
+- if (IS_BROXTON(pci)) {
++ if (IS_SKL_PLUS(pci)) {
+ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
+ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+ }
+ azx_init_chip(chip, full_reset);
+- if (IS_BROXTON(pci)) {
++ if (IS_SKL_PLUS(pci)) {
+ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ val = val | INTEL_HDA_CGCTL_MISCBDCGE;
+ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+@@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
+ snd_hdac_set_codec_wakeup(bus, false);
+
+ /* reduce dma latency to avoid noise */
+- if (IS_BROXTON(pci))
++ if (IS_BXT(pci))
+ bxt_reduce_dma_latency(chip);
+ }
+
+@@ -971,11 +974,6 @@ static int azx_resume(struct device *dev)
+ /* put codec down to D3 at hibernation for Intel SKL+;
+ * otherwise BIOS may still access the codec and screw up the driver
+ */
+-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+-
+ static int azx_freeze_noirq(struct device *dev)
+ {
+ struct pci_dev *pci = to_pci_dev(dev);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index efd4980..72fa58d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4749,6 +4749,7 @@ enum {
+ ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+ ALC293_FIXUP_LENOVO_SPK_NOISE,
+ ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++ ALC255_FIXUP_DELL_SPK_NOISE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
+ },
++ [ALC255_FIXUP_DELL_SPK_NOISE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc_fixup_disable_aamix,
++ .chained = true,
++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5410,6 +5417,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
+index 96234b6..5d51d6f 100644
+--- a/tools/hv/hv_vss_daemon.c
++++ b/tools/hv/hv_vss_daemon.c
+@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
+ syslog(LOG_ERR, "Illegal op:%d\n", op);
+ }
+ vss_msg->error = error;
+- len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
++ len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
+ if (len != sizeof(struct hv_vss_msg)) {
+ syslog(LOG_ERR, "write failed; error: %d %s", errno,
+ strerror(errno));
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index 2d9d830..4a3a72c 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
+ int i, ret;
+
+ aggr->val = aggr->ena = aggr->run = 0;
+- init_stats(ps->res_stats);
+
+ if (counter->per_pkg)
+ zero_per_pkg(counter);
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index 77edcdc..0572784 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -88,7 +88,11 @@ test_delete()
+ exit 1
+ fi
+
+- rm $file
++ rm $file 2>/dev/null
++ if [ $? -ne 0 ]; then
++ chattr -i $file
++ rm $file
++ fi
+
+ if [ -e $file ]; then
+ echo "$file couldn't be deleted" >&2
+@@ -111,6 +115,7 @@ test_zero_size_delete()
+ exit 1
+ fi
+
++ chattr -i $file
+ printf "$attrs" > $file
+
+ if [ -e $file ]; then
+@@ -141,7 +146,11 @@ test_valid_filenames()
+ echo "$file could not be created" >&2
+ ret=1
+ else
+- rm $file
++ rm $file 2>/dev/null
++ if [ $? -ne 0 ]; then
++ chattr -i $file
++ rm $file
++ fi
+ fi
+ done
+
+@@ -174,7 +183,11 @@ test_invalid_filenames()
+
+ if [ -e $file ]; then
+ echo "Creating $file should have failed" >&2
+- rm $file
++ rm $file 2>/dev/null
++ if [ $? -ne 0 ]; then
++ chattr -i $file
++ rm $file
++ fi
+ ret=1
+ fi
+ done
+diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
+index 8c07644..4af74f7 100644
+--- a/tools/testing/selftests/efivarfs/open-unlink.c
++++ b/tools/testing/selftests/efivarfs/open-unlink.c
+@@ -1,10 +1,68 @@
++#include <errno.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <sys/ioctl.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
++#include <linux/fs.h>
++
++static int set_immutable(const char *path, int immutable)
++{
++ unsigned int flags;
++ int fd;
++ int rc;
++ int error;
++
++ fd = open(path, O_RDONLY);
++ if (fd < 0)
++ return fd;
++
++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++ if (rc < 0) {
++ error = errno;
++ close(fd);
++ errno = error;
++ return rc;
++ }
++
++ if (immutable)
++ flags |= FS_IMMUTABLE_FL;
++ else
++ flags &= ~FS_IMMUTABLE_FL;
++
++ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
++ error = errno;
++ close(fd);
++ errno = error;
++ return rc;
++}
++
++static int get_immutable(const char *path)
++{
++ unsigned int flags;
++ int fd;
++ int rc;
++ int error;
++
++ fd = open(path, O_RDONLY);
++ if (fd < 0)
++ return fd;
++
++ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++ if (rc < 0) {
++ error = errno;
++ close(fd);
++ errno = error;
++ return rc;
++ }
++ close(fd);
++ if (flags & FS_IMMUTABLE_FL)
++ return 1;
++ return 0;
++}
+
+ int main(int argc, char **argv)
+ {
+@@ -27,7 +85,7 @@ int main(int argc, char **argv)
+ buf[4] = 0;
+
+ /* create a test variable */
+- fd = open(path, O_WRONLY | O_CREAT);
++ fd = open(path, O_WRONLY | O_CREAT, 0600);
+ if (fd < 0) {
+ perror("open(O_WRONLY)");
+ return EXIT_FAILURE;
+@@ -41,6 +99,18 @@ int main(int argc, char **argv)
+
+ close(fd);
+
++ rc = get_immutable(path);
++ if (rc < 0) {
++ perror("ioctl(FS_IOC_GETFLAGS)");
++ return EXIT_FAILURE;
++ } else if (rc) {
++ rc = set_immutable(path, 0);
++ if (rc < 0) {
++ perror("ioctl(FS_IOC_SETFLAGS)");
++ return EXIT_FAILURE;
++ }
++ }
++
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ perror("open");
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 7a2f449..5d10f10 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+-
+- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
++ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
++ int sz = nr_longs * sizeof(unsigned long);
+ vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+ vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
+ vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 77d42be..4f70d12 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
+ * do alloc nowait since if we are going to sleep anyway we
+ * may as well sleep faulting in page
+ */
+- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ if (!work)
+ return 0;
+
diff --git a/4.4.3/4420_grsecurity-3.1-4.4.3-201602282149.patch b/4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch
index fcd8074..88b7093 100644
--- a/4.4.3/4420_grsecurity-3.1-4.4.3-201602282149.patch
+++ b/4.4.4/4420_grsecurity-3.1-4.4.4-201603032158.patch
@@ -449,7 +449,7 @@ index af70d15..ccd3786 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index 802be10..383fd5d 100644
+index 344bc6f..4753efd 100644
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -998,7 +998,7 @@ index 34e1569..b48ad87 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
-index 259c0ca..48eaaa1 100644
+index ddbb361..caf403d 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -7,6 +7,7 @@ config ARM_PTDUMP
@@ -6375,7 +6375,7 @@ index 8feaed6..1bd8a64 100644
/**
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
-index 2046c02..8183239 100644
+index 21ed715..774a251 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
@@ -6404,7 +6404,7 @@ index b336037..5b874cc 100644
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 8957f15..c5b802e 100644
+index 18826aa..f5a6216 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,9 @@
@@ -10399,7 +10399,7 @@ index 646988d..b88905f 100644
info.flags = 0;
info.length = len;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 30e7ddb..266a3b0 100644
+index c690c8e..1d5798e 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
@@ -16278,7 +16278,7 @@ index a55697d..66473ae 100644
-END(ignore_sysret)
+ENDPROC(ignore_sysret)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
-index 6a1ae37..f1c3bfd 100644
+index 15cfeba..da22a57 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -13,8 +13,10 @@
@@ -16437,7 +16437,7 @@ index 6a1ae37..f1c3bfd 100644
/*
* Emulated IA32 system calls via int 0x80.
-@@ -285,11 +340,11 @@ ENTRY(entry_INT80_compat)
+@@ -286,11 +341,11 @@ ENTRY(entry_INT80_compat)
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
@@ -16454,7 +16454,7 @@ index 6a1ae37..f1c3bfd 100644
pushq %rbx /* pt_regs->rbx */
pushq %rbp /* pt_regs->rbp */
pushq %r12 /* pt_regs->r12 */
-@@ -298,6 +353,12 @@ ENTRY(entry_INT80_compat)
+@@ -299,6 +354,12 @@ ENTRY(entry_INT80_compat)
pushq %r15 /* pt_regs->r15 */
cld
@@ -16467,7 +16467,7 @@ index 6a1ae37..f1c3bfd 100644
/*
* User mode is traced as though IRQs are on, and the interrupt
* gate turned them off.
-@@ -309,10 +370,12 @@ ENTRY(entry_INT80_compat)
+@@ -310,10 +371,12 @@ ENTRY(entry_INT80_compat)
.Lsyscall_32_done:
/* Go back to user mode. */
@@ -18868,6 +18868,19 @@ index 1e3408e..67c5ba1 100644
extern void elcr_set_level_irq(unsigned int irq);
+diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
+index 055ea99..7dabb68 100644
+--- a/arch/x86/include/asm/hypervisor.h
++++ b/arch/x86/include/asm/hypervisor.h
+@@ -43,7 +43,7 @@ struct hypervisor_x86 {
+
+ /* X2APIC detection (run once per boot) */
+ bool (*x2apic_available)(void);
+-};
++} __do_const;
+
+ extern const struct hypervisor_x86 *x86_hyper;
+
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 39bcefc..272d904 100644
--- a/arch/x86/include/asm/i8259.h
@@ -22829,7 +22842,7 @@ index 971cf88..a8e01ae 100644
.name = "bigsmp",
.probe = probe_bigsmp,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index f253218..b71d723 100644
+index fdb0fbf..1426add 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1682,7 +1682,7 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
@@ -22904,7 +22917,7 @@ index 7694ae6..5abb08e 100644
static int cmdline_apic __initdata;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
-index 861bc59..a721835 100644
+index a35f6b5..cced8817 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -36,6 +36,7 @@ static struct irq_chip lapic_controller;
@@ -23700,6 +23713,19 @@ index ce47402..4a6bdf8 100644
}
static void microcode_fini_cpu(int cpu)
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 20e242e..14b1629 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -193,7 +193,7 @@ static void __init ms_hyperv_init_platform(void)
+ mark_tsc_unstable("running on Hyper-V");
+ }
+
+-const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
++const struct hypervisor_x86 x86_hyper_ms_hyperv = {
+ .name = "Microsoft HyperV",
+ .detect = ms_hyperv_platform,
+ .init_platform = ms_hyperv_init_platform,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 3b533cf..b40d426 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
@@ -24161,6 +24187,19 @@ index 2f0a4a9..8f4b802 100644
struct pci2phy_map {
struct list_head list;
+diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
+index 628a059..83bced6 100644
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -137,7 +137,7 @@ static bool __init vmware_legacy_x2apic_available(void)
+ (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
+ }
+
+-const __refconst struct hypervisor_x86 x86_hyper_vmware = {
++const struct hypervisor_x86 x86_hyper_vmware = {
+ .name = "VMware",
+ .detect = vmware_platform,
+ .set_cpu_features = vmware_set_cpu_features,
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index afa64ad..dce67dd 100644
--- a/arch/x86/kernel/crash_dump_64.c
@@ -26051,7 +26090,7 @@ index 37dae79..620dd84 100644
regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
t->iopl = level << 12;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
-index f8062aa..c37b60f 100644
+index 61521dc..5ce5a37 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -28,7 +28,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -26552,6 +26591,19 @@ index c2bedae..25e7ab60 100644
.attr = {
.name = "data",
.mode = S_IRUGO,
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 47190bd..0165c4d 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -553,7 +553,7 @@ static uint32_t __init kvm_detect(void)
+ return kvm_cpuid_base();
+ }
+
+-const struct hypervisor_x86 x86_hyper_kvm __refconst = {
++const struct hypervisor_x86 x86_hyper_kvm = {
+ .name = "KVM",
+ .detect = kvm_detect,
+ .x2apic_available = kvm_para_available,
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 2bd81e3..2d5e042 100644
--- a/arch/x86/kernel/kvmclock.c
@@ -29408,7 +29460,7 @@ index 6525e92..28559d2 100644
out:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 1505587..0f0516c 100644
+index b9b09fe..138addd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1881,7 +1881,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
@@ -29534,7 +29586,7 @@ index 4d30b86..94115f0 100644
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
-index 3058a22..cb2670f 100644
+index 7be8a25..7d71250 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -335,7 +335,7 @@ retry_walk:
@@ -29767,7 +29819,7 @@ index 10e7693..aa4d471 100644
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 9a2ed89..f2f4bc5 100644
+index 6ef3856..12e4701 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1937,8 +1937,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -29790,7 +29842,7 @@ index 9a2ed89..f2f4bc5 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -3028,7 +3030,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+@@ -3029,7 +3031,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
@@ -29799,7 +29851,7 @@ index 9a2ed89..f2f4bc5 100644
u64 xstate_bv = xsave->header.xfeatures;
u64 valid;
-@@ -3064,7 +3066,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+@@ -3065,7 +3067,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
@@ -29808,7 +29860,7 @@ index 9a2ed89..f2f4bc5 100644
u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
u64 valid;
-@@ -3108,7 +3110,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+@@ -3109,7 +3111,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
fill_xsave((u8 *) guest_xsave->region, vcpu);
} else {
memcpy(guest_xsave->region,
@@ -29817,7 +29869,7 @@ index 9a2ed89..f2f4bc5 100644
sizeof(struct fxregs_state));
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XFEATURE_MASK_FPSSE;
-@@ -3133,7 +3135,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+@@ -3134,7 +3136,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
} else {
if (xstate_bv & ~XFEATURE_MASK_FPSSE)
return -EINVAL;
@@ -29826,7 +29878,7 @@ index 9a2ed89..f2f4bc5 100644
guest_xsave->region, sizeof(struct fxregs_state));
}
return 0;
-@@ -6363,6 +6365,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+@@ -6364,6 +6366,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -29834,7 +29886,7 @@ index 9a2ed89..f2f4bc5 100644
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
-@@ -6611,6 +6614,7 @@ out:
+@@ -6612,6 +6615,7 @@ out:
return r;
}
@@ -29842,7 +29894,7 @@ index 9a2ed89..f2f4bc5 100644
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu) &&
-@@ -7158,7 +7162,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+@@ -7159,7 +7163,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct fxregs_state *fxsave =
@@ -29851,7 +29903,7 @@ index 9a2ed89..f2f4bc5 100644
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
-@@ -7175,7 +7179,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7176,7 +7180,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct fxregs_state *fxsave =
@@ -29860,7 +29912,7 @@ index 9a2ed89..f2f4bc5 100644
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
-@@ -7191,9 +7195,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7192,9 +7196,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
static void fx_init(struct kvm_vcpu *vcpu)
{
@@ -29872,7 +29924,7 @@ index 9a2ed89..f2f4bc5 100644
host_xcr0 | XSTATE_COMPACTION_ENABLED;
/*
-@@ -7217,7 +7221,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+@@ -7218,7 +7222,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
@@ -29881,7 +29933,7 @@ index 9a2ed89..f2f4bc5 100644
trace_kvm_fpu(1);
}
-@@ -7520,6 +7524,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+@@ -7521,6 +7525,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
struct static_key kvm_no_apic_vcpu __read_mostly;
@@ -29890,7 +29942,7 @@ index 9a2ed89..f2f4bc5 100644
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
-@@ -7536,11 +7542,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+@@ -7537,11 +7543,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -29909,7 +29961,7 @@ index 9a2ed89..f2f4bc5 100644
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
-@@ -7596,6 +7605,9 @@ fail_mmu_destroy:
+@@ -7597,6 +7606,9 @@ fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
@@ -29919,7 +29971,7 @@ index 9a2ed89..f2f4bc5 100644
fail:
return r;
}
-@@ -7613,6 +7625,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+@@ -7614,6 +7626,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
free_page((unsigned long)vcpu->arch.pio_data);
if (!lapic_in_kernel(vcpu))
static_key_slow_dec(&kvm_no_apic_vcpu);
@@ -34346,7 +34398,7 @@ index 0057a7acc..95c7edd 100644
might_sleep();
if (is_enabled()) /* recheck and proper locking in *_core() */
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
-index b2fd67d..086bcb9 100644
+index ef05755..7125725 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -193,7 +193,7 @@ static int mpx_insn_decode(struct insn *insn,
@@ -36844,10 +36896,10 @@ index 2f33760..835e50a 100644
#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
diff --git a/block/bio.c b/block/bio.c
-index 4f184d9..eb9fa89 100644
+index d4d1443..bb167da 100644
--- a/block/bio.c
+++ b/block/bio.c
-@@ -1140,7 +1140,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+@@ -1143,7 +1143,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -36856,7 +36908,7 @@ index 4f184d9..eb9fa89 100644
return ERR_PTR(-EINVAL);
nr_pages += end - start;
-@@ -1265,7 +1265,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+@@ -1268,7 +1268,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -37268,7 +37320,7 @@ index d51a30a..b6891a3 100644
stream->workspace = vzalloc(zlib_inflate_workspacesize());
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
-index 3405f7a..1155093 100644
+index 5fdac39..ce3c90e 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -412,7 +412,7 @@ static int video_enable_only_lcd(const struct dmi_system_id *d)
@@ -37566,7 +37618,7 @@ index 82707f9..a6b19f5 100644
* Award BIOS on this AOpen makes thermal control almost worthless.
* http://bugzilla.kernel.org/show_bug.cgi?id=8842
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
-index daaf1c4..4b583a2 100644
+index 80e55cb..f660caf 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -41,7 +41,6 @@ ACPI_MODULE_NAME("video");
@@ -37577,7 +37629,7 @@ index daaf1c4..4b583a2 100644
static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
-@@ -302,6 +301,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
+@@ -294,6 +293,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
@@ -37588,7 +37640,7 @@ index daaf1c4..4b583a2 100644
/*
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
-@@ -332,8 +335,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
+@@ -324,8 +327,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
&video_caps, NULL);
INIT_WORK(&backlight_notify_work,
acpi_video_backlight_notify_work);
@@ -40723,10 +40775,10 @@ index 8412ce5..3a40e15 100644
ret = create_boost_sysfs_file();
if (ret)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
-index b260576..21a0a29 100644
+index d994b0f..b0b9d15 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
-@@ -460,7 +460,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
+@@ -465,7 +465,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
cs_dbs_info->down_skip = 0;
cs_dbs_info->requested_freq = policy->cur;
} else {
@@ -41156,10 +41208,10 @@ index 11707df..2ea96f7 100644
/* Run before NMI debug handler and KGDB */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
-index 592af5f..bb1d583 100644
+index 5358737..2064670 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
-@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
*/
int edac_device_alloc_index(void)
{
@@ -41172,7 +41224,7 @@ index 592af5f..bb1d583 100644
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
-index a75acea..589dd01 100644
+index 58aed67..e6817a1 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
@@ -41185,7 +41237,7 @@ index a75acea..589dd01 100644
#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
static struct dev_ch_attribute dev_attr_legacy_##_name = \
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
-index 2cf44b4d..6dd2dc7 100644
+index b4b3860..08d7faa 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -29,7 +29,7 @@
@@ -41197,7 +41249,7 @@ index 2cf44b4d..6dd2dc7 100644
/*
* edac_pci_alloc_ctl_info
-@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+@@ -314,7 +314,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
*/
int edac_pci_alloc_index(void)
{
@@ -41493,10 +41545,10 @@ index 027ca212..65689be 100644
return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
-index 756eca8..2336d08 100644
+index 10e6774..c2d96de 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
-@@ -590,7 +590,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+@@ -583,7 +583,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
static int
create_efivars_bin_attributes(void)
{
@@ -41712,7 +41764,7 @@ index 4e4c308..d041d75 100644
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-index 048cfe0..4ed6d8f 100644
+index bb1099c..8eff7b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2338,7 +2338,7 @@ static inline void amdgpu_unregister_atpx_handler(void) {}
@@ -41809,7 +41861,7 @@ index 8e99514..3d68786 100644
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-index d5b4213..111bf04 100644
+index c961fe0..acde4f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1075,7 +1075,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
@@ -41822,7 +41874,7 @@ index d5b4213..111bf04 100644
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
-index 0508c5c..cce2be3 100644
+index 8d6668c..ea61792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -481,7 +481,7 @@ static struct drm_driver kms_driver = {
@@ -42773,7 +42825,7 @@ index 93ec5dc..204ec92 100644
#define I810_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index b4741d1..a0dc9fc 100644
+index 61fcb3b..bad2d5f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -354,7 +354,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
@@ -42978,10 +43030,10 @@ index 97f3a56..32c712e 100644
ret = drm_ioctl(filp, cmd, arg);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 0d228f9..a00f50a 100644
+index 0f42a27..8d376ee 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -4395,14 +4395,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+@@ -4399,14 +4399,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
@@ -43000,7 +43052,7 @@ index 0d228f9..a00f50a 100644
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
}
-@@ -4414,32 +4415,32 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+@@ -4418,32 +4419,32 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
@@ -43053,7 +43105,7 @@ index 0d228f9..a00f50a 100644
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev))
-@@ -4447,35 +4448,36 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+@@ -4451,35 +4452,36 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -43111,10 +43163,10 @@ index 0d228f9..a00f50a 100644
/**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 32cf973..62e0cfa 100644
+index f859a5b..c6ef76b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -14720,13 +14720,13 @@ struct intel_quirk {
+@@ -14731,13 +14731,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -43130,7 +43182,7 @@ index 32cf973..62e0cfa 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -14734,18 +14734,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -14745,18 +14745,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -43565,10 +43617,10 @@ index 01a8694..584fb48 100644
wait_queue_head_t display_event;
wait_queue_head_t cursor_event;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
-index 2ae8577..0554f54 100644
+index 7c2e782..d3ca7da 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
-@@ -183,7 +183,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+@@ -184,7 +184,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
/* TODO copy slow path code from i915 */
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
@@ -43577,7 +43629,7 @@ index 2ae8577..0554f54 100644
{
struct qxl_drawable *draw = fb_cmd;
-@@ -203,7 +203,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+@@ -204,7 +204,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
struct drm_qxl_reloc reloc;
if (copy_from_user(&reloc,
@@ -43586,7 +43638,7 @@ index 2ae8577..0554f54 100644
sizeof(reloc))) {
ret = -EFAULT;
goto out_free_bos;
-@@ -282,10 +282,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+@@ -283,10 +283,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
@@ -43600,7 +43652,7 @@ index 2ae8577..0554f54 100644
sizeof(user_cmd)))
return -EFAULT;
-@@ -439,4 +439,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
+@@ -440,4 +440,4 @@ const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_AUTH),
};
@@ -43872,7 +43924,7 @@ index b928c17..e5d9400 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index c566993..0bf8fae 100644
+index d690df5..4aaaead 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1253,7 +1253,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
@@ -44060,7 +44112,7 @@ index 15aee72..c6df119 100644
-int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
+const int radeon_max_ioctl = ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
-index e343074..2042c8b 100644
+index e06ac54..46eabfd 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
@@ -44771,7 +44823,7 @@ index c13fb5b..55a3802 100644
*off += size;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index c4dcab0..a505f18 100644
+index 9098f13..19a4855 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -382,7 +382,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
@@ -44783,7 +44835,7 @@ index c4dcab0..a505f18 100644
ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
if (ret)
-@@ -696,9 +696,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+@@ -705,9 +705,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
* Adjust the size down since vmbus_channel_packet_page_buffer is the
* largest size we support
*/
@@ -45000,7 +45052,7 @@ index 6a27eb2..349ed23 100644
};
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
-index c848789..e9e9217 100644
+index c43318d..72f7656 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -819,7 +819,7 @@ static const struct i8k_config_data i8k_config_data[] = {
@@ -45294,7 +45346,7 @@ index b13936d..65322b2 100644
if (chipset >= AK_MAX_TYPE) {
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index 0a26dd6..54c83de 100644
+index d6d2b35..a97866a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
@@ -45539,28 +45591,6 @@ index 1c02dea..5f1efa6 100644
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
-diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
-index cb78b1e..f504ba7 100644
---- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
-+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
-@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
- error = l2t_send(tdev, skb, l2e);
- if (error < 0)
- kfree_skb(skb);
-- return error;
-+ return error < 0 ? error : 0;
- }
-
- int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
-@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
- error = cxgb3_ofld_send(tdev, skb);
- if (error < 0)
- kfree_skb(skb);
-- return error;
-+ return error < 0 ? error : 0;
- }
-
- static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
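
The two iwch_cm.c hunks above are dropped from the hardened patch in this release rather than rebased; the same return-value fix (folding the positive, non-error results of l2t_send() and cxgb3_ofld_send() to 0, so callers that test for non-zero do not misreport success as failure) appears to arrive via the newly added upstream 4.4.4 stable patch. A minimal sketch of the pattern, using a hypothetical helper name:

    /* Sketch only: send-style functions here return <0 on error but may
     * return a positive non-error value on success; callers that treat
     * any non-zero result as failure need it folded to 0.
     */
    static inline int normalize_send_rc(int rc)
    {
            return rc < 0 ? rc : 0;
    }
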
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 58fce174..f6de2c2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
@@ -47579,6 +47609,41 @@ index 40634b0..4f5855e 100644
// Every interrupt can come to us here
// But we must truly tell each apart.
+diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
+index 134e4fa..243bffe 100644
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -218,7 +218,7 @@ static void rrpc_put_blks(struct rrpc *rrpc)
+
+ static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
+ {
+- int next = atomic_inc_return(&rrpc->next_lun);
++ int next = atomic_inc_return_unchecked(&rrpc->next_lun);
+
+ return &rrpc->luns[next % rrpc->nr_luns];
+ }
+@@ -1286,7 +1286,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
+ rrpc->nr_luns = lun_end - lun_begin + 1;
+
+ /* simple round-robin strategy */
+- atomic_set(&rrpc->next_lun, -1);
++ atomic_set_unchecked(&rrpc->next_lun, -1);
+
+ ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+ if (ret) {
+diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
+index a9696a0..75d0008 100644
+--- a/drivers/lightnvm/rrpc.h
++++ b/drivers/lightnvm/rrpc.h
+@@ -99,7 +99,7 @@ struct rrpc {
+ /* Write strategy variables. Move these into each for structure for each
+ * strategy
+ */
+- atomic_t next_lun; /* Whenever a page is written, this is updated
++ atomic_unchecked_t next_lun; /* Whenever a page is written, this is updated
+ * to point to the next write lun
+ */
+
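
The new lightnvm hunks apply the usual PaX REFCOUNT treatment: a counter that is meant to wrap (here the round-robin LUN selector) is switched to atomic_unchecked_t so the overflow-trapping instrumentation on plain atomic_t leaves it alone. A sketch of the idea, assuming a build without the feature, where the unchecked type simply falls back to atomic_t:

    /* Sketch: under PAX_REFCOUNT, atomic_t overflow traps; counters that
     * intentionally wrap are declared atomic_unchecked_t instead.  The
     * fallback below is for illustration only.
     */
    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic_t atomic_unchecked_t;
    #define atomic_inc_return_unchecked(v) atomic_inc_return(v)
    #endif

    static atomic_unchecked_t next_lun = ATOMIC_INIT(-1);

    static int pick_lun(int nr_luns)
    {
            /* wrap-around is harmless here, hence "unchecked" */
            return atomic_inc_return_unchecked(&next_lun) % nr_luns;
    }
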
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 4d20088..de60cb2 100644
--- a/drivers/md/bcache/Kconfig
@@ -47627,7 +47692,7 @@ index 6b420a5..d5acb8f 100644
struct gc_stat {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
-index 83392f8..fc8f340 100644
+index 22b9e34..ac456ec 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -468,7 +468,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
@@ -47821,7 +47886,7 @@ index adbff14..018c2d2 100644
struct cache_stat_collector collector;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 679a093..b4dd03d 100644
+index 8d0ead9..2b81525 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -530,7 +530,7 @@ void bch_prio_write(struct cache *ca)
@@ -48331,7 +48396,7 @@ index c219a05..15a27ca 100644
pmd->bl_info.value_type.inc = data_block_inc;
pmd->bl_info.value_type.dec = data_block_dec;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 5df4048..1344a0d 100644
+index dd83492..d111dcf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -194,9 +194,9 @@ struct mapped_device {
@@ -48383,7 +48448,7 @@ index 5df4048..1344a0d 100644
{
rcu_read_unlock();
}
-@@ -2315,8 +2319,8 @@ static struct mapped_device *alloc_dev(int minor)
+@@ -2317,8 +2321,8 @@ static struct mapped_device *alloc_dev(int minor)
spin_lock_init(&md->deferred_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
@@ -48394,7 +48459,7 @@ index 5df4048..1344a0d 100644
INIT_LIST_HEAD(&md->uevent_list);
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
-@@ -2457,7 +2461,7 @@ static void event_callback(void *context)
+@@ -2459,7 +2463,7 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
@@ -48403,7 +48468,7 @@ index 5df4048..1344a0d 100644
wake_up(&md->eventq);
}
-@@ -3400,18 +3404,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+@@ -3402,18 +3406,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
@@ -48613,10 +48678,10 @@ index dfa57b4..7af9cda 100644
struct md_personality
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
-index fca6dbc..74ec612 100644
+index 7e44005..20e035a 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
-@@ -703,7 +703,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+@@ -700,7 +700,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
* Flick into a mode where all blocks get allocated in the new area.
*/
smm->begin = old_len;
@@ -48625,7 +48690,7 @@ index fca6dbc..74ec612 100644
/*
* Extend.
-@@ -741,7 +741,7 @@ out:
+@@ -738,7 +738,7 @@ out:
/*
* Switch back to normal behaviour.
*/
@@ -50863,7 +50928,7 @@ index f695b58..7b7d017 100644
+} __do_const;
#endif /* _DW_MMC_H_ */
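
The __do_const annotation visible at the end of this dw_mmc hunk is a grsecurity/PaX constify-plugin marker: ops-style structures made up largely of function pointers are forced into read-only memory so they cannot be rewritten at runtime. Roughly, and only as a sketch of the mechanism (the real attribute is interpreted by the gcc plugin):

    /* Sketch: with the constify plugin active, __do_const marks a type
     * whose instances must land in a read-only section; without the
     * plugin it expands to nothing and the struct behaves as before.
     */
    #ifdef CONSTIFY_PLUGIN
    #define __do_const __attribute__((do_const))
    #else
    #define __do_const
    #endif

    struct ops_like {
            int  (*start)(void *ctx);
            void (*stop)(void *ctx);
    } __do_const;
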
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
-index fb26674..3172c2b 100644
+index acece32..a872279 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1633,7 +1633,9 @@ static int mmci_probe(struct amba_device *dev,
@@ -51045,6 +51110,19 @@ index 141c2a4..ca734ed 100644
.kind = "can",
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
+diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
+index c1b6676..50a8a51 100644
+--- a/drivers/net/can/led.c
++++ b/drivers/net/can/led.c
+@@ -128,7 +128,7 @@ static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
+ }
+
+ /* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
++static struct notifier_block can_netdev_notifier = {
+ .notifier_call = can_led_notifier,
+ };
+
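
This hunk, and the mlxsw, rocker, ipvlan and vrf hunks further down (plus the __refconst removal in x86_pkg_temp_thermal), strip per-object section annotations such as __read_mostly from notifier_block definitions. With struct notifier_block constified as a whole by the plugin, an explicit placement in the writable .data..read_mostly section can conflict with the read-only placement the plugin imposes, so the plain definition is kept. A sketch, with hypothetical names:

    #include <linux/notifier.h>

    static int my_event(struct notifier_block *nb, unsigned long ev, void *p)
    {
            return NOTIFY_DONE;     /* sketch: accept and ignore events */
    }

    /* Before: tagged __read_mostly (writable section).
     * After:  untagged; the constified type already puts the object in
     *         read-only data, so the section tag has to go.
     */
    static struct notifier_block my_nb = {
            .notifier_call = my_event,
    };
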
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 674f367..ec3a31f 100644
--- a/drivers/net/can/vcan.c
@@ -52146,6 +52224,19 @@ index 1203d89..7895359 100644
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 3be4a23..e89602b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -1915,7 +1915,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
++static struct notifier_block mlxsw_sp_netdevice_nb = {
+ .notifier_call = mlxsw_sp_netdevice_event,
+ };
+
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 6223930..975033d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -52276,6 +52367,28 @@ index 79ef799..59bbd1f 100644
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index 52ec3d6..ed19f16 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -5410,7 +5410,7 @@ out:
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block rocker_netdevice_nb __read_mostly = {
++static struct notifier_block rocker_netdevice_nb = {
+ .notifier_call = rocker_netdevice_event,
+ };
+
+@@ -5453,7 +5453,7 @@ static int rocker_netevent_event(struct notifier_block *unused,
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block rocker_netevent_nb __read_mostly = {
++static struct notifier_block rocker_netevent_nb = {
+ .notifier_call = rocker_netevent_event,
+ };
+
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c771e0a..bbb368d 100644
--- a/drivers/net/ethernet/sfc/ptp.c
@@ -52471,6 +52584,29 @@ index 8c48bb2..0a03401 100644
kfree_skb(skb);
}
}
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index a9268db..19d067f 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -751,15 +751,15 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
++static struct notifier_block ipvlan_addr4_notifier_block = {
+ .notifier_call = ipvlan_addr4_event,
+ };
+
+-static struct notifier_block ipvlan_notifier_block __read_mostly = {
++static struct notifier_block ipvlan_notifier_block = {
+ .notifier_call = ipvlan_device_event,
+ };
+
+-static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
++static struct notifier_block ipvlan_addr6_notifier_block = {
+ .notifier_call = ipvlan_addr6_event,
+ };
+
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a0849f4..147a4a6 100644
--- a/drivers/net/irda/vlsi_ir.c
@@ -52721,86 +52857,6 @@ index 9a863c6..8e2d8c9 100644
break;
err = 0;
break;
-diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
-index 597c53e..f7e8c79 100644
---- a/drivers/net/ppp/pptp.c
-+++ b/drivers/net/ppp/pptp.c
-@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
- return i < MAX_CALLID;
- }
-
--static int add_chan(struct pppox_sock *sock)
-+static int add_chan(struct pppox_sock *sock,
-+ struct pptp_addr *sa)
- {
- static int call_id;
-
- spin_lock(&chan_lock);
-- if (!sock->proto.pptp.src_addr.call_id) {
-+ if (!sa->call_id) {
- call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
- if (call_id == MAX_CALLID) {
- call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
- if (call_id == MAX_CALLID)
- goto out_err;
- }
-- sock->proto.pptp.src_addr.call_id = call_id;
-- } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
-+ sa->call_id = call_id;
-+ } else if (test_bit(sa->call_id, callid_bitmap)) {
- goto out_err;
-+ }
-
-- set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
-+ sock->proto.pptp.src_addr = *sa;
-+ set_bit(sa->call_id, callid_bitmap);
-+ rcu_assign_pointer(callid_sock[sa->call_id], sock);
- spin_unlock(&chan_lock);
-
- return 0;
-@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
- struct sock *sk = sock->sk;
- struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
- struct pppox_sock *po = pppox_sk(sk);
-- struct pptp_opt *opt = &po->proto.pptp;
- int error = 0;
-
- if (sockaddr_len < sizeof(struct sockaddr_pppox))
-@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
-
- lock_sock(sk);
-
-- opt->src_addr = sp->sa_addr.pptp;
-- if (add_chan(po))
-+ if (sk->sk_state & PPPOX_DEAD) {
-+ error = -EALREADY;
-+ goto out;
-+ }
-+
-+ if (sk->sk_state & PPPOX_BOUND) {
-+ error = -EBUSY;
-+ goto out;
-+ }
-+
-+ if (add_chan(po, &sp->sa_addr.pptp))
- error = -EBUSY;
-+ else
-+ sk->sk_state |= PPPOX_BOUND;
-
-+out:
- release_sock(sk);
- return error;
- }
-@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
- }
-
- opt->dst_addr = sp->sa_addr.pptp;
-- sk->sk_state = PPPOX_CONNECTED;
-+ sk->sk_state |= PPPOX_CONNECTED;
-
- end:
- release_sock(sk);
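
This pptp hunk is likewise retired from the hardened patch: the bind/connect race fix it carried (publishing the source address and call id only under lock_sock(), and refusing to bind a socket that is already dead or bound) appears to be supplied by the upstream 4.4.4 patch now. The core of the dropped pattern, condensed into a sketch with a hypothetical function name:

    /* Sketch of the dropped race-avoidance pattern: state is checked and
     * the address published while the socket lock is held, so concurrent
     * bind() calls cannot both claim a call id.
     */
    static int pptp_bind_locked(struct sock *sk, struct pppox_sock *po,
                                struct sockaddr_pppox *sp)
    {
            int error = 0;

            lock_sock(sk);
            if (sk->sk_state & PPPOX_DEAD)
                    error = -EALREADY;
            else if (sk->sk_state & PPPOX_BOUND)
                    error = -EBUSY;
            else if (add_chan(po, &sp->sa_addr.pptp))
                    error = -EBUSY;         /* call id already taken */
            else
                    sk->sk_state |= PPPOX_BOUND;
            release_sock(sk);
            return error;
    }
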
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 27ed252..80cffde 100644
--- a/drivers/net/slip/slhc.c
@@ -53008,7 +53064,7 @@ index f94ab78..675a3a4 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
-index 0a242b2..91f07ed 100644
+index 0a242b2..eaf24af 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -939,7 +939,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
@@ -53020,11 +53076,20 @@ index 0a242b2..91f07ed 100644
.kind = DRV_NAME,
.priv_size = sizeof(struct net_vrf),
+@@ -973,7 +973,7 @@ out:
+ return NOTIFY_DONE;
+ }
+
+-static struct notifier_block vrf_notifier_block __read_mostly = {
++static struct notifier_block vrf_notifier_block = {
+ .notifier_call = vrf_device_event,
+ };
+
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index 405a7b6..3248ac3 100644
+index e0fcda4..b548741 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -3135,7 +3135,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
+@@ -3144,7 +3144,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
return vxlan->net;
}
@@ -53033,7 +53098,7 @@ index 405a7b6..3248ac3 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -3183,7 +3183,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -3192,7 +3192,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -53946,10 +54011,10 @@ index c652a66..1f75da8 100644
crypto_hdr[2] = 0;
crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
-index 9028345..5b66ca3 100644
+index 8c72047..e54deaa 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
-@@ -2049,7 +2049,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+@@ -2058,7 +2058,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
char buf[8];
@@ -53958,7 +54023,7 @@ index 9028345..5b66ca3 100644
u32 reset_flag;
memset(buf, 0, sizeof(buf));
-@@ -2070,7 +2070,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+@@ -2079,7 +2079,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char buf[8];
@@ -56546,10 +56611,10 @@ index 7686bfe..4710893 100644
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
-index bfa9a64..d3b3ec2 100644
+index fc6674d..8f3aa03 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
-@@ -1423,8 +1423,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+@@ -1429,8 +1429,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
!pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
@@ -56825,7 +56890,7 @@ index e3cd3ec..00560ec 100644
transport_setup_device(&rport->dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 84fa4c4..8333258 100644
+index bb669d3..2074023 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -112,7 +112,7 @@ static int sd_resume(struct device *);
@@ -57326,6 +57391,64 @@ index e541a01..a41777d 100644
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->atomic_wr.swap);
goto send_comp;
+diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+index d900546..266edcf 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
++++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+@@ -3983,7 +3983,7 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
+ _12M_RATE_, _24M_RATE_, 0xff,
+ };
+
+- atomic_set(&pmlmeext->event_seq, 0);
++ atomic_set_unchecked(&pmlmeext->event_seq, 0);
+ pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
+
+ pmlmeext->cur_channel = padapter->registrypriv.channel;
+@@ -4310,7 +4310,7 @@ void report_survey_event(struct adapter *padapter,
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct survey_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+
+@@ -4362,7 +4362,7 @@ void report_surveydone_event(struct adapter *padapter)
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+@@ -4408,7 +4408,7 @@ void report_join_res(struct adapter *padapter, int res)
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+@@ -4461,7 +4461,7 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stadel_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+@@ -4516,7 +4516,7 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 1b1c102..375e471 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -57339,6 +57462,19 @@ index 1b1c102..375e471 100644
enum rt_eeprom_type {
EEPROM_93C46,
+diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+index 9093a5f..5863f9b 100644
+--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
++++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+@@ -409,7 +409,7 @@ struct p2p_oper_class_map {
+ struct mlme_ext_priv {
+ struct adapter *padapter;
+ u8 mlmeext_init;
+- atomic_t event_seq;
++ atomic_unchecked_t event_seq;
+ u16 mgnt_seq;
+
+ unsigned char cur_channel;
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 26dd24c..2eb37c9 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
@@ -57352,6 +57488,112 @@ index 26dd24c..2eb37c9 100644
struct io_req {
struct list_head list;
+diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+index d28f29a..e3d2f19 100644
+--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
++++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+@@ -368,7 +368,7 @@ static void init_mlme_ext_priv23a_value(struct rtw_adapter *padapter)
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _12M_RATE_, _24M_RATE_, 0xff,};
+
+- atomic_set(&pmlmeext->event_seq, 0);
++ atomic_set_unchecked(&pmlmeext->event_seq, 0);
+ /* reset to zero when disconnect at client mode */
+ pmlmeext->mgnt_seq = 0;
+
+@@ -4743,7 +4743,7 @@ void report_survey_event23a(struct rtw_adapter *padapter,
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct survey_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ psurvey_evt = (struct survey_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+
+@@ -4794,7 +4794,7 @@ void report_surveydone_event23a(struct rtw_adapter *padapter)
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ psurveydone_evt = (struct surveydone_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+@@ -4838,7 +4838,7 @@ void report_join_res23a(struct rtw_adapter *padapter, int res)
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ pjoinbss_evt = (struct joinbss_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)&pjoinbss_evt->network.network,
+@@ -4888,7 +4888,7 @@ void report_del_sta_event23a(struct rtw_adapter *padapter,
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stadel_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ pdel_sta_evt = (struct stadel_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ ether_addr_copy((unsigned char *)&pdel_sta_evt->macaddr, MacAddr);
+@@ -4942,7 +4942,7 @@ void report_add_sta_event23a(struct rtw_adapter *padapter,
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+- pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
++ pc2h_evt_hdr->seq = atomic_inc_return_unchecked(&pmlmeext->event_seq);
+
+ padd_sta_evt = (struct stassoc_event*)(pevtcmd + sizeof(struct C2HEvent_Header));
+ ether_addr_copy((unsigned char *)&padd_sta_evt->macaddr, MacAddr);
+diff --git a/drivers/staging/rtl8723au/include/drv_types.h b/drivers/staging/rtl8723au/include/drv_types.h
+index e83463a..84230f3 100644
+--- a/drivers/staging/rtl8723au/include/drv_types.h
++++ b/drivers/staging/rtl8723au/include/drv_types.h
+@@ -185,7 +185,7 @@ struct dvobj_priv {
+
+ struct usb_interface *pusbintf;
+ struct usb_device *pusbdev;
+- atomic_t continual_urb_error;
++ atomic_unchecked_t continual_urb_error;
+
+ /*-------- below is for PCIE INTERFACE --------*/
+
+diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+index ea2a6c9..91d10ea 100644
+--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
++++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+@@ -406,7 +406,7 @@ struct p2p_oper_class_map {
+ struct mlme_ext_priv {
+ struct rtw_adapter *padapter;
+ u8 mlmeext_init;
+- atomic_t event_seq;
++ atomic_unchecked_t event_seq;
+ u16 mgnt_seq;
+
+ /* struct fw_priv fwpriv; */
+diff --git a/drivers/staging/rtl8723au/include/usb_ops.h b/drivers/staging/rtl8723au/include/usb_ops.h
+index ff11e13..69680d1 100644
+--- a/drivers/staging/rtl8723au/include/usb_ops.h
++++ b/drivers/staging/rtl8723au/include/usb_ops.h
+@@ -48,7 +48,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+ int ret = false;
+ int value;
+
+- value = atomic_inc_return(&dvobj->continual_urb_error);
++ value = atomic_inc_return_unchecked(&dvobj->continual_urb_error);
+ if (value > MAX_CONTINUAL_URB_ERR) {
+ DBG_8723A("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
+ dvobj, value, MAX_CONTINUAL_URB_ERR);
+@@ -60,7 +60,7 @@ static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+ /* Set the continual_urb_error of this @param dvobjprive to 0 */
+ static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
+ {
+- atomic_set(&dvobj->continual_urb_error, 0);
++ atomic_set_unchecked(&dvobj->continual_urb_error, 0);
+ }
+
+ bool rtl8723au_chip_configure(struct rtw_adapter *padapter);
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 860e1c2..609ee2a 100644
--- a/drivers/staging/sm750fb/sm750.c
@@ -57676,7 +57918,7 @@ index be4eedc..96aaf2f 100644
tz->ops = NULL;
tz->sensor_data = NULL;
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
-index 7fc919f..19f23a5 100644
+index 7fc919f..5521ec1 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
@@ -57684,7 +57926,7 @@ index 7fc919f..19f23a5 100644
}
-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
-+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
++static struct notifier_block pkg_temp_thermal_notifier = {
.notifier_call = pkg_temp_thermal_cpu_callback,
};
@@ -77319,7 +77561,7 @@ index e06dd75a..22221aa 100644
/* first set the basic ref node struct up */
atomic_set(&ref->refs, 1);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
-index 0ddca67..ddd9880 100644
+index 4958360..70b753e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1263,7 +1263,7 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
@@ -77331,7 +77573,7 @@ index 0ddca67..ddd9880 100644
atomic_set(&root->orphan_inodes, 0);
atomic_set(&root->refs, 1);
atomic_set(&root->will_be_snapshoted, 0);
-@@ -2564,7 +2564,7 @@ int open_ctree(struct super_block *sb,
+@@ -2579,7 +2579,7 @@ int open_ctree(struct super_block *sb,
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->defrag_running, 0);
atomic_set(&fs_info->qgroup_op_seq, 0);
@@ -77533,7 +77775,7 @@ index 1a33d3e..4830234 100644
* build a list of bios to read all the missing parts of this
* stripe
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
-index 24154e4..ac07531 100644
+index fe609b8..6475fee 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -248,7 +248,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
@@ -77664,7 +77906,7 @@ index 6916a78..4598936 100644
static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
-index 9e08447..e21fee0 100644
+index 9c62a6f..3c7df72 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -231,7 +231,7 @@ static struct btrfs_device *__alloc_device(void)
@@ -77673,10 +77915,10 @@ index 9e08447..e21fee0 100644
atomic_set(&dev->reada_in_flight, 0);
- atomic_set(&dev->dev_stats_ccnt, 0);
+ atomic_set_unchecked(&dev->dev_stats_ccnt, 0);
+ btrfs_device_data_ordered_init(dev);
INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
-
-@@ -5184,7 +5184,7 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
+@@ -5185,7 +5185,7 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
sizeof(u64) * (total_stripes),
GFP_NOFS|__GFP_NOFAIL);
@@ -77685,7 +77927,7 @@ index 9e08447..e21fee0 100644
atomic_set(&bbio->refs, 1);
return bbio;
-@@ -5865,7 +5865,7 @@ static void btrfs_end_bio(struct bio *bio)
+@@ -5866,7 +5866,7 @@ static void btrfs_end_bio(struct bio *bio)
int is_orig_bio = 0;
if (bio->bi_error) {
@@ -77694,7 +77936,7 @@ index 9e08447..e21fee0 100644
if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
unsigned int stripe_index =
btrfs_io_bio(bio)->stripe_index;
-@@ -5903,7 +5903,7 @@ static void btrfs_end_bio(struct bio *bio)
+@@ -5904,7 +5904,7 @@ static void btrfs_end_bio(struct bio *bio)
/* only send an error to the higher layers if it is
* beyond the tolerance of the btrfs bio
*/
@@ -77703,7 +77945,7 @@ index 9e08447..e21fee0 100644
bio->bi_error = -EIO;
} else {
/*
-@@ -6014,7 +6014,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
+@@ -6015,7 +6015,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
@@ -77712,7 +77954,7 @@ index 9e08447..e21fee0 100644
if (atomic_dec_and_test(&bbio->stripes_pending)) {
/* Shoud be the original bio. */
WARN_ON(bio != bbio->orig_bio);
-@@ -6776,10 +6776,10 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
+@@ -6777,10 +6777,10 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
continue;
@@ -80972,10 +81214,10 @@ index 5797d45..7d7d79a 100644
if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
-index 023f6a1..9132167 100644
+index e5232bb..d7b20d1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
-@@ -845,9 +845,9 @@ restart:
+@@ -852,9 +852,9 @@ restart:
#else /* CONFIG_CGROUP_WRITEBACK */
static struct bdi_writeback *
@@ -80987,7 +81229,7 @@ index 023f6a1..9132167 100644
{
struct bdi_writeback *wb = inode_to_wb(inode);
-@@ -856,8 +856,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
+@@ -863,8 +863,8 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
return wb;
}
@@ -80997,7 +81239,7 @@ index 023f6a1..9132167 100644
{
struct bdi_writeback *wb = inode_to_wb(inode);
-@@ -1101,9 +1101,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
+@@ -1108,9 +1108,8 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
* Wait for writeback on an inode to complete. Called with i_lock held.
* Caller must make sure inode cannot go away when we drop i_lock.
*/
@@ -81008,7 +81250,7 @@ index 023f6a1..9132167 100644
{
DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
wait_queue_head_t *wqh;
-@@ -1132,8 +1131,8 @@ void inode_wait_for_writeback(struct inode *inode)
+@@ -1139,8 +1138,8 @@ void inode_wait_for_writeback(struct inode *inode)
* held and drops it. It is aimed for callers not holding any inode reference
* so once i_lock is dropped, inode can go away.
*/
@@ -83115,7 +83357,7 @@ index 14db05d..687f6d8 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index 0c3974c..a52e0f8 100644
+index d8ee4da..47a7c9c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -336,17 +336,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -83310,7 +83552,7 @@ index 0c3974c..a52e0f8 100644
last = nd->stack + nd->depth++;
last->link = *link;
last->cookie = NULL;
-@@ -1828,7 +1917,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1833,7 +1922,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline u64 hash_name(const char *name)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -83319,7 +83561,7 @@ index 0c3974c..a52e0f8 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -1996,6 +2085,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
+@@ -2001,6 +2090,10 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
@@ -83330,7 +83572,7 @@ index 0c3974c..a52e0f8 100644
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
-@@ -2133,6 +2226,11 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
+@@ -2138,6 +2231,11 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
if (!err)
err = complete_walk(nd);
@@ -83342,7 +83584,7 @@ index 0c3974c..a52e0f8 100644
if (!err && nd->flags & LOOKUP_DIRECTORY)
if (!d_can_lookup(nd->path.dentry))
err = -ENOTDIR;
-@@ -2181,6 +2279,10 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
+@@ -2186,6 +2284,10 @@ static int path_parentat(struct nameidata *nd, unsigned flags,
err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
@@ -83353,7 +83595,7 @@ index 0c3974c..a52e0f8 100644
if (!err) {
*parent = nd->path;
nd->path.mnt = NULL;
-@@ -2712,6 +2814,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2717,6 +2819,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -83367,7 +83609,7 @@ index 0c3974c..a52e0f8 100644
return 0;
}
-@@ -2978,6 +3087,18 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2983,6 +3092,18 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -83386,7 +83628,7 @@ index 0c3974c..a52e0f8 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2999,6 +3120,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -3004,6 +3125,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -83395,7 +83637,7 @@ index 0c3974c..a52e0f8 100644
}
out_no_open:
path->dentry = dentry;
-@@ -3104,11 +3227,24 @@ retry_lookup:
+@@ -3109,11 +3232,24 @@ retry_lookup:
goto finish_open_created;
}
@@ -83421,19 +83663,7 @@ index 0c3974c..a52e0f8 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -3144,6 +3280,11 @@ finish_lookup:
- if (unlikely(error))
- return error;
-
-+ if (gr_handle_nameidata_symlinkowner(nd, inode)) {
-+ path_to_nameidata(&path, nd);
-+ return -EACCES;
-+ }
-+
- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
- path_to_nameidata(&path, nd);
- return -ELOOP;
-@@ -3166,6 +3307,12 @@ finish_open:
+@@ -3166,6 +3302,17 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -83443,10 +83673,15 @@ index 0c3974c..a52e0f8 100644
+ goto out;
+ }
+
++ if (gr_handle_nameidata_symlinkowner(nd, inode)) {
++ error = -EACCES;
++ goto out;
++ }
++
audit_inode(nd->name, nd->path.dentry, 0);
- error = -EISDIR;
- if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
-@@ -3432,9 +3579,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
+ error = -ELOOP;
+@@ -3440,9 +3587,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
goto unlock;
error = -EEXIST;
@@ -83460,7 +83695,7 @@ index 0c3974c..a52e0f8 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3488,6 +3637,20 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3496,6 +3645,20 @@ inline struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -83481,7 +83716,7 @@ index 0c3974c..a52e0f8 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3551,6 +3714,17 @@ retry:
+@@ -3559,6 +3722,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -83499,7 +83734,7 @@ index 0c3974c..a52e0f8 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3566,6 +3740,8 @@ retry:
+@@ -3574,6 +3748,8 @@ retry:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
@@ -83508,7 +83743,7 @@ index 0c3974c..a52e0f8 100644
out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
-@@ -3620,9 +3796,16 @@ retry:
+@@ -3628,9 +3804,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -83525,7 +83760,7 @@ index 0c3974c..a52e0f8 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3655,7 +3838,7 @@ void dentry_unhash(struct dentry *dentry)
+@@ -3663,7 +3846,7 @@ void dentry_unhash(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
spin_lock(&dentry->d_lock);
@@ -83534,7 +83769,7 @@ index 0c3974c..a52e0f8 100644
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
-@@ -3708,6 +3891,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3716,6 +3899,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct path path;
struct qstr last;
int type;
@@ -83543,7 +83778,7 @@ index 0c3974c..a52e0f8 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname,
-@@ -3740,10 +3925,20 @@ retry:
+@@ -3748,10 +3933,20 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -83564,7 +83799,7 @@ index 0c3974c..a52e0f8 100644
exit3:
dput(dentry);
exit2:
-@@ -3838,6 +4033,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3846,6 +4041,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
int type;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -83573,7 +83808,7 @@ index 0c3974c..a52e0f8 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname,
-@@ -3864,10 +4061,21 @@ retry_deleg:
+@@ -3872,10 +4069,21 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -83595,7 +83830,7 @@ index 0c3974c..a52e0f8 100644
exit2:
dput(dentry);
}
-@@ -3956,9 +4164,17 @@ retry:
+@@ -3964,9 +4172,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -83613,7 +83848,7 @@ index 0c3974c..a52e0f8 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -4062,6 +4278,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -4070,6 +4286,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -83621,7 +83856,7 @@ index 0c3974c..a52e0f8 100644
int how = 0;
int error;
-@@ -4085,7 +4302,7 @@ retry:
+@@ -4093,7 +4310,7 @@ retry:
if (error)
return error;
@@ -83630,7 +83865,7 @@ index 0c3974c..a52e0f8 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -4097,11 +4314,26 @@ retry:
+@@ -4105,11 +4322,26 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -83657,7 +83892,7 @@ index 0c3974c..a52e0f8 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4416,6 +4648,20 @@ retry_deleg:
+@@ -4424,6 +4656,20 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -83678,7 +83913,7 @@ index 0c3974c..a52e0f8 100644
error = security_path_rename(&old_path, old_dentry,
&new_path, new_dentry, flags);
if (error)
-@@ -4423,6 +4669,9 @@ retry_deleg:
+@@ -4431,6 +4677,9 @@ retry_deleg:
error = vfs_rename(old_path.dentry->d_inode, old_dentry,
new_path.dentry->d_inode, new_dentry,
&delegated_inode, flags);
@@ -83688,7 +83923,7 @@ index 0c3974c..a52e0f8 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4479,14 +4728,24 @@ EXPORT_SYMBOL(vfs_whiteout);
+@@ -4487,14 +4736,24 @@ EXPORT_SYMBOL(vfs_whiteout);
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
@@ -85895,7 +86130,7 @@ index a352d57..cb94a5c 100644
}
fs_initcall(proc_interrupts_init);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
-index 92e6726..a600d4fa 100644
+index 92e6726..93a72d0 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
@@ -85959,7 +86194,7 @@ index 92e6726..a600d4fa 100644
}
-static struct notifier_block kcore_callback_nb __meminitdata = {
-+static struct notifier_block kcore_callback_nb __meminitconst = {
++static struct notifier_block kcore_callback_nb = {
.notifier_call = kcore_callback,
.priority = 0,
};
@@ -99346,29 +99581,6 @@ index 1bfcfe5..e04c5c9 100644
+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
#endif /* __ASM_GENERIC_CACHE_H */
-diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
-index 0419485..0f1c6f3 100644
---- a/include/asm-generic/cputime_nsecs.h
-+++ b/include/asm-generic/cputime_nsecs.h
-@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
- */
- static inline cputime_t timespec_to_cputime(const struct timespec *val)
- {
-- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
-+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
- return (__force cputime_t) ret;
- }
- static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
- */
- static inline cputime_t timeval_to_cputime(const struct timeval *val)
- {
-- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
-+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
-+ val->tv_usec * NSEC_PER_USEC;
- return (__force cputime_t) ret;
- }
- static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
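
Another dropped hunk, presumably superseded by the upstream 4.4.4 patch: on 32-bit targets tv_sec is a 32-bit long, so tv_sec * NSEC_PER_SEC overflows once tv_sec reaches 3, and the fix widens the operand to u64 before multiplying. The effect is easy to reproduce in plain C (shown with unsigned arithmetic so the wrap is well defined):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000UL

    int main(void)
    {
            uint32_t tv_sec = 5;    /* stand-in for a 32-bit time_t */

            /* Without widening, the multiply wraps in 32 bits. */
            uint32_t wrapped = tv_sec * (uint32_t)NSEC_PER_SEC;
            /* Widening first, as the hunk above does, keeps all bits. */
            uint64_t correct = (uint64_t)tv_sec * NSEC_PER_SEC;

            printf("wrapped=%u correct=%llu\n",
                   wrapped, (unsigned long long)correct);
            return 0;
    }
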
diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
index 0d68a1e..b74a761 100644
--- a/include/asm-generic/emergency-restart.h
@@ -100151,10 +100363,10 @@ index 8609d57..86e4d79 100644
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
-index 06b77f9d..d08b456 100644
+index 8e30fae..38632f8 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
-@@ -407,7 +407,7 @@ struct cftype {
+@@ -413,7 +413,7 @@ struct cftype {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lockdep_key;
#endif
@@ -100888,7 +101100,7 @@ index 2e551e2..8ea30b5 100644
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
diff --git a/include/linux/efi.h b/include/linux/efi.h
-index 569b5a8..55dbf24 100644
+index 47be3ad..b2b1b58 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1094,6 +1094,7 @@ struct efivar_operations {
@@ -105499,22 +105711,6 @@ index 6fb8016..2cf60e7 100644
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
-diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
-index 50777b5..92d112a 100644
---- a/include/linux/shmem_fs.h
-+++ b/include/linux/shmem_fs.h
-@@ -15,10 +15,7 @@ struct shmem_inode_info {
- unsigned int seals; /* shmem seals */
- unsigned long flags;
- unsigned long alloced; /* data pages alloced to file */
-- union {
-- unsigned long swapped; /* subtotal assigned to swap */
-- char *symlink; /* unswappable short symlink */
-- };
-+ unsigned long swapped; /* subtotal assigned to swap */
- struct shared_policy policy; /* NUMA memory alloc policy */
- struct list_head swaplist; /* chain of maybes on swap */
- struct simple_xattrs xattrs; /* list of xattrs */
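
The shmem hunk above also drops out of the hardened patch; it removed the union that let the short-symlink pointer share storage with the swap subtotal in shmem_inode_info, and the equivalent cleanup now appears to come from the upstream 4.4.4 patch. The layout change, sketched with hypothetical struct names and the field comments from the hunk itself:

    /* Before: two meanings share one slot, so an inode handled as both a
     * symlink and a swapped file could clobber its own state.
     */
    struct shmem_info_before {
            union {
                    unsigned long swapped;  /* subtotal assigned to swap */
                    char *symlink;          /* unswappable short symlink */
            };
    };

    /* After: 'swapped' stands alone; symlink bodies are stored elsewhere. */
    struct shmem_info_after {
            unsigned long swapped;          /* subtotal assigned to swap */
    };
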
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 92557bb..53fa513 100644
--- a/include/linux/signal.h
@@ -105529,10 +105725,10 @@ index 92557bb..53fa513 100644
static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index 9147f9f..ad74b50 100644
+index 75f136a..fa3b724 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -808,7 +808,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+@@ -809,7 +809,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
@@ -105541,7 +105737,7 @@ index 9147f9f..ad74b50 100644
gfp_t priority)
{
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -2079,7 +2079,7 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+@@ -2080,7 +2080,7 @@ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
return skb->csum_start - skb_headroom(skb);
}
@@ -105550,7 +105746,7 @@ index 9147f9f..ad74b50 100644
{
return skb_transport_header(skb) - skb->data;
}
-@@ -2094,7 +2094,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -2095,7 +2095,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
return skb->inner_transport_header - skb->inner_network_header;
}
@@ -105559,7 +105755,7 @@ index 9147f9f..ad74b50 100644
{
return skb_network_header(skb) - skb->data;
}
-@@ -2154,7 +2154,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -2155,7 +2155,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
@@ -105568,7 +105764,7 @@ index 9147f9f..ad74b50 100644
#endif
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2794,9 +2794,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2795,9 +2795,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
@@ -105580,7 +105776,7 @@ index 9147f9f..ad74b50 100644
struct msghdr *msg, int size)
{
return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
-@@ -3325,6 +3325,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -3326,6 +3326,9 @@ static inline void nf_reset(struct sk_buff *skb)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
@@ -106836,20 +107032,9 @@ index 5122b5e..598b440 100644
void v9fs_register_trans(struct p9_trans_module *m);
void v9fs_unregister_trans(struct p9_trans_module *m);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
-index 2a91a05..f3ff431 100644
+index 9b4c418..f3ff431 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
-@@ -6,8 +6,8 @@
- #include <linux/mutex.h>
- #include <net/sock.h>
-
--void unix_inflight(struct file *fp);
--void unix_notinflight(struct file *fp);
-+void unix_inflight(struct user_struct *user, struct file *fp);
-+void unix_notinflight(struct user_struct *user, struct file *fp);
- void unix_gc(void);
- void wait_for_unix_gc(void);
- struct sock *unix_get_socket(struct file *filp);
@@ -36,7 +36,7 @@ struct unix_skb_parms {
u32 secid; /* Security ID */
#endif
@@ -106962,7 +107147,7 @@ index cf6c745..8a0cf00 100644
return;
}
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
-index 481fe1c..3adeb9d 100644
+index 49dcad4..6d2c708 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -65,7 +65,7 @@ struct inet_connection_sock_af_ops {
@@ -107014,10 +107199,10 @@ index 1a98f1c..2a44de6 100644
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
-index 9f4df68..f9705be 100644
+index 3f98233..3d46645 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
-@@ -173,7 +173,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+@@ -174,7 +174,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
@@ -107460,18 +107645,6 @@ index 2f87c1b..5a03287 100644
int __rtnl_link_register(struct rtnl_link_ops *ops);
void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-diff --git a/include/net/scm.h b/include/net/scm.h
-index 262532d..59fa93c 100644
---- a/include/net/scm.h
-+++ b/include/net/scm.h
-@@ -21,6 +21,7 @@ struct scm_creds {
- struct scm_fp_list {
- short count;
- short max;
-+ struct user_struct *user;
- struct file *fp[SCM_MAX_FD];
- };
-
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 4a5b9a3..ca27d73 100644
--- a/include/net/sctp/checksum.h
@@ -107633,7 +107806,7 @@ index 14d3c07..c273ad8 100644
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
diff --git a/include/net/tcp.h b/include/net/tcp.h
-index f80e74c..1e64f3c 100644
+index 414d822..f99ea64 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -550,7 +550,7 @@ void tcp_retransmit_timer(struct sock *sk);
@@ -109194,19 +109367,6 @@ index 3b39550..e470527 100644
if (!access_ok(VERIFY_READ, uattr, 1))
return -EFAULT;
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index d1d3e8f..2e7f7ab 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
- /* adjust offset of jmps if necessary */
- if (i < pos && i + insn->off + 1 > pos)
- insn->off += delta;
-- else if (i > pos && i + insn->off + 1 < pos)
-+ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
- insn->off -= delta;
- }
- }
diff --git a/kernel/capability.c b/kernel/capability.c
index 45432b5..988f1e4 100644
--- a/kernel/capability.c
@@ -109306,10 +109466,10 @@ index 45432b5..988f1e4 100644
+}
+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 470f653..5ea1e67 100644
+index fb1ecfd..f6add73 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -3345,7 +3345,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
+@@ -3346,7 +3346,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
key = &cft->lockdep_key;
#endif
kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
@@ -109318,7 +109478,7 @@ index 470f653..5ea1e67 100644
NULL, key);
if (IS_ERR(kn))
return PTR_ERR(kn);
-@@ -3449,11 +3449,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
+@@ -3450,11 +3450,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
/* free copy for custom atomic_write_len, see init_cftypes() */
if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
kfree(cft->kf_ops);
@@ -109336,7 +109496,7 @@ index 470f653..5ea1e67 100644
}
}
-@@ -3484,8 +3487,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3485,8 +3488,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
kf_ops->atomic_write_len = cft->max_write_len;
}
@@ -109349,7 +109509,7 @@ index 470f653..5ea1e67 100644
}
return 0;
-@@ -3498,7 +3503,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
+@@ -3499,7 +3504,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
if (!cfts || !cfts[0].ss)
return -ENOENT;
@@ -109358,7 +109518,7 @@ index 470f653..5ea1e67 100644
cgroup_apply_cftypes(cfts, false);
cgroup_exit_cftypes(cfts);
return 0;
-@@ -3555,7 +3560,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3556,7 +3561,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
mutex_lock(&cgroup_mutex);
@@ -109367,7 +109527,7 @@ index 470f653..5ea1e67 100644
ret = cgroup_apply_cftypes(cfts, true);
if (ret)
cgroup_rm_cftypes_locked(cfts);
-@@ -3576,8 +3581,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3577,8 +3582,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
@@ -109379,7 +109539,7 @@ index 470f653..5ea1e67 100644
return cgroup_add_cftypes(ss, cfts);
}
-@@ -3593,8 +3600,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3594,8 +3601,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype *cft;
@@ -109391,7 +109551,7 @@ index 470f653..5ea1e67 100644
return cgroup_add_cftypes(ss, cfts);
}
-@@ -5725,6 +5734,9 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -5738,6 +5747,9 @@ static void cgroup_release_agent(struct work_struct *work)
if (!pathbuf || !agentbuf)
goto out;
@@ -109401,7 +109561,7 @@ index 470f653..5ea1e67 100644
path = cgroup_path(cgrp, pathbuf, PATH_MAX);
if (!path)
goto out;
-@@ -5900,7 +5912,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+@@ -5913,7 +5925,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct task_struct *task;
int count = 0;
@@ -113425,7 +113585,7 @@ index ef7093c..1cc3d0f 100644
return 0;
}
diff --git a/kernel/resource.c b/kernel/resource.c
-index f150dbb..33735c2 100644
+index 249b1eb..b3451db 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -84,8 +84,8 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
@@ -113689,46 +113849,6 @@ index b242775..b497b69 100644
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
-diff --git a/kernel/seccomp.c b/kernel/seccomp.c
-index 580ac2d..15a1795 100644
---- a/kernel/seccomp.c
-+++ b/kernel/seccomp.c
-@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
- put_seccomp_filter(thread);
- smp_store_release(&thread->seccomp.filter,
- caller->seccomp.filter);
-+
-+ /*
-+ * Don't let an unprivileged task work around
-+ * the no_new_privs restriction by creating
-+ * a thread that sets it up, enters seccomp,
-+ * then dies.
-+ */
-+ if (task_no_new_privs(caller))
-+ task_set_no_new_privs(thread);
-+
- /*
- * Opt the other thread into seccomp if needed.
- * As threads are considered to be trust-realm
- * equivalent (see ptrace_may_access), it is safe to
- * allow one thread to transition the other.
- */
-- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
-- /*
-- * Don't let an unprivileged task work around
-- * the no_new_privs restriction by creating
-- * a thread that sets it up, enters seccomp,
-- * then dies.
-- */
-- if (task_no_new_privs(caller))
-- task_set_no_new_privs(thread);
--
-+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
-- }
- }
- }
-
diff --git a/kernel/signal.c b/kernel/signal.c
index f3f1f7a..d2e7863 100644
--- a/kernel/signal.c
@@ -114791,7 +114911,7 @@ index 86751c6..7875536 100644
update_vsyscall_tz();
if (firsttime) {
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index d563c19..5108cb0 100644
+index 99188ee..29f96f9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -15,6 +15,7 @@
@@ -114802,7 +114922,7 @@ index d563c19..5108cb0 100644
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
-@@ -916,6 +917,8 @@ int do_settimeofday64(const struct timespec64 *ts)
+@@ -915,6 +916,8 @@ int do_settimeofday64(const struct timespec64 *ts)
if (!timespec64_valid_strict(ts))
return -EINVAL;
@@ -115439,10 +115559,10 @@ index 0f06532..247c8e7 100644
+ return atomic64_inc_return_unchecked(&trace_counter);
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 4f6ef69..90c3b0f 100644
+index debf6e8..60fa064 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -2366,7 +2366,6 @@ __trace_early_add_new_event(struct trace_event_call *call,
+@@ -2367,7 +2367,6 @@ __trace_early_add_new_event(struct trace_event_call *call,
return 0;
}
@@ -115679,10 +115799,10 @@ index 18f34cf..e7513f2 100644
.thread_should_run = watchdog_should_run,
.thread_fn = watchdog,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index c579dba..8c0345d 100644
+index 450c21f..16482d9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -1856,9 +1856,8 @@ static void pool_mayday_timeout(unsigned long __pool)
+@@ -1866,9 +1866,8 @@ static void pool_mayday_timeout(unsigned long __pool)
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
@@ -115693,7 +115813,7 @@ index c579dba..8c0345d 100644
{
restart:
spin_unlock_irq(&pool->lock);
-@@ -1948,9 +1947,8 @@ static bool manage_workers(struct worker *worker)
+@@ -1958,9 +1957,8 @@ static bool manage_workers(struct worker *worker)
* CONTEXT:
* spin_lock_irq(pool->lock) which is released and regrabbed.
*/
@@ -115704,7 +115824,7 @@ index c579dba..8c0345d 100644
{
struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool;
-@@ -4452,7 +4450,7 @@ static void rebind_workers(struct worker_pool *pool)
+@@ -4462,7 +4460,7 @@ static void rebind_workers(struct worker_pool *pool)
WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
worker_flags |= WORKER_REBOUND;
worker_flags &= ~WORKER_UNBOUND;
@@ -117101,7 +117221,7 @@ index ef6963b..09c45dc 100644
idx = vma_hugecache_offset(h, vma, address);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
-index d8fb10d..8606223 100644
+index d8fb10d..1c74822 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -27,7 +27,6 @@ struct hugetlb_cgroup {
@@ -117194,10 +117314,10 @@ index d8fb10d..8606223 100644
+ snprintf(names[3], MAX_CFTYPE_NAME, "%s.failcnt", buf);
+
+ pax_open_kernel();
-+ strncpy((void *)h->cgroup_files[0]->name, names[0], MAX_CFTYPE_NAME);
-+ strncpy((void *)h->cgroup_files[1]->name, names[1], MAX_CFTYPE_NAME);
-+ strncpy((void *)h->cgroup_files[2]->name, names[2], MAX_CFTYPE_NAME);
-+ strncpy((void *)h->cgroup_files[3]->name, names[3], MAX_CFTYPE_NAME);
++ strncpy((void *)(*h->cgroup_files)[0].name, names[0], MAX_CFTYPE_NAME);
++ strncpy((void *)(*h->cgroup_files)[1].name, names[1], MAX_CFTYPE_NAME);
++ strncpy((void *)(*h->cgroup_files)[2].name, names[2], MAX_CFTYPE_NAME);
++ strncpy((void *)(*h->cgroup_files)[3].name, names[3], MAX_CFTYPE_NAME);
+ pax_close_kernel();
WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
@@ -117412,7 +117532,7 @@ index 750b789..b1b1b59 100644
/*
* free pages are specially detected outside this table:
diff --git a/mm/memory.c b/mm/memory.c
-index c387430..119fd96 100644
+index b80bf47..d3fd553 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -117970,7 +118090,7 @@ index c387430..119fd96 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3478,6 +3749,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3488,6 +3759,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -117994,7 +118114,7 @@ index c387430..119fd96 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3510,6 +3798,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3520,6 +3808,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -118027,7 +118147,7 @@ index c387430..119fd96 100644
#endif /* __PAGETABLE_PMD_FOLDED */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
-@@ -3619,8 +3933,8 @@ out:
+@@ -3629,8 +3943,8 @@ out:
return ret;
}
@@ -118038,7 +118158,7 @@ index c387430..119fd96 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -3646,8 +3960,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3656,8 +3970,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -118049,7 +118169,7 @@ index c387430..119fd96 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -3655,7 +3969,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3665,7 +3979,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -118058,7 +118178,7 @@ index c387430..119fd96 100644
void *maddr;
struct page *page = NULL;
-@@ -3716,8 +4030,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3726,8 +4040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -118069,7 +118189,7 @@ index c387430..119fd96 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -3727,11 +4041,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3737,11 +4051,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -118161,7 +118281,7 @@ index 87a1779..ebf95d4 100644
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
diff --git a/mm/migrate.c b/mm/migrate.c
-index 7890d0b..00200c6 100644
+index 6d17e0a..64ef47b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1505,8 +1505,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
@@ -118259,7 +118379,7 @@ index d6006b1..a72cbda 100644
capable(CAP_IPC_LOCK))
ret = apply_mlockall_flags(flags);
diff --git a/mm/mm_init.c b/mm/mm_init.c
-index fdadf91..5f527d1 100644
+index fdadf91..90c6bcc 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -170,7 +170,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
@@ -118267,7 +118387,7 @@ index fdadf91..5f527d1 100644
}
-static struct notifier_block compute_batch_nb __meminitdata = {
-+static struct notifier_block compute_batch_nb __meminitconst = {
++static struct notifier_block compute_batch_nb = {
.notifier_call = mm_compute_batch_notifier,
.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};
@@ -120279,7 +120399,7 @@ index b577fbb..ccd4d4e 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index 2afcdbb..4b38523 100644
+index ea5a70c..4b38523 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -120300,33 +120420,7 @@ index 2afcdbb..4b38523 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
- list_del_init(&info->swaplist);
- mutex_unlock(&shmem_swaplist_mutex);
- }
-- } else
-- kfree(info->symlink);
-+ }
-
- simple_xattrs_free(&info->xattrs);
- WARN_ON(inode->i_blocks);
-@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
- info = SHMEM_I(inode);
- inode->i_size = len-1;
- if (len <= SHORT_SYMLINK_LEN) {
-- info->symlink = kmemdup(symname, len, GFP_KERNEL);
-- if (!info->symlink) {
-+ inode->i_link = kmemdup(symname, len, GFP_KERNEL);
-+ if (!inode->i_link) {
- iput(inode);
- return -ENOMEM;
- }
- inode->i_op = &shmem_short_symlink_operations;
-- inode->i_link = info->symlink;
- } else {
- error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
- if (error) {
-@@ -2566,6 +2564,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2564,6 +2564,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -120338,7 +120432,7 @@ index 2afcdbb..4b38523 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2621,6 +2624,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2619,6 +2624,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -120354,7 +120448,7 @@ index 2afcdbb..4b38523 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -3004,8 +3016,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -3002,8 +3016,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -120364,14 +120458,6 @@ index 2afcdbb..4b38523 100644
if (!sbinfo)
return -ENOMEM;
-@@ -3083,6 +3094,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
- static void shmem_destroy_callback(struct rcu_head *head)
- {
- struct inode *inode = container_of(head, struct inode, i_rcu);
-+ kfree(inode->i_link);
- kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
- }
-
diff --git a/mm/slab.c b/mm/slab.c
index 4765c97..26f5c11 100644
--- a/mm/slab.c
@@ -121289,7 +121375,7 @@ index 17e8f8c..56d3370 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 4699751..ac3f662 100644
+index 4699751..bb1f0cf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -34,6 +34,7 @@
@@ -121561,6 +121647,15 @@ index 4699751..ac3f662 100644
return -EINVAL;
s->flags &= ~SLAB_FAILSLAB;
+@@ -5094,7 +5202,7 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+ STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
+ #endif
+
+-static struct attribute *slab_attrs[] = {
++static struct attribute *slab_attrs[] __read_only = {
+ &slab_size_attr.attr,
+ &object_size_attr.attr,
+ &objs_per_slab_attr.attr,
@@ -5129,6 +5237,12 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
@@ -122964,7 +123059,7 @@ index 1a19b98..df2b4ec 100644
if (!can_dir) {
printk(KERN_INFO "can: failed to create /proc/net/can . "
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
-index 9981039..d1f9f35 100644
+index 63ae5dd..40e8dce 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -190,7 +190,7 @@ static void con_fault(struct ceph_connection *con);
@@ -123117,7 +123212,7 @@ index d62af69..2e07b22 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 7f00f24..db000e2 100644
+index 9efbdb3..16a834b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1748,7 +1748,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
@@ -123165,7 +123260,7 @@ index 7f00f24..db000e2 100644
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
-@@ -4841,7 +4841,7 @@ out_unlock:
+@@ -4844,7 +4844,7 @@ out_unlock:
return work;
}
@@ -123174,7 +123269,7 @@ index 7f00f24..db000e2 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
-@@ -7001,8 +7001,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -7004,8 +7004,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -123552,37 +123647,10 @@ index 34ba7a0..5ebb8ef 100644
.min_dump_alloc = min_dump_alloc,
};
diff --git a/net/core/scm.c b/net/core/scm.c
-index 8a1741b..69f6cac 100644
+index dce0acb..69f6cac 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
-@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
- *fplp = fpl;
- fpl->count = 0;
- fpl->max = SCM_MAX_FD;
-+ fpl->user = NULL;
- }
- fpp = &fpl->fp[fpl->count];
-
-@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
- *fpp++ = file;
- fpl->count++;
- }
-+
-+ if (!fpl->user)
-+ fpl->user = get_uid(current_user());
-+
- return num;
- }
-
-@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
- scm->fp = NULL;
- for (i=fpl->count-1; i>=0; i--)
- fput(fpl->fp[i]);
-+ free_uid(fpl->user);
- kfree(fpl);
- }
- }
-@@ -209,9 +215,9 @@ EXPORT_SYMBOL(__scm_send);
+@@ -215,9 +215,9 @@ EXPORT_SYMBOL(__scm_send);
int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
{
struct cmsghdr __user *cm
@@ -123594,7 +123662,7 @@ index 8a1741b..69f6cac 100644
int err;
if (MSG_CMSG_COMPAT & msg->msg_flags)
-@@ -232,7 +238,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+@@ -238,7 +238,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
err = -EFAULT;
if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
goto out;
@@ -123603,7 +123671,7 @@ index 8a1741b..69f6cac 100644
goto out;
cmlen = CMSG_SPACE(len);
if (msg->msg_controllen < cmlen)
-@@ -248,7 +254,7 @@ EXPORT_SYMBOL(put_cmsg);
+@@ -254,7 +254,7 @@ EXPORT_SYMBOL(put_cmsg);
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
struct cmsghdr __user *cm
@@ -123612,7 +123680,7 @@ index 8a1741b..69f6cac 100644
int fdmax = 0;
int fdnum = scm->fp->count;
-@@ -268,7 +274,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+@@ -274,7 +274,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
if (fdnum < fdmax)
fdmax = fdnum;
@@ -123621,7 +123689,7 @@ index 8a1741b..69f6cac 100644
i++, cmfptr++)
{
struct socket *sock;
-@@ -297,7 +303,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+@@ -303,7 +303,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
if (i > 0)
{
@@ -123630,19 +123698,11 @@ index 8a1741b..69f6cac 100644
err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
-@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
- for (i = 0; i < fpl->count; i++)
- get_file(fpl->fp[i]);
- new_fpl->max = new_fpl->count;
-+ new_fpl->user = get_uid(fpl->user);
- }
- return new_fpl;
- }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index b2df375..f54b133 100644
+index 5bf88f5..008242b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -969,7 +969,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
+@@ -971,7 +971,8 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb->csum_start += off;
/* {transport,network,mac}_header and tail are relative to skb->head */
@@ -123652,7 +123712,7 @@ index b2df375..f54b133 100644
skb->network_header += off;
if (skb_mac_header_was_set(skb))
skb->mac_header += off;
-@@ -2103,7 +2104,7 @@ EXPORT_SYMBOL(__skb_checksum);
+@@ -2105,7 +2106,7 @@ EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
{
@@ -123661,7 +123721,7 @@ index b2df375..f54b133 100644
.update = csum_partial_ext,
.combine = csum_block_add_ext,
};
-@@ -3318,12 +3319,14 @@ void __init skb_init(void)
+@@ -3320,12 +3321,14 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -123863,10 +123923,10 @@ index 0c1d58d..e6ad04f 100644
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
-index 95b6139..3048623 100644
+index a6beb7b..4d833b2 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
-@@ -35,7 +35,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
{
unsigned int orig_size, size;
int ret, i;
@@ -123875,7 +123935,7 @@ index 95b6139..3048623 100644
.data = &size,
.maxlen = sizeof(size),
.mode = table->mode
-@@ -203,7 +203,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
+@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char id[IFNAMSIZ];
@@ -123884,7 +123944,7 @@ index 95b6139..3048623 100644
.data = id,
.maxlen = IFNAMSIZ,
};
-@@ -221,7 +221,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
+@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
static int proc_do_rss_key(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -123893,7 +123953,7 @@ index 95b6139..3048623 100644
char buf[NETDEV_RSS_KEY_LEN * 3];
snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
-@@ -285,7 +285,7 @@ static struct ctl_table net_core_table[] = {
+@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
.mode = 0444,
.proc_handler = proc_do_rss_key,
},
@@ -123902,7 +123962,7 @@ index 95b6139..3048623 100644
{
.procname = "bpf_jit_enable",
.data = &bpf_jit_enable,
-@@ -409,13 +409,12 @@ static struct ctl_table netns_core_table[] = {
+@@ -419,13 +419,12 @@ static struct ctl_table netns_core_table[] = {
static __net_init int sysctl_core_net_init(struct net *net)
{
@@ -123918,7 +123978,7 @@ index 95b6139..3048623 100644
if (tbl == NULL)
goto err_dup;
-@@ -425,17 +424,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
+@@ -435,17 +434,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
if (net->user_ns != &init_user_ns) {
tbl[0].procname = NULL;
}
@@ -123940,7 +124000,7 @@ index 95b6139..3048623 100644
err_dup:
return -ENOMEM;
}
-@@ -450,7 +448,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
+@@ -460,7 +458,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
kfree(tbl);
}
@@ -124118,7 +124178,7 @@ index 59b3e0e..ff060b8 100644
struct dst_entry *dst = NULL;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index cebd9d3..12e9abe 100644
+index f6303b1..d524bab 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -69,7 +69,8 @@
@@ -124250,7 +124310,7 @@ index d97268e..6ee80d4 100644
return nh->nh_saddr;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
-index 46b9c88..b52cf2f 100644
+index 6414891..30ec9bf 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -670,8 +670,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
@@ -124342,7 +124402,7 @@ index 86fa458..5f601b9 100644
p->rate_tokens = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
-index 1fe55ae..9be62bd 100644
+index b8a0607d..0ef8880 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -284,7 +284,7 @@ static int ip_frag_too_far(struct ipq *qp)
@@ -124354,7 +124414,7 @@ index 1fe55ae..9be62bd 100644
qp->rid = end;
rc = qp->q.fragments && (end - start) > max;
-@@ -774,12 +774,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
+@@ -775,12 +775,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
@@ -124369,7 +124429,7 @@ index 1fe55ae..9be62bd 100644
if (!table)
goto err_alloc;
-@@ -793,9 +792,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+@@ -794,9 +793,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
table[0].procname = NULL;
@@ -124382,7 +124442,7 @@ index 1fe55ae..9be62bd 100644
if (!hdr)
goto err_reg;
-@@ -803,8 +803,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+@@ -804,8 +804,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
return 0;
err_reg:
@@ -124449,10 +124509,10 @@ index b1209b6..c2f63ba 100644
ICMP_PROT_UNREACH, 0);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
-index 5f73a7c..bfd78f8 100644
+index a501242..ea6b81d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
-@@ -1308,7 +1308,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+@@ -1310,7 +1310,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
len = min_t(unsigned int, len, opt->optlen);
if (put_user(len, optlen))
return -EFAULT;
@@ -124462,7 +124522,7 @@ index 5f73a7c..bfd78f8 100644
return -EFAULT;
return 0;
}
-@@ -1441,7 +1442,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+@@ -1443,7 +1444,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
@@ -124658,7 +124718,7 @@ index 4a9e6db..06174e1 100644
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index e89094a..bd431045 100644
+index aa67e0e..3c65672 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -59,7 +59,7 @@ struct ping_table {
@@ -124697,7 +124757,7 @@ index e89094a..bd431045 100644
info, (u8 *)icmph);
#endif
}
-@@ -919,10 +919,10 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+@@ -921,10 +921,10 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
}
if (inet6_sk(sk)->rxopt.all)
@@ -124710,7 +124770,7 @@ index e89094a..bd431045 100644
else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
#endif
-@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -1119,7 +1119,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -124758,7 +124818,7 @@ index 3abd9d7..c5e4052 100644
/*
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
-index bc35f18..f94a500 100644
+index 7113bae..0e9e9a6 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -323,7 +323,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -124770,7 +124830,7 @@ index bc35f18..f94a500 100644
kfree_skb(skb);
return NET_RX_DROP;
}
-@@ -781,16 +781,20 @@ static int raw_init(struct sock *sk)
+@@ -783,16 +783,20 @@ static int raw_init(struct sock *sk)
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
{
@@ -124792,7 +124852,7 @@ index bc35f18..f94a500 100644
if (get_user(len, optlen))
goto out;
-@@ -800,8 +804,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
+@@ -802,8 +806,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
if (len > sizeof(struct icmp_filter))
len = sizeof(struct icmp_filter);
ret = -EFAULT;
@@ -124803,7 +124863,7 @@ index bc35f18..f94a500 100644
goto out;
ret = 0;
out: return ret;
-@@ -1030,7 +1034,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
+@@ -1032,7 +1036,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0, sock_i_ino(sp),
@@ -124813,10 +124873,10 @@ index bc35f18..f94a500 100644
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 85f184e..2cb2c0b 100644
+index 02c6229..68cc2a6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
-@@ -231,7 +231,7 @@ static const struct seq_operations rt_cache_seq_ops = {
+@@ -232,7 +232,7 @@ static const struct seq_operations rt_cache_seq_ops = {
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
@@ -124825,7 +124885,7 @@ index 85f184e..2cb2c0b 100644
}
static const struct file_operations rt_cache_seq_fops = {
-@@ -322,7 +322,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
+@@ -323,7 +323,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
@@ -124834,7 +124894,7 @@ index 85f184e..2cb2c0b 100644
}
static const struct file_operations rt_cpu_seq_fops = {
-@@ -360,7 +360,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
+@@ -361,7 +361,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
@@ -124843,7 +124903,7 @@ index 85f184e..2cb2c0b 100644
}
static const struct file_operations rt_acct_proc_fops = {
-@@ -462,7 +462,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+@@ -463,7 +463,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
#define IP_IDENTS_SZ 2048u
@@ -124852,7 +124912,7 @@ index 85f184e..2cb2c0b 100644
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
-@@ -472,7 +472,7 @@ static u32 *ip_tstamps __read_mostly;
+@@ -473,7 +473,7 @@ static u32 *ip_tstamps __read_mostly;
u32 ip_idents_reserve(u32 hash, int segs)
{
u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
@@ -124861,7 +124921,7 @@ index 85f184e..2cb2c0b 100644
u32 old = ACCESS_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
u32 delta = 0;
-@@ -480,7 +480,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
+@@ -481,7 +481,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = prandom_u32_max(now - old);
@@ -124870,7 +124930,7 @@ index 85f184e..2cb2c0b 100644
}
EXPORT_SYMBOL(ip_idents_reserve);
-@@ -2706,34 +2706,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
+@@ -2755,34 +2755,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
@@ -124913,7 +124973,7 @@ index 85f184e..2cb2c0b 100644
err_dup:
return -ENOMEM;
}
-@@ -2756,8 +2756,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+@@ -2805,8 +2805,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
@@ -124924,7 +124984,7 @@ index 85f184e..2cb2c0b 100644
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
return 0;
-@@ -2801,11 +2801,7 @@ int __init ip_rt_init(void)
+@@ -2850,11 +2850,7 @@ int __init ip_rt_init(void)
int rc = 0;
int cpu;
@@ -125094,7 +125154,7 @@ index d4c5115..f949b08 100644
write_pnet(&ireq->ireq_net, sock_net(sk_listener));
ireq->ireq_family = sk_listener->sk_family;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index d8841a2..d31baa8 100644
+index 8c7e631..d1dfdaf 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
@@ -125108,7 +125168,7 @@ index d8841a2..d31baa8 100644
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
-@@ -1412,6 +1416,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1420,6 +1424,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
@@ -125118,7 +125178,7 @@ index d8841a2..d31baa8 100644
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
-@@ -1577,12 +1584,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
+@@ -1585,12 +1592,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -125141,7 +125201,7 @@ index d8841a2..d31baa8 100644
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
-@@ -1665,6 +1679,10 @@ csum_error:
+@@ -1675,6 +1689,10 @@ csum_error:
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
@@ -125222,7 +125282,7 @@ index 193ba1f..aeda727 100644
syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index c438908..5f28ba3 100644
+index 7f8ab46..6d152f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -87,6 +87,7 @@
@@ -125273,7 +125333,7 @@ index c438908..5f28ba3 100644
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
-@@ -1204,7 +1221,7 @@ static unsigned int first_packet_length(struct sock *sk)
+@@ -1206,7 +1223,7 @@ static unsigned int first_packet_length(struct sock *sk)
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
@@ -125282,7 +125342,7 @@ index c438908..5f28ba3 100644
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
}
-@@ -1284,6 +1301,10 @@ try_again:
+@@ -1286,6 +1303,10 @@ try_again:
if (!skb)
goto out;
@@ -125293,7 +125353,7 @@ index c438908..5f28ba3 100644
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
-@@ -1316,7 +1337,7 @@ try_again:
+@@ -1318,7 +1339,7 @@ try_again:
if (unlikely(err)) {
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
@@ -125302,7 +125362,7 @@ index c438908..5f28ba3 100644
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
-@@ -1610,7 +1631,7 @@ csum_error:
+@@ -1612,7 +1633,7 @@ csum_error:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -125311,7 +125371,7 @@ index c438908..5f28ba3 100644
kfree_skb(skb);
return -1;
}
-@@ -1628,7 +1649,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
+@@ -1630,7 +1651,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
@@ -125320,7 +125380,7 @@ index c438908..5f28ba3 100644
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
-@@ -1834,6 +1855,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+@@ -1836,6 +1857,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
@@ -125330,7 +125390,7 @@ index c438908..5f28ba3 100644
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
-@@ -2438,7 +2462,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -2440,7 +2464,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -125440,7 +125500,7 @@ index 983bb99..ebc39e1 100644
Support for IPsec ESP.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
-index 1f21087..78bc7e1 100644
+index e8d3da0..c1ab725 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -179,7 +179,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
@@ -125479,7 +125539,7 @@ index 1f21087..78bc7e1 100644
if (ops->ndo_do_ioctl) {
mm_segment_t oldfs = get_fs();
-@@ -3864,16 +3864,23 @@ static const struct file_operations if6_fops = {
+@@ -3867,16 +3867,23 @@ static const struct file_operations if6_fops = {
.release = seq_release_net,
};
@@ -125504,7 +125564,7 @@ index 1f21087..78bc7e1 100644
}
static struct pernet_operations if6_proc_net_ops = {
-@@ -4492,7 +4499,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+@@ -4495,7 +4502,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
s_ip_idx = ip_idx = cb->args[2];
rcu_read_lock();
@@ -125513,7 +125573,7 @@ index 1f21087..78bc7e1 100644
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
-@@ -4702,7 +4709,7 @@ static inline size_t inet6_if_nlmsg_size(void)
+@@ -4705,7 +4712,7 @@ static inline size_t inet6_if_nlmsg_size(void)
+ nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
}
@@ -125522,7 +125582,7 @@ index 1f21087..78bc7e1 100644
int items, int bytes)
{
int i;
-@@ -4712,7 +4719,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
+@@ -4715,7 +4722,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
/* Use put_unaligned() because stats may not be aligned for u64. */
put_unaligned(items, &stats[0]);
for (i = 1; i < items; i++)
@@ -125531,7 +125591,7 @@ index 1f21087..78bc7e1 100644
memset(&stats[items], 0, pad);
}
-@@ -5166,7 +5173,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+@@ -5169,7 +5176,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
rt_genid_bump_ipv6(net);
break;
}
@@ -125540,7 +125600,7 @@ index 1f21087..78bc7e1 100644
}
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
-@@ -5186,7 +5193,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
+@@ -5189,7 +5196,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
@@ -125549,7 +125609,7 @@ index 1f21087..78bc7e1 100644
int ret;
/*
-@@ -5211,7 +5218,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+@@ -5214,7 +5221,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
{
struct inet6_dev *idev = ctl->extra1;
int min_mtu = IPV6_MIN_MTU;
@@ -125558,7 +125618,7 @@ index 1f21087..78bc7e1 100644
lctl = *ctl;
lctl.extra1 = &min_mtu;
-@@ -5286,7 +5293,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
+@@ -5289,7 +5296,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
@@ -125567,7 +125627,7 @@ index 1f21087..78bc7e1 100644
int ret;
/*
-@@ -5351,7 +5358,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+@@ -5354,7 +5361,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
int err;
struct in6_addr addr;
char str[IPV6_MAX_STRLEN];
@@ -125576,7 +125636,7 @@ index 1f21087..78bc7e1 100644
struct net *net = ctl->extra2;
struct ipv6_stable_secret *secret = ctl->data;
-@@ -5420,7 +5427,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+@@ -5423,7 +5430,7 @@ int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
@@ -125599,10 +125659,10 @@ index 9f5137c..a7eabd9 100644
err = ipv6_init_mibs(net);
if (err)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
-index 517c55b..b6462b8 100644
+index 4281621..b1e9d2d 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
-@@ -979,5 +979,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+@@ -982,5 +982,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
0,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
@@ -126076,10 +126136,10 @@ index 45f5ae5..1c57cbe 100644
return -ENOMEM;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index 826e6aa..02a2386 100644
+index 3f164d3..cd507d9 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
-@@ -3504,7 +3504,7 @@ struct ctl_table ipv6_route_table_template[] = {
+@@ -3503,7 +3503,7 @@ struct ctl_table ipv6_route_table_template[] = {
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
@@ -126124,7 +126184,7 @@ index 45243bb..cdb398e 100644
struct ctl_table *ipv6_icmp_table;
int err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index bd100b4..72c83d8 100644
+index b8d4056..9509e20 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -126138,7 +126198,7 @@ index bd100b4..72c83d8 100644
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
-@@ -1268,6 +1272,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1269,6 +1273,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
reset:
@@ -126148,7 +126208,7 @@ index bd100b4..72c83d8 100644
tcp_v6_send_reset(sk, skb);
discard:
if (opt_skb)
-@@ -1378,12 +1385,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1379,12 +1386,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
lookup:
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
inet6_iif(skb));
@@ -126171,7 +126231,7 @@ index bd100b4..72c83d8 100644
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
-@@ -1471,6 +1486,10 @@ csum_error:
+@@ -1472,6 +1487,10 @@ csum_error:
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
@@ -126388,7 +126448,7 @@ index b9ac598..f88cc56 100644
return;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
-index 435608c..1b7d8bb 100644
+index 20ab7b2..1b7d8bb 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -685,10 +685,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
@@ -126404,16 +126464,6 @@ index 435608c..1b7d8bb 100644
}
memcpy(iucv->src_name, name, 8);
}
-@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
- if (!addr || addr->sa_family != AF_IUCV)
- return -EINVAL;
-
-+ if (addr_len < sizeof(struct sockaddr_iucv))
-+ return -EINVAL;
-+
- lock_sock(sk);
- if (sk->sk_state != IUCV_OPEN) {
- err = -EBADFD;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 7eaa000..2fa7f35 100644
--- a/net/iucv/iucv.c
@@ -126917,7 +126967,7 @@ index d824c38..f3ded28 100644
bip_ipn_set64(mmie->sequence_number, pn64);
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
-index 7079cd3..abf6a59 100644
+index 7079cd3..c299f08 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -386,7 +386,7 @@ static int ieee802154_header_create(struct sk_buff *skb,
@@ -126929,6 +126979,15 @@ index 7079cd3..abf6a59 100644
if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
return -EINVAL;
+@@ -451,7 +451,7 @@ static int mac802154_header_create(struct sk_buff *skb,
+ memset(&hdr.fc, 0, sizeof(hdr.fc));
+ hdr.fc.type = IEEE802154_FC_TYPE_DATA;
+ hdr.fc.ack_request = wpan_dev->ackreq;
+- hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
++ hdr.seq = atomic_inc_return_unchecked(&dev->ieee802154_ptr->dsn) & 0xFF;
+
+ /* TODO currently a workaround to give zero cb block to set
+ * security parameters defaults according MIB.
@@ -576,7 +576,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
get_random_bytes(&tmp, sizeof(tmp));
atomic_set(&wpan_dev->bsn, tmp);
@@ -128667,10 +128726,10 @@ index ec52912..059504b 100644
/* Initialize IPv6 support and register with socket layer. */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
-index 3d9ea9a..d3aee1a 100644
+index 8b4ff31..92b21ee 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
-@@ -856,8 +856,10 @@ int sctp_register_af(struct sctp_af *af)
+@@ -858,8 +858,10 @@ int sctp_register_af(struct sctp_af *af)
return 0;
}
@@ -128682,7 +128741,7 @@ index 3d9ea9a..d3aee1a 100644
return 1;
}
-@@ -987,7 +989,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
+@@ -989,7 +991,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
static struct sctp_af sctp_af_inet;
@@ -128691,7 +128750,7 @@ index 3d9ea9a..d3aee1a 100644
.event_msgname = sctp_inet_event_msgname,
.skb_msgname = sctp_inet_skb_msgname,
.af_supported = sctp_inet_af_supported,
-@@ -1059,7 +1061,7 @@ static const struct net_protocol sctp_protocol = {
+@@ -1061,7 +1063,7 @@ static const struct net_protocol sctp_protocol = {
};
/* IPv4 address related functions. */
@@ -128700,7 +128759,7 @@ index 3d9ea9a..d3aee1a 100644
.sa_family = AF_INET,
.sctp_xmit = sctp_v4_xmit,
.setsockopt = ip_setsockopt,
-@@ -1143,7 +1145,7 @@ static void sctp_v4_pf_init(void)
+@@ -1145,7 +1147,7 @@ static void sctp_v4_pf_init(void)
static void sctp_v4_pf_exit(void)
{
@@ -128854,7 +128913,7 @@ index 22c2bf3..f1f08c8 100644
/*
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index ef1d90f..23b38b3 100644
+index be1489f..5364cd7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2192,11 +2192,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
@@ -129295,7 +129354,7 @@ index 1095be9..815d777 100644
/* make a copy for the caller */
*handle = ctxh;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
-index 5e4f815..15e403f 100644
+index 21e2035..2e567b1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1623,7 +1623,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
@@ -129669,7 +129728,7 @@ index 1eadc95..f6ccd08 100644
kfree_skb(args);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
-index 350cca3..a108fc5 100644
+index 69ee2ee..3352a8d 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -75,7 +75,7 @@ static void tipc_subscrp_send_event(struct tipc_subscription *sub,
@@ -129682,7 +129741,7 @@ index 350cca3..a108fc5 100644
sub->evt.event = htohl(event, sub->swap);
sub->evt.found_lower = htohl(found_lower, sub->swap);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
-index e3f85bc..a6c35a6 100644
+index 898a53a..5a23004 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -919,6 +919,12 @@ static struct sock *unix_find_other(struct net *net,
@@ -129732,33 +129791,7 @@ index e3f85bc..a6c35a6 100644
return err;
}
-@@ -1496,7 +1515,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
- UNIXCB(skb).fp = NULL;
-
- for (i = scm->fp->count-1; i >= 0; i--)
-- unix_notinflight(scm->fp->fp[i]);
-+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
- }
-
- static void unix_destruct_scm(struct sk_buff *skb)
-@@ -1561,7 +1580,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
- return -ENOMEM;
-
- for (i = scm->fp->count - 1; i >= 0; i--)
-- unix_inflight(scm->fp->fp[i]);
-+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
- return max_level;
- }
-
-@@ -2332,6 +2351,7 @@ again:
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
-+ scm_destroy(&scm);
- goto out;
- }
-
-@@ -2796,9 +2816,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+@@ -2806,9 +2825,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "Num RefCount Protocol Flags Type St "
"Inode Path\n");
else {
@@ -129773,7 +129806,7 @@ index e3f85bc..a6c35a6 100644
seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
-@@ -2823,10 +2847,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
+@@ -2833,10 +2856,29 @@ static int unix_seq_show(struct seq_file *seq, void *v)
seq_putc(seq, '@');
i++;
}
@@ -129808,7 +129841,7 @@ index e3f85bc..a6c35a6 100644
}
diff --git a/net/unix/diag.c b/net/unix/diag.c
-index c512f64..284072f 100644
+index 4d96797..ab6a813 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -299,7 +299,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
@@ -129820,42 +129853,6 @@ index c512f64..284072f 100644
.dump = unix_diag_dump,
};
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
-diff --git a/net/unix/garbage.c b/net/unix/garbage.c
-index 8fcdc22..6a0d485 100644
---- a/net/unix/garbage.c
-+++ b/net/unix/garbage.c
-@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
- * descriptor if it is for an AF_UNIX socket.
- */
-
--void unix_inflight(struct file *fp)
-+void unix_inflight(struct user_struct *user, struct file *fp)
- {
- struct sock *s = unix_get_socket(fp);
-
-@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
- }
- unix_tot_inflight++;
- }
-- fp->f_cred->user->unix_inflight++;
-+ user->unix_inflight++;
- spin_unlock(&unix_gc_lock);
- }
-
--void unix_notinflight(struct file *fp)
-+void unix_notinflight(struct user_struct *user, struct file *fp)
- {
- struct sock *s = unix_get_socket(fp);
-
-@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
- list_del_init(&u->link);
- unix_tot_inflight--;
- }
-- fp->f_cred->user->unix_inflight--;
-+ user->unix_inflight--;
- spin_unlock(&unix_gc_lock);
- }
-
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index b3d5150..ff3a837 100644
--- a/net/unix/sysctl_net_unix.c
@@ -130835,7 +130832,7 @@ index 25cf0c2..eb178ce 100644
return -1;
if (!exact1 && exact2)
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
-index dacf71a..f67b2c4 100755
+index ba6c34e..ea10bce 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -179,7 +179,7 @@ else
@@ -132596,26 +132593,6 @@ index 1450f85..a91e0bc 100644
rt_genid_bump_all(net);
}
rtnl_unlock();
-diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
-index ff81026..7c57c7f 100644
---- a/security/smack/smack_lsm.c
-+++ b/security/smack/smack_lsm.c
-@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
- */
- static inline unsigned int smk_ptrace_mode(unsigned int mode)
- {
-- switch (mode) {
-- case PTRACE_MODE_READ:
-- return MAY_READ;
-- case PTRACE_MODE_ATTACH:
-+ if (mode & PTRACE_MODE_ATTACH)
- return MAY_READWRITE;
-- }
-+ if (mode & PTRACE_MODE_READ)
-+ return MAY_READ;
-
- return 0;
- }
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 2367b10..a0c3c51 100644
--- a/security/tomoyo/file.c
@@ -132775,27 +132752,9 @@ index 90c605e..bf3a29a 100644
help
This selects Yama, which extends DAC support with additional
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
-index d3c19c9..fb00554 100644
+index cb6ed10..fb00554 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
-@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
- int rc = 0;
-
- /* require ptrace target be a child of ptracer on attach */
-- if (mode == PTRACE_MODE_ATTACH) {
-+ if (mode & PTRACE_MODE_ATTACH) {
- switch (ptrace_scope) {
- case YAMA_SCOPE_DISABLED:
- /* No additional restrictions. */
-@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
- }
- }
-
-- if (rc) {
-+ if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
- printk_ratelimited(KERN_NOTICE
- "ptrace of pid %d was attempted by: %s (pid %d)\n",
- child->pid, current->comm, current->pid);
@@ -357,7 +357,7 @@ static struct security_hook_list yama_hooks[] = {
static int yama_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -153835,10 +153794,10 @@ index 0000000..fc58e16
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..acc340b
+index 0000000..9e45ae9
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,21510 @@
+@@ -0,0 +1,21511 @@
+enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL
+enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL
+enable_so_v9fs_xattr_get_acl_fndecl_4 v9fs_xattr_get_acl fndecl 5 4 NULL
@@ -173167,6 +173126,7 @@ index 0000000..acc340b
+enable_so_virtqueue_add_fndecl_58841 virtqueue_add fndecl 3 58841 NULL
+enable_so_nr_scratch_dwc3_58852 nr_scratch dwc3 0 58852 NULL
+enable_so_ms_lib_read_extrablock_fndecl_58857 ms_lib_read_extrablock fndecl 4 58857 NULL
++enable_so_ucs2_utf8size_fndecl_58859 ucs2_utf8size fndecl 0 58859 NULL
+enable_so_exofs_iget_fndecl_58862 exofs_iget fndecl 2 58862 NULL
+enable_so_vid_batadv_softif_vlan_58864 vid batadv_softif_vlan 0 58864 NULL nohasharray
+enable_so_pd_groupsize_ptlrpcd_58864 pd_groupsize ptlrpcd 0 58864 &enable_so_vid_batadv_softif_vlan_58864
diff --git a/4.4.3/4425_grsec_remove_EI_PAX.patch b/4.4.4/4425_grsec_remove_EI_PAX.patch
index 2a1aa6c..2a1aa6c 100644
--- a/4.4.3/4425_grsec_remove_EI_PAX.patch
+++ b/4.4.4/4425_grsec_remove_EI_PAX.patch
diff --git a/4.4.3/4427_force_XATTR_PAX_tmpfs.patch b/4.4.4/4427_force_XATTR_PAX_tmpfs.patch
index f6aea64..f6aea64 100644
--- a/4.4.3/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.4.4/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.4.3/4430_grsec-remove-localversion-grsec.patch b/4.4.4/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.4.3/4430_grsec-remove-localversion-grsec.patch
+++ b/4.4.4/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.4.3/4435_grsec-mute-warnings.patch b/4.4.4/4435_grsec-mute-warnings.patch
index b7564e4..b7564e4 100644
--- a/4.4.3/4435_grsec-mute-warnings.patch
+++ b/4.4.4/4435_grsec-mute-warnings.patch
diff --git a/4.4.3/4440_grsec-remove-protected-paths.patch b/4.4.4/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.4.3/4440_grsec-remove-protected-paths.patch
+++ b/4.4.4/4440_grsec-remove-protected-paths.patch
diff --git a/4.4.3/4450_grsec-kconfig-default-gids.patch b/4.4.4/4450_grsec-kconfig-default-gids.patch
index 77f9706..77f9706 100644
--- a/4.4.3/4450_grsec-kconfig-default-gids.patch
+++ b/4.4.4/4450_grsec-kconfig-default-gids.patch
diff --git a/4.4.3/4465_selinux-avc_audit-log-curr_ip.patch b/4.4.4/4465_selinux-avc_audit-log-curr_ip.patch
index f1c4923..f1c4923 100644
--- a/4.4.3/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.4.4/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.4.3/4470_disable-compat_vdso.patch b/4.4.4/4470_disable-compat_vdso.patch
index 281aad9..281aad9 100644
--- a/4.4.3/4470_disable-compat_vdso.patch
+++ b/4.4.4/4470_disable-compat_vdso.patch
diff --git a/4.4.3/4475_emutramp_default_on.patch b/4.4.4/4475_emutramp_default_on.patch
index afd6019..afd6019 100644
--- a/4.4.3/4475_emutramp_default_on.patch
+++ b/4.4.4/4475_emutramp_default_on.patch