author     Anthony G. Basile <blueness@gentoo.org>  2014-11-15 08:54:46 -0500
committer  Anthony G. Basile <blueness@gentoo.org>  2014-11-15 08:54:46 -0500
commit     44dc7c51e7c992a6f9ad065ad98399f1d1414e44 (patch)
tree       027e4e7e44665bc933ad049adb96fa88ff0181d7
parent     Grsec/PaX: 3.0-{3.2.64,3.14.23,3.17.2}-201411062034 (diff)
download   hardened-patchset-20141115.tar.gz
           hardened-patchset-20141115.tar.bz2
           hardened-patchset-20141115.zip
Grsec/PaX: 3.0-{3.2.64,3.14.24,3.17.2}-201411150027 (tag: 20141115)
-rw-r--r--  3.14.24/0000_README (renamed from 3.17.2/0000_README)  6
-rw-r--r--  3.14.24/1023_linux-3.14.24.patch  7091
-rw-r--r--  3.14.24/4420_grsecurity-3.0-3.14.24-201411150026.patch (renamed from 3.14.23/4420_grsecurity-3.0-3.14.23-201411062033.patch)  1734
-rw-r--r--  3.14.24/4425_grsec_remove_EI_PAX.patch (renamed from 3.14.23/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.14.24/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.14.23/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.14.24/4430_grsec-remove-localversion-grsec.patch (renamed from 3.14.23/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.14.24/4435_grsec-mute-warnings.patch (renamed from 3.14.23/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.14.24/4440_grsec-remove-protected-paths.patch (renamed from 3.14.23/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.14.24/4450_grsec-kconfig-default-gids.patch (renamed from 3.14.23/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.14.24/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.14.23/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.14.24/4470_disable-compat_vdso.patch (renamed from 3.14.23/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.14.24/4475_emutramp_default_on.patch (renamed from 3.14.23/4475_emutramp_default_on.patch)  0
-rw-r--r--  3.17.3/0000_README (renamed from 3.14.23/0000_README)  6
-rw-r--r--  3.17.3/1002_linux-3.17.3.patch  11840
-rw-r--r--  3.17.3/4420_grsecurity-3.0-3.17.3-201411150027.patch (renamed from 3.17.2/4420_grsecurity-3.0-3.17.2-201411062034.patch)  2769
-rw-r--r--  3.17.3/4425_grsec_remove_EI_PAX.patch (renamed from 3.17.2/4425_grsec_remove_EI_PAX.patch)  0
-rw-r--r--  3.17.3/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.17.2/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.17.3/4430_grsec-remove-localversion-grsec.patch (renamed from 3.17.2/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.17.3/4435_grsec-mute-warnings.patch (renamed from 3.17.2/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.17.3/4440_grsec-remove-protected-paths.patch (renamed from 3.17.2/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.17.3/4450_grsec-kconfig-default-gids.patch (renamed from 3.17.2/4450_grsec-kconfig-default-gids.patch)  0
-rw-r--r--  3.17.3/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.17.2/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.17.3/4470_disable-compat_vdso.patch (renamed from 3.17.2/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.17.3/4475_emutramp_default_on.patch (renamed from 3.17.2/4475_emutramp_default_on.patch)  0
-rw-r--r--  3.2.64/0000_README  2
-rw-r--r--  3.2.64/4420_grsecurity-3.0-3.2.64-201411150025.patch (renamed from 3.2.64/4420_grsecurity-3.0-3.2.64-201411062032.patch)  616
26 files changed, 21908 insertions, 2156 deletions
diff --git a/3.17.2/0000_README b/3.14.24/0000_README
index 08a13b9..5926788 100644
--- a/3.17.2/0000_README
+++ b/3.14.24/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.17.2-201411062034.patch
+Patch: 1023_linux-3.14.24.patch
+From: http://www.kernel.org
+Desc: Linux 3.14.24
+
+Patch: 4420_grsecurity-3.0-3.14.24-201411150026.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.24/1023_linux-3.14.24.patch b/3.14.24/1023_linux-3.14.24.patch
new file mode 100644
index 0000000..5c63dd5
--- /dev/null
+++ b/3.14.24/1023_linux-3.14.24.patch
@@ -0,0 +1,7091 @@
+diff --git a/Makefile b/Makefile
+index 135a04a..8fd0610 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Remembering Coco
+
+diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
+index 4f31b2e..398064c 100644
+--- a/arch/arc/boot/dts/nsimosci.dts
++++ b/arch/arc/boot/dts/nsimosci.dts
+@@ -20,7 +20,7 @@
+ /* this is for console on PGU */
+ /* bootargs = "console=tty0 consoleblank=0"; */
+ /* this is for console on serial */
+- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
++ bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ };
+
+ aliases {
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index 2fd3162..c1d3d2d 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -55,4 +55,31 @@ extern void read_decode_cache_bcr(void);
+
+ #endif /* !__ASSEMBLY__ */
+
++/* Instruction cache related Auxiliary registers */
++#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
++#define ARC_REG_IC_IVIC 0x10
++#define ARC_REG_IC_CTRL 0x11
++#define ARC_REG_IC_IVIL 0x19
++#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
++#define ARC_REG_IC_PTAG 0x1E
++#endif
++
++/* Bit val in IC_CTRL */
++#define IC_CTRL_CACHE_DISABLE 0x1
++
++/* Data cache related Auxiliary registers */
++#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
++#define ARC_REG_DC_IVDC 0x47
++#define ARC_REG_DC_CTRL 0x48
++#define ARC_REG_DC_IVDL 0x4A
++#define ARC_REG_DC_FLSH 0x4B
++#define ARC_REG_DC_FLDL 0x4C
++#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
++#define ARC_REG_DC_PTAG 0x5C
++#endif
++
++/* Bit val in DC_CTRL */
++#define DC_CTRL_INV_MODE_FLUSH 0x40
++#define DC_CTRL_FLUSH_STATUS 0x100
++
+ #endif /* _ASM_CACHE_H */
+diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
+index b65fca7..fea9316 100644
+--- a/arch/arc/include/asm/kgdb.h
++++ b/arch/arc/include/asm/kgdb.h
+@@ -19,7 +19,7 @@
+ * register API yet */
+ #undef DBG_MAX_REG_NUM
+
+-#define GDB_MAX_REGS 39
++#define GDB_MAX_REGS 87
+
+ #define BREAK_INSTR_SIZE 2
+ #define CACHE_FLUSH_IS_SAFE 1
+@@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
+
+ extern void kgdb_trap(struct pt_regs *regs);
+
+-enum arc700_linux_regnums {
++/* This is the numbering of registers according to the GDB. See GDB's
++ * arc-tdep.h for details.
++ *
++ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
++enum arc_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+- _BTA = 27,
+- _LP_START = 28,
+- _LP_END = 29,
+- _LP_COUNT = 30,
+- _STATUS32 = 31,
+- _BLINK = 32,
+- _FP = 33,
+- __SP = 34,
+- _EFA = 35,
+- _RET = 36,
+- _ORIG_R8 = 37,
+- _STOP_PC = 38
++ _FP = 27,
++ __SP = 28,
++ _R30 = 30,
++ _BLINK = 31,
++ _LP_COUNT = 60,
++ _STOP_PC = 64,
++ _RET = 64,
++ _LP_START = 65,
++ _LP_END = 66,
++ _STATUS32 = 67,
++ _ECR = 76,
++ _BTA = 82,
+ };
+
+ #else
+diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
+index 9919972..07a58f2 100644
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -12,10 +12,42 @@
+ * to skip certain things during boot on simulator
+ */
+
++#include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/entry.h>
+-#include <linux/linkage.h>
+ #include <asm/arcregs.h>
++#include <asm/cache.h>
++
++.macro CPU_EARLY_SETUP
++
++ ; Setting up Vectror Table (in case exception happens in early boot
++ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
++
++ ; Disable I-cache/D-cache if kernel so configured
++ lr r5, [ARC_REG_IC_BCR]
++ breq r5, 0, 1f ; I$ doesn't exist
++ lr r5, [ARC_REG_IC_CTRL]
++#ifdef CONFIG_ARC_HAS_ICACHE
++ bclr r5, r5, 0 ; 0 - Enable, 1 is Disable
++#else
++ bset r5, r5, 0 ; I$ exists, but is not used
++#endif
++ sr r5, [ARC_REG_IC_CTRL]
++
++1:
++ lr r5, [ARC_REG_DC_BCR]
++ breq r5, 0, 1f ; D$ doesn't exist
++ lr r5, [ARC_REG_DC_CTRL]
++ bclr r5, r5, 6 ; Invalidate (discard w/o wback)
++#ifdef CONFIG_ARC_HAS_DCACHE
++ bclr r5, r5, 0 ; Enable (+Inv)
++#else
++ bset r5, r5, 0 ; Disable (+Inv)
++#endif
++ sr r5, [ARC_REG_DC_CTRL]
++
++1:
++.endm
+
+ .cpu A7
+
+@@ -24,13 +56,13 @@
+ .globl stext
+ stext:
+ ;-------------------------------------------------------------------
+- ; Don't clobber r0-r4 yet. It might have bootloader provided info
++ ; Don't clobber r0-r2 yet. It might have bootloader provided info
+ ;-------------------------------------------------------------------
+
+- sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
++ CPU_EARLY_SETUP
+
+ #ifdef CONFIG_SMP
+- ; Only Boot (Master) proceeds. Others wait in platform dependent way
++ ; Ensure Boot (Master) proceeds. Others wait in platform dependent way
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (cpu-id) ^^^ => Zero for UP ARC700
+ ; => #Core-ID if SMP (Master 0)
+@@ -39,7 +71,8 @@ stext:
+ ; need to make sure only boot cpu takes this path.
+ GET_CPU_ID r5
+ cmp r5, 0
+- jnz arc_platform_smp_wait_to_boot
++ mov.ne r0, r5
++ jne arc_platform_smp_wait_to_boot
+ #endif
+ ; Clear BSS before updating any globals
+ ; XXX: use ZOL here
+@@ -89,7 +122,7 @@ stext:
+
+ first_lines_of_secondary:
+
+- sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
++ CPU_EARLY_SETUP
+
+ ; setup per-cpu idle task as "current" on this CPU
+ ld r0, [@secondary_idle_tsk]
+diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
+index 400c663..1f676c4 100644
+--- a/arch/arc/mm/cache_arc700.c
++++ b/arch/arc/mm/cache_arc700.c
+@@ -73,37 +73,9 @@
+ #include <asm/cachectl.h>
+ #include <asm/setup.h>
+
+-/* Instruction cache related Auxiliary registers */
+-#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+-#define ARC_REG_IC_IVIC 0x10
+-#define ARC_REG_IC_CTRL 0x11
+-#define ARC_REG_IC_IVIL 0x19
+-#if (CONFIG_ARC_MMU_VER > 2)
+-#define ARC_REG_IC_PTAG 0x1E
+-#endif
+-
+-/* Bit val in IC_CTRL */
+-#define IC_CTRL_CACHE_DISABLE 0x1
+-
+-/* Data cache related Auxiliary registers */
+-#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
+-#define ARC_REG_DC_IVDC 0x47
+-#define ARC_REG_DC_CTRL 0x48
+-#define ARC_REG_DC_IVDL 0x4A
+-#define ARC_REG_DC_FLSH 0x4B
+-#define ARC_REG_DC_FLDL 0x4C
+-#if (CONFIG_ARC_MMU_VER > 2)
+-#define ARC_REG_DC_PTAG 0x5C
+-#endif
+-
+-/* Bit val in DC_CTRL */
+-#define DC_CTRL_INV_MODE_FLUSH 0x40
+-#define DC_CTRL_FLUSH_STATUS 0x100
+-
+-char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
++char *arc_cache_mumbojumbo(int c, char *buf, int len)
+ {
+ int n = 0;
+- unsigned int c = smp_processor_id();
+
+ #define PR_CACHE(p, enb, str) \
+ { \
+@@ -169,72 +141,43 @@ void read_decode_cache_bcr(void)
+ */
+ void arc_cache_init(void)
+ {
+- unsigned int cpu = smp_processor_id();
+- struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+- struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+- unsigned int dcache_does_alias, temp;
++ unsigned int __maybe_unused cpu = smp_processor_id();
++ struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc;
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+- if (!ic->ver)
+- goto chk_dc;
+-
+-#ifdef CONFIG_ARC_HAS_ICACHE
+- /* 1. Confirm some of I-cache params which Linux assumes */
+- if (ic->line_len != L1_CACHE_BYTES)
+- panic("Cache H/W doesn't match kernel Config");
+-
+- if (ic->ver != CONFIG_ARC_MMU_VER)
+- panic("Cache ver doesn't match MMU ver\n");
+-#endif
+-
+- /* Enable/disable I-Cache */
+- temp = read_aux_reg(ARC_REG_IC_CTRL);
+-
+ #ifdef CONFIG_ARC_HAS_ICACHE
+- temp &= ~IC_CTRL_CACHE_DISABLE;
+-#else
+- temp |= IC_CTRL_CACHE_DISABLE;
++ ic = &cpuinfo_arc700[cpu].icache;
++ if (ic->ver) {
++ if (ic->line_len != L1_CACHE_BYTES)
++ panic("ICache line [%d] != kernel Config [%d]",
++ ic->line_len, L1_CACHE_BYTES);
++
++ if (ic->ver != CONFIG_ARC_MMU_VER)
++ panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
++ ic->ver, CONFIG_ARC_MMU_VER);
++ }
+ #endif
+
+- write_aux_reg(ARC_REG_IC_CTRL, temp);
+-
+-chk_dc:
+- if (!dc->ver)
+- return;
+-
+ #ifdef CONFIG_ARC_HAS_DCACHE
+- if (dc->line_len != L1_CACHE_BYTES)
+- panic("Cache H/W doesn't match kernel Config");
++ dc = &cpuinfo_arc700[cpu].dcache;
++ if (dc->ver) {
++ unsigned int dcache_does_alias;
+
+- /* check for D-Cache aliasing */
+- dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
++ if (dc->line_len != L1_CACHE_BYTES)
++ panic("DCache line [%d] != kernel Config [%d]",
++ dc->line_len, L1_CACHE_BYTES);
+
+- if (dcache_does_alias && !cache_is_vipt_aliasing())
+- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+- else if (!dcache_does_alias && cache_is_vipt_aliasing())
+- panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+-#endif
+-
+- /* Set the default Invalidate Mode to "simpy discard dirty lines"
+- * as this is more frequent then flush before invalidate
+- * Ofcourse we toggle this default behviour when desired
+- */
+- temp = read_aux_reg(ARC_REG_DC_CTRL);
+- temp &= ~DC_CTRL_INV_MODE_FLUSH;
++ /* check for D-Cache aliasing */
++ dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
+
+-#ifdef CONFIG_ARC_HAS_DCACHE
+- /* Enable D-Cache: Clear Bit 0 */
+- write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
+-#else
+- /* Flush D cache */
+- write_aux_reg(ARC_REG_DC_FLSH, 0x1);
+- /* Disable D cache */
+- write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
++ if (dcache_does_alias && !cache_is_vipt_aliasing())
++ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
++ else if (!dcache_does_alias && cache_is_vipt_aliasing())
++ panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
++ }
+ #endif
+-
+- return;
+ }
+
+ #define OP_INV 0x1
+@@ -254,12 +197,16 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
+
+ if (cacheop == OP_INV_IC) {
+ aux_cmd = ARC_REG_IC_IVIL;
++#if (CONFIG_ARC_MMU_VER > 2)
+ aux_tag = ARC_REG_IC_PTAG;
++#endif
+ }
+ else {
+ /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+ aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
++#if (CONFIG_ARC_MMU_VER > 2)
+ aux_tag = ARC_REG_DC_PTAG;
++#endif
+ }
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
+index 992aaba..b463f2a 100644
+--- a/arch/mips/include/asm/ftrace.h
++++ b/arch/mips/include/asm/ftrace.h
+@@ -24,7 +24,7 @@ do { \
+ asm volatile ( \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
+- "2:\n" \
++ "2: .insn\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[tmp_err], 1\n" \
+@@ -46,7 +46,7 @@ do { \
+ asm volatile ( \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
+- "2:\n" \
++ "2: .insn\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[tmp_err], 1\n" \
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 65d452a..dd012c5 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1057,6 +1057,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+ struct mips_huge_tlb_info {
+ int huge_pte;
+ int restore_scratch;
++ bool need_reload_pte;
+ };
+
+ static struct mips_huge_tlb_info
+@@ -1071,6 +1072,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+
+ rv.huge_pte = scratch;
+ rv.restore_scratch = 0;
++ rv.need_reload_pte = false;
+
+ if (check_for_high_segbits) {
+ UASM_i_MFC0(p, tmp, C0_BADVADDR);
+@@ -1259,6 +1261,7 @@ static void build_r4000_tlb_refill_handler(void)
+ } else {
+ htlb_info.huge_pte = K0;
+ htlb_info.restore_scratch = 0;
++ htlb_info.need_reload_pte = true;
+ vmalloc_mode = refill_noscratch;
+ /*
+ * create the plain linear handler
+@@ -1295,7 +1298,8 @@ static void build_r4000_tlb_refill_handler(void)
+ }
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ uasm_l_tlb_huge_update(&l, p);
+- UASM_i_LW(&p, K0, 0, K1);
++ if (htlb_info.need_reload_pte)
++ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ htlb_info.restore_scratch);
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index a8fe5aa..3b46eed 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -380,7 +380,7 @@ static int dlpar_online_cpu(struct device_node *dn)
+ BUG_ON(get_cpu_current_state(cpu)
+ != CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+- rc = cpu_up(cpu);
++ rc = device_online(get_cpu_device(cpu));
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+@@ -463,7 +463,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
+ if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+ set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+- rc = cpu_down(cpu);
++ rc = device_offline(get_cpu_device(cpu));
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+index ff1465c..5acf89c 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
+ };
+
+ static struct resource scif0_resources[] = {
+- DEFINE_RES_MEM(0xfffffe80, 0x100),
++ DEFINE_RES_MEM(0xfffffe80, 0x10),
+ DEFINE_RES_IRQ(evt2irq(0x4e0)),
+ };
+
+@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
+ };
+
+ static struct resource scif1_resources[] = {
+- DEFINE_RES_MEM(0xa4000150, 0x100),
++ DEFINE_RES_MEM(0xa4000150, 0x10),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+ };
+
+@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
+ };
+
+ static struct resource scif2_resources[] = {
+- DEFINE_RES_MEM(0xa4000140, 0x100),
++ DEFINE_RES_MEM(0xa4000140, 0x10),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+ };
+
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 3716e69..e8ab93c 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -1277,7 +1277,7 @@ static void do_ubd_request(struct request_queue *q)
+
+ while(1){
+ struct ubd *dev = q->queuedata;
+- if(dev->end_sg == 0){
++ if(dev->request == NULL){
+ struct request *req = blk_fetch_request(q);
+ if(req == NULL)
+ return;
+@@ -1299,7 +1299,8 @@ static void do_ubd_request(struct request_queue *q)
+ return;
+ }
+ prepare_flush_request(req, io_req);
+- submit_request(io_req, dev);
++ if (submit_request(io_req, dev) == false)
++ return;
+ }
+
+ while(dev->start_sg < dev->end_sg){
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index e409891..98aa930 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2436,12 +2436,9 @@ config X86_DMA_REMAP
+ depends on STA2X11
+
+ config IOSF_MBI
+- bool
++ tristate
++ default m
+ depends on PCI
+- ---help---
+- To be selected by modules requiring access to the Intel OnChip System
+- Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms
+- enumerable by PCI.
+
+ source "net/Kconfig"
+
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 4299eb0..92a2e93 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
+ 1: movl (%rbp),%ebp
+ _ASM_EXTABLE(1b,ia32_badarg)
+ ASM_CLAC
++
++ /*
++ * Sysenter doesn't filter flags, so we need to clear NT
++ * ourselves. To save a few cycles, we can check whether
++ * NT was set instead of doing an unconditional popfq.
++ */
++ testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
++ jnz sysenter_fix_flags
++sysenter_flags_fixed:
++
+ orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ CFI_REMEMBER_STATE
+@@ -184,6 +194,8 @@ sysexit_from_sys_call:
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS_SYSEXIT32
+
++ CFI_RESTORE_STATE
++
+ #ifdef CONFIG_AUDITSYSCALL
+ .macro auditsys_entry_common
+ movl %esi,%r9d /* 6th arg: 4th syscall arg */
+@@ -226,7 +238,6 @@ sysexit_from_sys_call:
+ .endm
+
+ sysenter_auditsys:
+- CFI_RESTORE_STATE
+ auditsys_entry_common
+ movl %ebp,%r9d /* reload 6th syscall arg */
+ jmp sysenter_dispatch
+@@ -235,6 +246,11 @@ sysexit_audit:
+ auditsys_exit sysexit_from_sys_call
+ #endif
+
++sysenter_fix_flags:
++ pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
++ popfq_cfi
++ jmp sysenter_flags_fixed
++
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 9c999c1..01f15b2 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -155,8 +155,9 @@ do { \
+ #define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+-#define compat_elf_check_arch(x) \
+- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
++#define compat_elf_check_arch(x) \
++ (elf_check_arch_ia32(x) || \
++ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
+
+ #if __USER32_DS != __USER_DS
+ # error "The following code assumes __USER32_DS == __USER_DS"
+diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
+index 8e71c79..57995f0 100644
+--- a/arch/x86/include/asm/iosf_mbi.h
++++ b/arch/x86/include/asm/iosf_mbi.h
+@@ -50,6 +50,32 @@
+ #define BT_MBI_PCIE_READ 0x00
+ #define BT_MBI_PCIE_WRITE 0x01
+
++/* Quark available units */
++#define QRK_MBI_UNIT_HBA 0x00
++#define QRK_MBI_UNIT_HB 0x03
++#define QRK_MBI_UNIT_RMU 0x04
++#define QRK_MBI_UNIT_MM 0x05
++#define QRK_MBI_UNIT_MMESRAM 0x05
++#define QRK_MBI_UNIT_SOC 0x31
++
++/* Quark read/write opcodes */
++#define QRK_MBI_HBA_READ 0x10
++#define QRK_MBI_HBA_WRITE 0x11
++#define QRK_MBI_HB_READ 0x10
++#define QRK_MBI_HB_WRITE 0x11
++#define QRK_MBI_RMU_READ 0x10
++#define QRK_MBI_RMU_WRITE 0x11
++#define QRK_MBI_MM_READ 0x10
++#define QRK_MBI_MM_WRITE 0x11
++#define QRK_MBI_MMESRAM_READ 0x12
++#define QRK_MBI_MMESRAM_WRITE 0x13
++#define QRK_MBI_SOC_READ 0x06
++#define QRK_MBI_SOC_WRITE 0x07
++
++#if IS_ENABLED(CONFIG_IOSF_MBI)
++
++bool iosf_mbi_available(void);
++
+ /**
+ * iosf_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+@@ -87,4 +113,33 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+ */
+ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
++#else /* CONFIG_IOSF_MBI is not enabled */
++static inline
++bool iosf_mbi_available(void)
++{
++ return false;
++}
++
++static inline
++int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
++{
++ WARN(1, "IOSF_MBI driver not available");
++ return -EPERM;
++}
++
++static inline
++int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
++{
++ WARN(1, "IOSF_MBI driver not available");
++ return -EPERM;
++}
++
++static inline
++int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
++{
++ WARN(1, "IOSF_MBI driver not available");
++ return -EPERM;
++}
++#endif /* CONFIG_IOSF_MBI */
++
+ #endif /* IOSF_MBI_SYMS_H */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index ac63ea4..e9dc029 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -984,6 +984,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+
++static inline u64 get_canonical(u64 la)
++{
++ return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++ return get_canonical(la) != la;
++#else
++ return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+@@ -1042,7 +1056,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+
+ void kvm_define_shared_msr(unsigned index, u32 msr);
+-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
++int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+
+ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index 0e79420..990a2fe 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -67,6 +67,7 @@
+ #define EXIT_REASON_EPT_MISCONFIG 49
+ #define EXIT_REASON_INVEPT 50
+ #define EXIT_REASON_PREEMPTION_TIMER 52
++#define EXIT_REASON_INVVPID 53
+ #define EXIT_REASON_WBINVD 54
+ #define EXIT_REASON_XSETBV 55
+ #define EXIT_REASON_APIC_WRITE 56
+@@ -114,6 +115,7 @@
+ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
+ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+ { EXIT_REASON_INVD, "INVD" }, \
++ { EXIT_REASON_INVVPID, "INVVPID" }, \
+ { EXIT_REASON_INVPCID, "INVPCID" }
+
+ #endif /* _UAPIVMX_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7f26c9a..523f147 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1290,7 +1290,7 @@ void setup_local_APIC(void)
+ unsigned int value, queued;
+ int i, j, acked = 0;
+ unsigned long long tsc = 0, ntsc;
+- long long max_loops = cpu_khz;
++ long long max_loops = cpu_khz ? cpu_khz : 1000000;
+
+ if (cpu_has_tsc)
+ rdtscll(tsc);
+@@ -1387,7 +1387,7 @@ void setup_local_APIC(void)
+ break;
+ }
+ if (queued) {
+- if (cpu_has_tsc) {
++ if (cpu_has_tsc && cpu_khz) {
+ rdtscll(ntsc);
+ max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ } else
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 8e28bf2..3f27f5f 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1141,7 +1141,7 @@ void syscall_init(void)
+ /* Flags to clear on syscall */
+ wrmsrl(MSR_SYSCALL_MASK,
+ X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+- X86_EFLAGS_IOPL|X86_EFLAGS_AC);
++ X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index c1a07d3..66746a8 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -383,6 +383,13 @@ static void init_intel(struct cpuinfo_x86 *c)
+ detect_extended_topology(c);
+
+ l2 = init_intel_cacheinfo(c);
++
++ /* Detect legacy cache sizes if init_intel_cacheinfo did not */
++ if (l2 == 0) {
++ cpu_detect_cache_sizes(c);
++ l2 = c->x86_cache_size;
++ }
++
+ if (c->cpuid_level > 9) {
+ unsigned eax = cpuid_eax(10);
+ /* Check for version and the number of counters */
+@@ -497,6 +504,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ */
+ if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
+ size = 256;
++
++ /*
++ * Intel Quark SoC X1000 contains a 4-way set associative
++ * 16K cache with a 16 byte cache line and 256 lines per tag
++ */
++ if ((c->x86 == 5) && (c->x86_model == 9))
++ size = 16;
+ return size;
+ }
+ #endif
+@@ -724,7 +738,8 @@ static const struct cpu_dev intel_cpu_dev = {
+ [3] = "OverDrive PODP5V83",
+ [4] = "Pentium MMX",
+ [7] = "Mobile Pentium 75 - 200",
+- [8] = "Mobile Pentium MMX"
++ [8] = "Mobile Pentium MMX",
++ [9] = "Quark SoC X1000",
+ }
+ },
+ { .family = 6, .model_names =
+diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
+index c3aae66..2e97b3c 100644
+--- a/arch/x86/kernel/iosf_mbi.c
++++ b/arch/x86/kernel/iosf_mbi.c
+@@ -25,6 +25,10 @@
+
+ #include <asm/iosf_mbi.h>
+
++#define PCI_DEVICE_ID_BAYTRAIL 0x0F00
++#define PCI_DEVICE_ID_BRASWELL 0x2280
++#define PCI_DEVICE_ID_QUARK_X1000 0x0958
++
+ static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+ static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+@@ -177,6 +181,13 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+ }
+ EXPORT_SYMBOL(iosf_mbi_modify);
+
++bool iosf_mbi_available(void)
++{
++ /* Mbi isn't hot-pluggable. No remove routine is provided */
++ return mbi_pdev;
++}
++EXPORT_SYMBOL(iosf_mbi_available);
++
+ static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+ {
+@@ -193,7 +204,9 @@ static int iosf_mbi_probe(struct pci_dev *pdev,
+ }
+
+ static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
+ { 0, },
+ };
+ MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 9e5de68..b88fc86 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -673,6 +673,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ * handler too.
+ */
+ regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
++ /*
++ * Ensure the signal handler starts with the new fpu state.
++ */
++ if (used_math())
++ drop_init_fpu(current);
+ }
+ signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
+ }
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index e0d1d7a..de02906 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1173,14 +1173,17 @@ void __init tsc_init(void)
+
+ x86_init.timers.tsc_pre_init();
+
+- if (!cpu_has_tsc)
++ if (!cpu_has_tsc) {
++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
++ }
+
+ tsc_khz = x86_platform.calibrate_tsc();
+ cpu_khz = tsc_khz;
+
+ if (!tsc_khz) {
+ mark_tsc_unstable("could not calculate TSC khz");
++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
+ }
+
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index a4b451c..dd50e26 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -268,8 +268,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
+ return -1;
+
+- drop_init_fpu(tsk); /* trigger finit */
+-
+ return 0;
+ }
+
+@@ -399,8 +397,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ set_used_math();
+ }
+
+- if (use_eager_fpu())
++ if (use_eager_fpu()) {
++ preempt_disable();
+ math_state_restore();
++ preempt_enable();
++ }
+
+ return err;
+ } else {
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 7bff3e2..38d3751 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -498,11 +498,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+ masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
+ }
+
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+- register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ u32 limit = get_desc_limit(desc);
+@@ -576,6 +571,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+ return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++ int cs_l)
++{
++ switch (ctxt->op_bytes) {
++ case 2:
++ ctxt->_eip = (u16)dst;
++ break;
++ case 4:
++ ctxt->_eip = (u32)dst;
++ break;
++ case 8:
++ if ((cs_l && is_noncanonical_address(dst)) ||
++ (!cs_l && (dst & ~(u32)-1)))
++ return emulate_gp(ctxt, 0);
++ ctxt->_eip = dst;
++ break;
++ default:
++ WARN(1, "unsupported eip assignment size\n");
++ }
++ return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
++}
++
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++ return assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ u16 selector;
+@@ -1958,13 +1985,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+ case 2: /* call near abs */ {
+ long int old_eip;
+ old_eip = ctxt->_eip;
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
++ if (rc != X86EMUL_CONTINUE)
++ break;
+ ctxt->src.val = old_eip;
+ rc = em_push(ctxt);
+ break;
+ }
+ case 4: /* jmp abs */
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
+ break;
+ case 5: /* jmp far */
+ rc = em_jmp_far(ctxt);
+@@ -1996,10 +2025,14 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
+
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- return em_pop(ctxt);
++ int rc;
++ unsigned long eip;
++
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
++ return assign_eip_near(ctxt, eip);
+ }
+
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+@@ -2277,7 +2310,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ {
+ const struct x86_emulate_ops *ops = ctxt->ops;
+ struct desc_struct cs, ss;
+- u64 msr_data;
++ u64 msr_data, rcx, rdx;
+ int usermode;
+ u16 cs_sel = 0, ss_sel = 0;
+
+@@ -2293,6 +2326,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ else
+ usermode = X86EMUL_MODE_PROT32;
+
++ rcx = reg_read(ctxt, VCPU_REGS_RCX);
++ rdx = reg_read(ctxt, VCPU_REGS_RDX);
++
+ cs.dpl = 3;
+ ss.dpl = 3;
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2310,6 +2346,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ss_sel = cs_sel + 8;
+ cs.d = 0;
+ cs.l = 1;
++ if (is_noncanonical_address(rcx) ||
++ is_noncanonical_address(rdx))
++ return emulate_gp(ctxt, 0);
+ break;
+ }
+ cs_sel |= SELECTOR_RPL_MASK;
+@@ -2318,8 +2357,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
+- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
++ ctxt->_eip = rdx;
++ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+
+ return X86EMUL_CONTINUE;
+ }
+@@ -2858,10 +2897,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
+
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc;
+ long rel = ctxt->src.val;
+
+ ctxt->src.val = (unsigned long)ctxt->_eip;
+- jmp_rel(ctxt, rel);
++ rc = jmp_rel(ctxt, rel);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
+ return em_push(ctxt);
+ }
+
+@@ -2893,11 +2935,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
++ unsigned long eip;
+
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_near(ctxt, eip);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ rsp_increment(ctxt, ctxt->src.val);
+@@ -3227,20 +3270,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
+
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+ if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
+ (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_in(struct x86_emulate_ctxt *ctxt)
+@@ -4637,7 +4684,7 @@ special_insn:
+ break;
+ case 0x70 ... 0x7f: /* jcc (short) */
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x8d: /* lea r16/r32, m */
+ ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -4666,7 +4713,7 @@ special_insn:
+ break;
+ case 0xe9: /* jmp rel */
+ case 0xeb: /* jmp rel short */
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xf4: /* hlt */
+@@ -4786,7 +4833,7 @@ twobyte_insn:
+ break;
+ case 0x80 ... 0x8f: /* jnz rel, etc*/
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x90 ... 0x9f: /* setcc r/m8 */
+ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 518d864..298781d 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+ return;
+
+ timer = &pit->pit_state.timer;
++ mutex_lock(&pit->pit_state.lock);
+ if (hrtimer_cancel(timer))
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++ mutex_unlock(&pit->pit_state.lock);
+ }
+
+ static void destroy_pit_timer(struct kvm_pit *pit)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2de1bc0..9643eda6 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3213,7 +3213,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
+ msr.host_initiated = false;
+
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+- if (svm_set_msr(&svm->vcpu, &msr)) {
++ if (kvm_set_msr(&svm->vcpu, &msr)) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(&svm->vcpu, 0);
+ } else {
+@@ -3495,9 +3495,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
+
+ if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+ || !svm_exit_handlers[exit_code]) {
+- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+- kvm_run->hw.hardware_exit_reason = exit_code;
+- return 0;
++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
+ }
+
+ return svm_exit_handlers[exit_code](svm);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 3927528..0c90f4b 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2582,12 +2582,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ default:
+ msr = find_msr_entry(vmx, msr_index);
+ if (msr) {
++ u64 old_msr_data = msr->data;
+ msr->data = data;
+ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ preempt_disable();
+- kvm_set_shared_msr(msr->index, msr->data,
+- msr->mask);
++ ret = kvm_set_shared_msr(msr->index, msr->data,
++ msr->mask);
+ preempt_enable();
++ if (ret)
++ msr->data = old_msr_data;
+ }
+ break;
+ }
+@@ -5169,7 +5172,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
+ msr.data = data;
+ msr.index = ecx;
+ msr.host_initiated = false;
+- if (vmx_set_msr(vcpu, &msr) != 0) {
++ if (kvm_set_msr(vcpu, &msr) != 0) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+@@ -6441,6 +6444,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ return 1;
+ }
+
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++}
++
+ /*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6486,6 +6495,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_INVEPT] = handle_invept,
++ [EXIT_REASON_INVVPID] = handle_invvpid,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6719,7 +6729,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+- case EXIT_REASON_INVEPT:
++ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -6884,10 +6894,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ && kvm_vmx_exit_handlers[exit_reason])
+ return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ else {
+- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+- vcpu->run->hw.hardware_exit_reason = exit_reason;
++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
+ }
+- return 0;
+ }
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8fbd1a7..51c2851 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -225,20 +225,25 @@ static void kvm_shared_msr_cpu_online(void)
+ shared_msr_update(i, shared_msrs_global.msrs[i]);
+ }
+
+-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+ {
+ unsigned int cpu = smp_processor_id();
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++ int err;
+
+ if (((value ^ smsr->values[slot].curr) & mask) == 0)
+- return;
++ return 0;
+ smsr->values[slot].curr = value;
+- wrmsrl(shared_msrs_global.msrs[slot], value);
++ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
++ if (err)
++ return 1;
++
+ if (!smsr->registered) {
+ smsr->urn.on_user_return = kvm_on_user_return;
+ user_return_notifier_register(&smsr->urn);
+ smsr->registered = true;
+ }
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+
+@@ -946,7 +951,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
+-
+ /*
+ * Writes msr value into into the appropriate "register".
+ * Returns 0 on success, non-0 otherwise.
+@@ -954,8 +958,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ {
++ switch (msr->index) {
++ case MSR_FS_BASE:
++ case MSR_GS_BASE:
++ case MSR_KERNEL_GS_BASE:
++ case MSR_CSTAR:
++ case MSR_LSTAR:
++ if (is_noncanonical_address(msr->data))
++ return 1;
++ break;
++ case MSR_IA32_SYSENTER_EIP:
++ case MSR_IA32_SYSENTER_ESP:
++ /*
++ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
++ * non-canonical address is written on Intel but not on
++ * AMD (which ignores the top 32-bits, because it does
++ * not implement 64-bit SYSENTER).
++ *
++ * 64-bit code should hence be able to write a non-canonical
++ * value on AMD. Making the address canonical ensures that
++ * vmentry does not fail on Intel after writing a non-canonical
++ * value, and that something deterministic happens if the guest
++ * invokes 64-bit SYSENTER.
++ */
++ msr->data = get_canonical(msr->data);
++ }
+ return kvm_x86_ops->set_msr(vcpu, msr);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+
+ /*
+ * Adapt set_msr() to msr_io()'s calling convention
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index a348868..fed892d 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -405,7 +405,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
+ psize = page_level_size(level);
+ pmask = page_level_mask(level);
+ offset = virt_addr & ~pmask;
+- phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
++ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ return (phys_addr | offset);
+ }
+ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 5d21239..95138e9 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -553,7 +553,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ bottom = max(b->physical_block_size, b->io_min) + alignment;
+
+ /* Verify that top and bottom intervals line up */
+- if (max(top, bottom) & (min(top, bottom) - 1)) {
++ if (max(top, bottom) % min(top, bottom)) {
+ t->misaligned = 1;
+ ret = -1;
+ }
+@@ -598,7 +598,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+
+ /* Find lowest common alignment_offset */
+ t->alignment_offset = lcm(t->alignment_offset, alignment)
+- & (max(t->physical_block_size, t->io_min) - 1);
++ % max(t->physical_block_size, t->io_min);
+
+ /* Verify that new alignment_offset is on a logical block boundary */
+ if (t->alignment_offset & (t->logical_block_size - 1)) {
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 2648797..4044cf7 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -489,7 +489,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+
+ if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+ err = DRIVER_ERROR << 24;
+- goto out;
++ goto error;
+ }
+
+ memset(sense, 0, sizeof(sense));
+@@ -499,7 +499,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+
+ blk_execute_rq(q, disk, rq, 0);
+
+-out:
+ err = rq->errors & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (rq->sense_len && rq->sense) {
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a19c027..83187f4 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -49,7 +49,7 @@ struct skcipher_ctx {
+ struct ablkcipher_request req;
+ };
+
+-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
++#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
+ sizeof(struct scatterlist) - 1)
+
+ static inline int skcipher_sndbuf(struct sock *sk)
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index b603720..37acda6 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+- /* software reset. causes dev0 to be selected */
+- iowrite8(ap->ctl, ioaddr->ctl_addr);
+- udelay(20); /* FIXME: flush */
+- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+- udelay(20); /* FIXME: flush */
+- iowrite8(ap->ctl, ioaddr->ctl_addr);
+- ap->last_ctl = ap->ctl;
++ if (ap->ioaddr.ctl_addr) {
++ /* software reset. causes dev0 to be selected */
++ iowrite8(ap->ctl, ioaddr->ctl_addr);
++ udelay(20); /* FIXME: flush */
++ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
++ udelay(20); /* FIXME: flush */
++ iowrite8(ap->ctl, ioaddr->ctl_addr);
++ ap->last_ctl = ap->ctl;
++ }
+
+ /* wait the port to become ready */
+ return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+- /* ignore ata_sff_softreset if ctl isn't accessible */
+- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
+- softreset = NULL;
+-
+ /* ignore built-in hardresets if SCR access is not available */
+ if ((hardreset == sata_std_hardreset ||
+ hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
+diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
+index 96c6a79..79dedba 100644
+--- a/drivers/ata/pata_serverworks.c
++++ b/drivers/ata/pata_serverworks.c
+@@ -252,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
+ pci_write_config_byte(pdev, 0x54, ultra_cfg);
+ }
+
+-static struct scsi_host_template serverworks_sht = {
++static struct scsi_host_template serverworks_osb4_sht = {
++ ATA_BMDMA_SHT(DRV_NAME),
++ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
++};
++
++static struct scsi_host_template serverworks_csb_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+ static struct ata_port_operations serverworks_osb4_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
++ .qc_prep = ata_bmdma_dumb_qc_prep,
+ .cable_detect = serverworks_cable_detect,
+ .mode_filter = serverworks_osb4_filter,
+ .set_piomode = serverworks_set_piomode,
+@@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
+
+ static struct ata_port_operations serverworks_csb_port_ops = {
+ .inherits = &serverworks_osb4_port_ops,
++ .qc_prep = ata_bmdma_qc_prep,
+ .mode_filter = serverworks_csb_filter,
+ };
+
+@@ -405,6 +412,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ }
+ };
+ const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
++ struct scsi_host_template *sht = &serverworks_csb_sht;
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+@@ -418,6 +426,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ /* Select non UDMA capable OSB4 if we can't do fixups */
+ if (rc < 0)
+ ppi[0] = &info[1];
++ sht = &serverworks_osb4_sht;
+ }
+ /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+@@ -434,7 +443,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ ppi[1] = &ata_dummy_port_info;
+ }
+
+- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
++ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 2b56717..6a8955e 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -741,12 +741,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ return &dir->kobj;
+ }
+
++static DEFINE_MUTEX(gdp_mutex);
+
+ static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+ {
+ if (dev->class) {
+- static DEFINE_MUTEX(gdp_mutex);
+ struct kobject *kobj = NULL;
+ struct kobject *parent_kobj;
+ struct kobject *k;
+@@ -810,7 +810,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ glue_dir->kset != &dev->class->p->glue_dirs)
+ return;
+
++ mutex_lock(&gdp_mutex);
+ kobject_put(glue_dir);
++ mutex_unlock(&gdp_mutex);
+ }
+
+ static void cleanup_device_parent(struct device *dev)
+diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
+index 89c497c..04a14e0 100644
+--- a/drivers/block/drbd/drbd_interval.c
++++ b/drivers/block/drbd/drbd_interval.c
+@@ -79,6 +79,7 @@ bool
+ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ {
+ struct rb_node **new = &root->rb_node, *parent = NULL;
++ sector_t this_end = this->sector + (this->size >> 9);
+
+ BUG_ON(!IS_ALIGNED(this->size, 512));
+
+@@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ rb_entry(*new, struct drbd_interval, rb);
+
+ parent = *new;
++ if (here->end < this_end)
++ here->end = this_end;
+ if (this->sector < here->sector)
+ new = &(*new)->rb_left;
+ else if (this->sector > here->sector)
+@@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ return false;
+ }
+
++ this->end = this_end;
+ rb_link_node(&this->rb, parent, new);
+ rb_insert_augmented(&this->rb, root, &augment_callbacks);
+ return true;
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 7296c7f..255ca23 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3217,7 +3217,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
+ page_count = (u32) calc_pages_for(offset, length);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+- ret = PTR_ERR(pages);
++ return PTR_ERR(pages);
+
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(object_name, offset, length,
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 64c60ed..63fc7f0 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -763,6 +763,7 @@ again:
+ BUG_ON(new_map_idx >= segs_to_map);
+ if (unlikely(map[new_map_idx].status != 0)) {
+ pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
++ put_free_pages(blkif, &pages[seg_idx]->page, 1);
+ pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+ ret |= 1;
+ goto next;
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 429b75b..8a64dbe 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1063,8 +1063,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pool while mixing, and hash one final time.
+ */
+ sha_transform(hash.w, extract, workspace);
+- memset(extract, 0, sizeof(extract));
+- memset(workspace, 0, sizeof(workspace));
++ memzero_explicit(extract, sizeof(extract));
++ memzero_explicit(workspace, sizeof(workspace));
+
+ /*
+ * In case the hash function has some recognizable output
+@@ -1076,7 +1076,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ hash.w[2] ^= rol32(hash.w[2], 16);
+
+ memcpy(out, &hash, EXTRACT_SIZE);
+- memset(&hash, 0, sizeof(hash));
++ memzero_explicit(&hash, sizeof(hash));
+ }
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -1124,7 +1124,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ }
+
+ /* Wipe data just returned from memory */
+- memset(tmp, 0, sizeof(tmp));
++ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+ }
+@@ -1162,7 +1162,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ }
+
+ /* Wipe data just returned from memory */
+- memset(tmp, 0, sizeof(tmp));
++ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+ }
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 4159236..4854f81 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -460,7 +460,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
+ show_one(scaling_min_freq, min);
+ show_one(scaling_max_freq, max);
+-show_one(scaling_cur_freq, cur);
++
++static ssize_t show_scaling_cur_freq(
++ struct cpufreq_policy *policy, char *buf)
++{
++ ssize_t ret;
++
++ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
++ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
++ else
++ ret = sprintf(buf, "%u\n", policy->cur);
++ return ret;
++}
+
+ static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
+@@ -854,11 +865,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
+ if (ret)
+ goto err_out_kobj_put;
+ }
+- if (has_target()) {
+- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+- if (ret)
+- goto err_out_kobj_put;
+- }
++
++ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
++ if (ret)
++ goto err_out_kobj_put;
++
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index ae52c77..533a509 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -55,6 +55,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
+ return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+ }
+
++static inline int ceiling_fp(int32_t x)
++{
++ int mask, ret;
++
++ ret = fp_toint(x);
++ mask = (1 << FRAC_BITS) - 1;
++ if (x & mask)
++ ret += 1;
++ return ret;
++}
++
+ struct sample {
+ int32_t core_pct_busy;
+ u64 aperf;
+@@ -67,6 +78,7 @@ struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
++ int scaling;
+ int turbo_pstate;
+ };
+
+@@ -118,6 +130,7 @@ struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
++ int (*get_scaling)(void);
+ void (*set)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
+ };
+@@ -397,7 +410,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ cpudata->vid.ratio);
+
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+- vid = fp_toint(vid_fp);
++ vid = ceiling_fp(vid_fp);
+
+ if (pstate > cpudata->pstate.max_pstate)
+ vid = cpudata->vid.turbo;
+@@ -407,6 +420,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+ }
+
++#define BYT_BCLK_FREQS 5
++static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
++
++static int byt_get_scaling(void)
++{
++ u64 value;
++ int i;
++
++ rdmsrl(MSR_FSB_FREQ, value);
++ i = value & 0x3;
++
++ BUG_ON(i > BYT_BCLK_FREQS);
++
++ return byt_freq_table[i] * 100;
++}
++
+ static void byt_get_vid(struct cpudata *cpudata)
+ {
+ u64 value;
+@@ -451,6 +480,11 @@ static int core_get_turbo_pstate(void)
+ return ret;
+ }
+
++static inline int core_get_scaling(void)
++{
++ return 100000;
++}
++
+ static void core_set_pstate(struct cpudata *cpudata, int pstate)
+ {
+ u64 val;
+@@ -475,6 +509,7 @@ static struct cpu_defaults core_params = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
++ .get_scaling = core_get_scaling,
+ .set = core_set_pstate,
+ },
+ };
+@@ -493,6 +528,7 @@ static struct cpu_defaults byt_params = {
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_turbo_pstate,
+ .set = byt_set_pstate,
++ .get_scaling = byt_get_scaling,
+ .get_vid = byt_get_vid,
+ },
+ };
+@@ -526,7 +562,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+- trace_cpu_frequency(pstate * 100000, cpu->cpu);
++ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+
+ cpu->pstate.current_pstate = pstate;
+
+@@ -555,6 +591,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
++ cpu->pstate.scaling = pstate_funcs.get_scaling();
+
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+@@ -574,7 +611,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
+ core_pct += 1;
+
+ sample->freq = fp_toint(
+- mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
++ mul_fp(int_tofp(
++ cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
++ core_pct));
+
+ sample->core_pct_busy = (int32_t)core_pct;
+ }
+@@ -685,10 +724,14 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(0x37, byt_params),
+ ICPU(0x3a, core_params),
+ ICPU(0x3c, core_params),
++ ICPU(0x3d, core_params),
+ ICPU(0x3e, core_params),
+ ICPU(0x3f, core_params),
+ ICPU(0x45, core_params),
+ ICPU(0x46, core_params),
++ ICPU(0x4c, byt_params),
++ ICPU(0x4f, core_params),
++ ICPU(0x56, core_params),
+ {}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+@@ -751,6 +794,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ limits.min_perf_pct = 100;
+ limits.min_perf = int_tofp(1);
++ limits.max_policy_pct = 100;
+ limits.max_perf_pct = 100;
+ limits.max_perf = int_tofp(1);
+ limits.no_turbo = limits.turbo_disabled;
+@@ -812,12 +856,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+- policy->min = cpu->pstate.min_pstate * 100000;
+- policy->max = cpu->pstate.turbo_pstate * 100000;
++ policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
++ policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
+- policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
++ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
++ policy->cpuinfo.max_freq =
++ cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+@@ -875,6 +920,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
++ pstate_funcs.get_scaling = funcs->get_scaling;
+ pstate_funcs.set = funcs->set;
+ pstate_funcs.get_vid = funcs->get_vid;
+ }
+diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
+index df6575f..682288c 100644
+--- a/drivers/edac/cpc925_edac.c
++++ b/drivers/edac/cpc925_edac.c
+@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
+
+ if (apiexcp & UECC_EXCP_DETECTED) {
+ cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
+- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
++ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ pfn, offset, 0,
+ csrow, -1, -1,
+ mci->ctl_name, "");
+diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
+index 3cda79b..ece3aef 100644
+--- a/drivers/edac/e7xxx_edac.c
++++ b/drivers/edac/e7xxx_edac.c
+@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+ static void process_ce_no_info(struct mem_ctl_info *mci)
+ {
+ edac_dbg(3, "\n");
+- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "");
+ }
+
+diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
+index fa1326e..ad76f10 100644
+--- a/drivers/edac/i3200_edac.c
++++ b/drivers/edac/i3200_edac.c
+@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
+ -1, -1,
+ "i3000 UE", "");
+ } else if (log & I3200_ECCERRLOG_CE) {
+- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+- "i3000 UE", "");
++ "i3000 CE", "");
+ }
+ }
+ }
+diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
+index 3382f63..4382343 100644
+--- a/drivers/edac/i82860_edac.c
++++ b/drivers/edac/i82860_edac.c
+@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 UE", "");
+ else
+- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ info->eap, 0, info->derrsyn,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 CE", "");
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index cca063b..d2e56e9 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1012,8 +1012,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+- data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
+- data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
++ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
++ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
+index 08ce520..faa1f42 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
++++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
+@@ -32,6 +32,8 @@ static struct drm_driver driver;
+ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
++ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
++ 0x0001, 0, 0, 0 },
+ {0,}
+ };
+
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index fd98bec..c6d9777 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -645,7 +645,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
+
+ cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+- WARN(1, "cpu backlight already enabled\n");
++ DRM_DEBUG_KMS("cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
+@@ -693,7 +693,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
+
+ ctl = I915_READ(BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+- WARN(1, "backlight already enabled\n");
++ DRM_DEBUG_KMS("backlight already enabled\n");
+ I915_WRITE(BLC_PWM_CTL, 0);
+ }
+
+@@ -724,7 +724,7 @@ static void i965_enable_backlight(struct intel_connector *connector)
+
+ ctl2 = I915_READ(BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+- WARN(1, "backlight already enabled\n");
++ DRM_DEBUG_KMS("backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
+ }
+@@ -758,7 +758,7 @@ static void vlv_enable_backlight(struct intel_connector *connector)
+
+ ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+- WARN(1, "backlight already enabled\n");
++ DRM_DEBUG_KMS("backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+index 2d9b9d7..f3edd28 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct dcb_output *outp)
+ {
+ u16 dcb = dcb_outp(bios, idx, ver, len);
++ memset(outp, 0x00, sizeof(*outp));
+ if (dcb) {
+ if (*ver >= 0x20) {
+ u32 conn = nv_ro32(bios, dcb + 0x00);
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 798bde2..c39c414 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -523,7 +523,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ struct qxl_framebuffer *qfb;
+ struct qxl_bo *bo, *old_bo = NULL;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+- uint32_t width, height, base_offset;
+ bool recreate_primary = false;
+ int ret;
+ int surf_id;
+@@ -553,9 +552,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ if (qcrtc->index == 0)
+ recreate_primary = true;
+
+- width = mode->hdisplay;
+- height = mode->vdisplay;
+- base_offset = 0;
++ if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
++ DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
++ return -EINVAL;
++ }
+
+ ret = qxl_bo_reserve(bo, false);
+ if (ret != 0)
+@@ -569,10 +569,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ if (recreate_primary) {
+ qxl_io_destroy_primary(qdev);
+ qxl_io_log(qdev,
+- "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+- width, height, bo->surf.width,
+- bo->surf.height, bo->surf.stride, bo->surf.format);
+- qxl_io_create_primary(qdev, base_offset, bo);
++ "recreate primary: %dx%d,%d,%d\n",
++ bo->surf.width, bo->surf.height,
++ bo->surf.stride, bo->surf.format);
++ qxl_io_create_primary(qdev, 0, bo);
+ bo->is_primary = true;
+ surf_id = 0;
+ } else {
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 0a2f5b4..879e628 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -6200,7 +6200,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
+ if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+ index == 0) {
+ /* XXX disable for A0 tahiti */
+- si_pi->ulv.supported = true;
++ si_pi->ulv.supported = false;
+ si_pi->ulv.pl = *pl;
+ si_pi->ulv.one_pcie_lane_in_ulv = false;
+ si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index 0644429..52b4711 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ /* oh nos! */
+ dev_err(dev->dev, "no encoders/connectors found\n");
++ drm_mode_config_cleanup(dev);
+ return -ENXIO;
+ }
+
+@@ -178,33 +179,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
++ if (!priv->wq) {
++ ret = -ENOMEM;
++ goto fail_free_priv;
++ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+- goto fail;
++ goto fail_free_wq;
+ }
+
+ priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->mmio) {
+ dev_err(dev->dev, "failed to ioremap\n");
+ ret = -ENOMEM;
+- goto fail;
++ goto fail_free_wq;
+ }
+
+ priv->clk = clk_get(dev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+- goto fail;
++ goto fail_iounmap;
+ }
+
+ priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get display clock\n");
+ ret = -ENODEV;
+- goto fail;
++ goto fail_put_clk;
+ }
+
+ #ifdef CONFIG_CPU_FREQ
+@@ -214,7 +219,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev->dev, "failed to register cpufreq notifier\n");
+- goto fail;
++ goto fail_put_disp_clk;
+ }
+ #endif
+
+@@ -259,13 +264,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ ret = modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+- goto fail;
++ goto fail_cpufreq_unregister;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+- goto fail;
++ goto fail_mode_config_cleanup;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+@@ -273,7 +278,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+- goto fail;
++ goto fail_vblank_cleanup;
+ }
+
+ platform_set_drvdata(pdev, dev);
+@@ -289,13 +294,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ priv->fbdev = drm_fbdev_cma_init(dev, bpp,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
++ if (IS_ERR(priv->fbdev)) {
++ ret = PTR_ERR(priv->fbdev);
++ goto fail_irq_uninstall;
++ }
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+-fail:
+- tilcdc_unload(dev);
++fail_irq_uninstall:
++ pm_runtime_get_sync(dev->dev);
++ drm_irq_uninstall(dev);
++ pm_runtime_put_sync(dev->dev);
++
++fail_vblank_cleanup:
++ drm_vblank_cleanup(dev);
++
++fail_mode_config_cleanup:
++ drm_mode_config_cleanup(dev);
++
++fail_cpufreq_unregister:
++ pm_runtime_disable(dev->dev);
++#ifdef CONFIG_CPU_FREQ
++ cpufreq_unregister_notifier(&priv->freq_transition,
++ CPUFREQ_TRANSITION_NOTIFIER);
++fail_put_disp_clk:
++ clk_put(priv->disp_clk);
++#endif
++
++fail_put_clk:
++ clk_put(priv->clk);
++
++fail_iounmap:
++ iounmap(priv->mmio);
++
++fail_free_wq:
++ flush_workqueue(priv->wq);
++ destroy_workqueue(priv->wq);
++
++fail_free_priv:
++ dev->dev_private = NULL;
++ kfree(priv);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 0083cbf..fb7c36e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -688,7 +688,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ goto out_err0;
+ }
+
+- if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
++ /*
++ * Limit back buffer size to VRAM size. Remove this once
++ * screen targets are implemented.
++ */
++ if (dev_priv->prim_bb_mem > dev_priv->vram_size)
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+ mutex_unlock(&dev_priv->hw_mutex);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 8a65041..c8f8ecf 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1954,6 +1954,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ int i;
++ u32 assumed_bpp = 2;
++
++ /*
++ * If using screen objects, then assume 32-bpp because that's what the
++ * SVGA device is assuming
++ */
++ if (dev_priv->sou_priv)
++ assumed_bpp = 4;
+
+ /* Add preferred mode */
+ {
+@@ -1964,8 +1972,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+
+- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+- mode->vdisplay)) {
++ if (vmw_kms_validate_mode_vram(dev_priv,
++ mode->hdisplay * assumed_bpp,
++ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+@@ -1987,7 +1996,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ bmode->vdisplay > max_height)
+ continue;
+
+- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
++ if (!vmw_kms_validate_mode_vram(dev_priv,
++ bmode->hdisplay * assumed_bpp,
+ bmode->vdisplay))
+ continue;
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 6e12cd0..91bc66b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -292,6 +292,11 @@
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+
++#define USB_VENDOR_ID_ELAN 0x04f3
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
++
+ #define USB_VENDOR_ID_ELECOM 0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084 0x0061
+
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 44df131..617c47f 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid)
+ struct usbhid_device *usbhid = hid->driver_data;
+
+ spin_lock_irqsave(&usbhid->lock, flags);
+- if (hid->open > 0 &&
++ if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) &&
+ !test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
+ !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
+ !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
+@@ -292,6 +292,8 @@ static void hid_irq_in(struct urb *urb)
+ case 0: /* success */
+ usbhid_mark_busy(usbhid);
+ usbhid->retry_delay = 0;
++ if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
++ break;
+ hid_input_report(urb->context, HID_INPUT_REPORT,
+ urb->transfer_buffer,
+ urb->actual_length, 1);
+@@ -734,8 +736,10 @@ void usbhid_close(struct hid_device *hid)
+ if (!--hid->open) {
+ spin_unlock_irq(&usbhid->lock);
+ hid_cancel_delayed_stuff(usbhid);
+- usb_kill_urb(usbhid->urbin);
+- usbhid->intf->needs_remote_wakeup = 0;
++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
++ usb_kill_urb(usbhid->urbin);
++ usbhid->intf->needs_remote_wakeup = 0;
++ }
+ } else {
+ spin_unlock_irq(&usbhid->lock);
+ }
+@@ -1119,6 +1123,19 @@ static int usbhid_start(struct hid_device *hid)
+
+ set_bit(HID_STARTED, &usbhid->iofl);
+
++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
++ ret = usb_autopm_get_interface(usbhid->intf);
++ if (ret)
++ goto fail;
++ usbhid->intf->needs_remote_wakeup = 1;
++ ret = hid_start_in(hid);
++ if (ret) {
++ dev_err(&hid->dev,
++ "failed to start in urb: %d\n", ret);
++ }
++ usb_autopm_put_interface(usbhid->intf);
++ }
++
+ /* Some keyboards don't work until their LEDs have been set.
+ * Since BIOSes do set the LEDs, it must be safe for any device
+ * that supports the keyboard boot protocol.
+@@ -1151,6 +1168,9 @@ static void usbhid_stop(struct hid_device *hid)
+ if (WARN_ON(!usbhid))
+ return;
+
++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
++ usbhid->intf->needs_remote_wakeup = 0;
++
+ clear_bit(HID_STARTED, &usbhid->iofl);
+ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
+ set_bit(HID_DISCONNECTED, &usbhid->iofl);
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 8e4ddb3..deb3643 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -69,6 +69,9 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 11e9c7f..8873d84 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ }
+ }
+
+- ret = wait_for_completion_io_timeout(&dev->cmd_complete,
++ ret = wait_for_completion_timeout(&dev->cmd_complete,
+ dev->adapter.timeout);
+ if (ret == 0) {
+ dev_err(dev->dev, "controller timed out\n");
+diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
+index 1665c8e..e18bc67 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
++++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
+@@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+ goto st_sensors_free_memory;
+ }
+
+- for (i = 0; i < n * num_data_channels; i++) {
++ for (i = 0; i < n * byte_for_channel; i++) {
+ if (i < n)
+ buf[i] = rx_array[i];
+ else
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index f1da362..8fca488f 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ },
+ {
+ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
++ },
++ },
++ {
++ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+@@ -609,6 +615,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ },
+ },
+ {
++ /* Fujitsu A544 laptop */
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
++ },
++ },
++ {
++ /* Fujitsu AH544 laptop */
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
++ },
++ },
++ {
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ .matches = {
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 0e722c1..ca1621b 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -465,6 +465,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
+ c->n_buffers[dirty]++;
+ b->list_mode = dirty;
+ list_move(&b->lru_list, &c->lru[dirty]);
++ b->last_accessed = jiffies;
+ }
+
+ /*----------------------------------------------------------------
+@@ -1485,9 +1486,9 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ if (!--nr_to_scan)
+- break;
++ return freed;
++ dm_bufio_cond_resched();
+ }
+- dm_bufio_cond_resched();
+ }
+ return freed;
+ }
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 08d9a20..c69d0b7 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
+
+ r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ if (r) {
+- cn_del_callback(&ulog_cn_id);
++ kfree(prealloced_cn_msg);
+ return r;
+ }
+
+diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
+index 1e344b0..22e8c20 100644
+--- a/drivers/media/dvb-frontends/ds3000.c
++++ b/drivers/media/dvb-frontends/ds3000.c
+@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
+ memcpy(&state->frontend.ops, &ds3000_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
++
++ /*
++ * Some devices like T480 starts with voltage on. Be sure
++ * to turn voltage off during init, as this can otherwise
++ * interfere with Unicable SCR systems.
++ */
++ ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
+ return &state->frontend;
+
+ error3:
+diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
+index 72af644..cf93021 100644
+--- a/drivers/media/i2c/tda7432.c
++++ b/drivers/media/i2c/tda7432.c
+@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
+ if (t->mute->val) {
+ lf |= TDA7432_MUTE;
+ lr |= TDA7432_MUTE;
+- lf |= TDA7432_MUTE;
++ rf |= TDA7432_MUTE;
+ rr |= TDA7432_MUTE;
+ }
+ /* Mute & update balance*/
+diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
+index 40c42de..7a62097 100644
+--- a/drivers/media/tuners/m88ts2022.c
++++ b/drivers/media/tuners/m88ts2022.c
+@@ -314,7 +314,7 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
+ div_min = gdiv28 * 78 / 100;
+ div_max = clamp_val(div_max, 0U, 63U);
+
+- f_3db_hz = c->symbol_rate * 135UL / 200UL;
++ f_3db_hz = mult_frac(c->symbol_rate, 135, 200);
+ f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
+ f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
+
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 4d97a76..c1a3f8f 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -2993,16 +2993,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
+ }
+ }
+
+- if (dev->chip_id == CHIP_ID_EM2870 ||
+- dev->chip_id == CHIP_ID_EM2874 ||
+- dev->chip_id == CHIP_ID_EM28174 ||
+- dev->chip_id == CHIP_ID_EM28178) {
+- /* Digital only device - don't load any alsa module */
+- dev->audio_mode.has_audio = false;
+- dev->has_audio_class = false;
+- dev->has_alsa_audio = false;
+- }
+-
+ if (chip_name != default_chip_name)
+ printk(KERN_INFO DRIVER_NAME
+ ": chip ID is %s\n", chip_name);
+@@ -3272,7 +3262,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
+ dev->alt = -1;
+ dev->is_audio_only = has_audio && !(has_video || has_dvb);
+ dev->has_alsa_audio = has_audio;
+- dev->audio_mode.has_audio = has_audio;
+ dev->has_video = has_video;
+ dev->ifnum = ifnum;
+
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index 898fb9b..97fd881 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -506,8 +506,18 @@ int em28xx_audio_setup(struct em28xx *dev)
+ int vid1, vid2, feat, cfg;
+ u32 vid;
+
+- if (!dev->audio_mode.has_audio)
++ if (dev->chip_id == CHIP_ID_EM2870 ||
++ dev->chip_id == CHIP_ID_EM2874 ||
++ dev->chip_id == CHIP_ID_EM28174 ||
++ dev->chip_id == CHIP_ID_EM28178) {
++ /* Digital only device - don't load any alsa module */
++ dev->audio_mode.has_audio = false;
++ dev->has_audio_class = false;
++ dev->has_alsa_audio = false;
+ return 0;
++ }
++
++ dev->audio_mode.has_audio = true;
+
+ /* See how this device is configured */
+ cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index c3c9289..e24ee08 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -953,13 +953,16 @@ static int em28xx_stop_streaming(struct vb2_queue *vq)
+ }
+
+ spin_lock_irqsave(&dev->slock, flags);
++ if (dev->usb_ctl.vid_buf != NULL) {
++ vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
++ dev->usb_ctl.vid_buf = NULL;
++ }
+ while (!list_empty(&vidq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+- dev->usb_ctl.vid_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ return 0;
+@@ -981,13 +984,16 @@ int em28xx_stop_vbi_streaming(struct vb2_queue *vq)
+ }
+
+ spin_lock_irqsave(&dev->slock, flags);
++ if (dev->usb_ctl.vbi_buf != NULL) {
++ vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
++ dev->usb_ctl.vbi_buf = NULL;
++ }
+ while (!list_empty(&vbiq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+- dev->usb_ctl.vbi_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ return 0;
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index c3bb250..753ad4c 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2210,6 +2210,15 @@ static struct usb_device_id uvc_ids[] = {
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
++ /* Dell XPS M1330 (OmniVision OV7670 webcam) */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x05a9,
++ .idProduct = 0x7670,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_QUIRK_PROBE_DEF },
+ /* Apple Built-In iSight */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
+index 433d6d7..c5521ce 100644
+--- a/drivers/media/v4l2-core/v4l2-common.c
++++ b/drivers/media/v4l2-core/v4l2-common.c
+@@ -431,16 +431,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
++ /* Clamp to aligned min and max */
++ x = clamp(x, (min + ~mask) & mask, max & mask);
++
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+- /* Clamp to aligned value of min and max */
+- if (x < min)
+- x = (min + ~mask) & mask;
+- else if (x > max)
+- x = max & mask;
+-
+ return x;
+ }
+
+diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
+index 1d15735..89b4c42 100644
+--- a/drivers/mfd/rtsx_pcr.c
++++ b/drivers/mfd/rtsx_pcr.c
+@@ -1177,7 +1177,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ pcr->msi_en = msi_en;
+ if (pcr->msi_en) {
+ ret = pci_enable_msi(pcidev);
+- if (ret < 0)
++ if (ret)
+ pcr->msi_en = false;
+ }
+
+diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
+index d4e8604..e87a248 100644
+--- a/drivers/mfd/ti_am335x_tscadc.c
++++ b/drivers/mfd/ti_am335x_tscadc.c
+@@ -54,11 +54,11 @@ void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
+- tsadc->reg_se_cache = val;
++ tsadc->reg_se_cache |= val;
+ if (tsadc->adc_waiting)
+ wake_up(&tsadc->reg_se_wait);
+ else if (!tsadc->adc_in_use)
+- tscadc_writel(tsadc, REG_SE, val);
++ tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
+
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ }
+@@ -97,6 +97,7 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
+ void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
+ {
+ spin_lock_irq(&tsadc->reg_lock);
++ tsadc->reg_se_cache |= val;
+ am335x_tscadc_need_adc(tsadc);
+
+ tscadc_writel(tsadc, REG_SE, val);
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index 7e18661..ca297d7 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -342,6 +342,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
++ /*
++ * The controller offloads the last byte {CRC-7, end bit 1'b1}
++ * of response type R2. Assign dummy CRC, 0, and end bit to the
++ * byte(ptr[16], goes into the LSB of resp[3] later).
++ */
++ ptr[16] = 1;
++
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 0955777..19bfa0a 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -103,6 +103,10 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+
++static const struct sdhci_pci_fixes sdhci_intel_qrk = {
++ .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
++};
++
+ static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+@@ -733,6 +737,14 @@ static const struct pci_device_id pci_ids[] = {
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_QRK_SD,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_qrk,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+index 6d71871..c101477 100644
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -17,6 +17,7 @@
+ #define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
+ #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
+ #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
++#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
+
+ /*
+ * PCI registers
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index c5dad65..904b451 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ av = tmp_av;
+ else {
+ ubi_err("orphaned volume in fastmap pool!");
++ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ return UBI_BAD_FASTMAP;
+ }
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 494b888..7e5c6a8 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -135,6 +135,7 @@ config MACVLAN
+ config MACVTAP
+ tristate "MAC-VLAN based tap driver"
+ depends on MACVLAN
++ depends on INET
+ help
+ This adds a specialized tap character device driver that is based
+ on the MAC-VLAN network interface, called macvtap. A macvtap device
+@@ -205,6 +206,7 @@ config RIONET_RX_SIZE
+
+ config TUN
+ tristate "Universal TUN/TAP device driver support"
++ depends on INET
+ select CRC32
+ ---help---
+ TUN/TAP provides packet reception and transmission for user space
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 0c6adaa..f30ceb1 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -16,6 +16,7 @@
+ #include <linux/idr.h>
+ #include <linux/fs.h>
+
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+@@ -65,7 +66,7 @@ static struct cdev macvtap_cdev;
+ static const struct proto_ops macvtap_socket_ops;
+
+ #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+- NETIF_F_TSO6 | NETIF_F_UFO)
++ NETIF_F_TSO6)
+ #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+ #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
+@@ -569,7 +570,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
++ pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
++ current->comm);
+ gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ default:
+ return -EINVAL;
+@@ -614,8 +619,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+- else if (sinfo->gso_type & SKB_GSO_UDP)
+- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+@@ -950,9 +953,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+ if (arg & TUN_F_TSO6)
+ feature_mask |= NETIF_F_TSO6;
+ }
+-
+- if (arg & TUN_F_UFO)
+- feature_mask |= NETIF_F_UFO;
+ }
+
+ /* tun/tap driver inverts the usage for TSO offloads, where
+@@ -963,7 +963,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
+ * When user space turns off TSO, we turn off GSO/LRO so that
+ * user-space will not receive TSO frames.
+ */
+- if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
++ if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+ features |= RX_OFFLOADS;
+ else
+ features &= ~RX_OFFLOADS;
+@@ -1064,7 +1064,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+- TUN_F_TSO_ECN | TUN_F_UFO))
++ TUN_F_TSO_ECN))
+ return -EINVAL;
+
+ rtnl_lock();
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 72ff14b..5a1897d 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -601,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+- if (atomic_long_read(&file->f_count) <= 2) {
++ if (atomic_long_read(&file->f_count) < 2) {
+ ppp_release(NULL, file);
+ err = 0;
+ } else
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 26f8635..2c8b1c2 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -65,6 +65,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -174,7 +175,7 @@ struct tun_struct {
+ struct net_device *dev;
+ netdev_features_t set_features;
+ #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+- NETIF_F_TSO6|NETIF_F_UFO)
++ NETIF_F_TSO6)
+
+ int vnet_hdr_sz;
+ int sndbuf;
+@@ -1140,6 +1141,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ break;
+ }
+
++ skb_reset_network_header(skb);
++
+ if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ pr_debug("GSO!\n");
+ switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -1150,8 +1153,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
++ {
++ static bool warned;
++
++ if (!warned) {
++ warned = true;
++ netdev_warn(tun->dev,
++ "%s: using disabled UFO feature; please fix this program\n",
++ current->comm);
++ }
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
++ }
+ default:
+ tun->dev->stats.rx_frame_errors++;
+ kfree_skb(skb);
+@@ -1180,7 +1195,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ }
+
+- skb_reset_network_header(skb);
+ skb_probe_transport_header(skb, 0);
+
+ rxhash = skb_get_hash(skb);
+@@ -1252,8 +1266,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+- else if (sinfo->gso_type & SKB_GSO_UDP)
+- gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else {
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+@@ -1783,11 +1795,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
+ features |= NETIF_F_TSO6;
+ arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
+ }
+-
+- if (arg & TUN_F_UFO) {
+- features |= NETIF_F_UFO;
+- arg &= ~TUN_F_UFO;
+- }
+ }
+
+ /* This gives the user a way to test for new features in future by
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index 054e59c..8cee173 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -696,6 +696,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
+ {
+ struct usbnet *dev = netdev_priv(net);
+ struct sockaddr *addr = p;
++ int ret;
+
+ if (netif_running(net))
+ return -EBUSY;
+@@ -705,8 +706,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* Set the MAC address */
+- return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
++ ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, net->dev_addr);
++ if (ret < 0)
++ return ret;
++
++ return 0;
+ }
+
+ static const struct net_device_ops ax88179_netdev_ops = {
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 841b608..07a3255 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -496,8 +496,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
++ {
++ static bool warned;
++
++ if (!warned) {
++ warned = true;
++ netdev_warn(dev,
++ "host using disabled UFO feature; please fix it\n");
++ }
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+ break;
++ }
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ break;
+@@ -836,8 +845,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+- else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
+@@ -1657,7 +1664,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+- dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
++ dev->hw_features |= NETIF_F_TSO
+ | NETIF_F_TSO_ECN | NETIF_F_TSO6;
+ }
+ /* Individual feature bits: what can host handle? */
+@@ -1667,11 +1674,9 @@ static int virtnet_probe(struct virtio_device *vdev)
+ dev->hw_features |= NETIF_F_TSO6;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ dev->hw_features |= NETIF_F_TSO_ECN;
+- if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+- dev->hw_features |= NETIF_F_UFO;
+
+ if (gso)
+- dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
++ dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+ /* (!csum && gso) case will be fixed by register_netdev() */
+ }
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+@@ -1711,8 +1716,7 @@ static int virtnet_probe(struct virtio_device *vdev)
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ vi->big_packets = true;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+@@ -1903,9 +1907,9 @@ static struct virtio_device_id id_table[] = {
+ static unsigned int features[] = {
+ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
+ VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
++ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
++ VIRTIO_NET_F_GUEST_ECN,
+ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
+ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 9b40532..0704a04 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1447,9 +1447,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ if (!in6_dev)
+ goto out;
+
+- if (!pskb_may_pull(skb, skb->len))
+- goto out;
+-
+ iphdr = ipv6_hdr(skb);
+ saddr = &iphdr->saddr;
+ daddr = &iphdr->daddr;
+@@ -1770,6 +1767,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
++ struct net_device *dev = skb->dev;
++ int len = skb->len;
+
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+ rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+@@ -1793,16 +1792,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tx_packets++;
+- tx_stats->tx_bytes += skb->len;
++ tx_stats->tx_bytes += len;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->rx_packets++;
+- rx_stats->rx_bytes += skb->len;
++ rx_stats->rx_bytes += len;
+ u64_stats_update_end(&rx_stats->syncp);
+ } else {
+- skb->dev->stats.rx_dropped++;
++ dev->stats.rx_dropped++;
+ }
+ }
+
+@@ -1977,7 +1976,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ return arp_reduce(dev, skb);
+ #if IS_ENABLED(CONFIG_IPV6)
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
++ pskb_may_pull(skb, sizeof(struct ipv6hdr)
++ + sizeof(struct nd_msg)) &&
+ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ struct nd_msg *msg;
+
+@@ -1986,6 +1986,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ return neigh_reduce(dev, skb);
+ }
++ eth = eth_hdr(skb);
+ #endif
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index 2ca62af..76ee486 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -173,14 +173,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+
+ /*
+ * for data packets, rate info comes from the table inside the fw. This
+- * table is controlled by LINK_QUALITY commands. Exclude ctrl port
+- * frames like EAPOLs which should be treated as mgmt frames. This
+- * avoids them being sent initially in high rates which increases the
+- * chances for completion of the 4-Way handshake.
++ * table is controlled by LINK_QUALITY commands
+ */
+
+- if (ieee80211_is_data(fc) && sta &&
+- !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
++ if (ieee80211_is_data(fc) && sta) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index 7cf6081..ebd5625 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -52,6 +52,7 @@
+ * RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
+ * RF5360 2.4G 1T1R
++ * RF5362 2.4G 1T1R
+ * RF5370 2.4G 1T1R
+ * RF5390 2.4G 1T1R
+ */
+@@ -72,6 +73,7 @@
+ #define RF3070 0x3070
+ #define RF3290 0x3290
+ #define RF5360 0x5360
++#define RF5362 0x5362
+ #define RF5370 0x5370
+ #define RF5372 0x5372
+ #define RF5390 0x5390
+@@ -2145,7 +2147,7 @@ struct mac_iveiv_entry {
+ /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
+ #define RFCSR3_PA1_BIAS_CCK FIELD8(0x70)
+ #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
+-/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
++/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */
+ #define RFCSR3_VCOCAL_EN FIELD8(0x80)
+ /* Bits for RF3050 */
+ #define RFCSR3_BIT1 FIELD8(0x02)
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 41d4a81..4e16d4d 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -3142,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ break;
+ case RF3070:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -3159,6 +3160,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF3322) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
++ rt2x00_rf(rt2x00dev, RF5362) ||
+ rt2x00_rf(rt2x00dev, RF5370) ||
+ rt2x00_rf(rt2x00dev, RF5372) ||
+ rt2x00_rf(rt2x00dev, RF5390) ||
+@@ -4273,6 +4275,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
+ case RF3070:
+ case RF3290:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -7073,6 +7076,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ case RF3320:
+ case RF3322:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -7529,6 +7533,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ case RF3320:
+ case RF3322:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -7658,6 +7663,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ case RF3070:
+ case RF3290:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index caddc1b..57d3967 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -1062,6 +1062,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071) },
+ { USB_DEVICE(0x1b75, 0x3072) },
++ { USB_DEVICE(0x1b75, 0xa200) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888) },
+ /* Pegatron */
+@@ -1235,6 +1236,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Arcadyan */
+ { USB_DEVICE(0x043e, 0x7a12) },
+ { USB_DEVICE(0x043e, 0x7a32) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x17e8) },
+ /* Azurewave */
+ { USB_DEVICE(0x13d3, 0x3329) },
+ { USB_DEVICE(0x13d3, 0x3365) },
+@@ -1271,6 +1274,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x057c, 0x8501) },
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x0241) },
++ { USB_DEVICE(0x0411, 0x0253) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c1a) },
+ { USB_DEVICE(0x2001, 0x3c21) },
+@@ -1361,6 +1365,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0df6, 0x0053) },
+ { USB_DEVICE(0x0df6, 0x0069) },
+ { USB_DEVICE(0x0df6, 0x006f) },
++ { USB_DEVICE(0x0df6, 0x0078) },
+ /* SMC */
+ { USB_DEVICE(0x083a, 0xa512) },
+ { USB_DEVICE(0x083a, 0xc522) },
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 89e888a..3935614 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1117,52 +1117,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_read_string);
+
+ /**
+- * of_property_read_string_index - Find and read a string from a multiple
+- * strings property.
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @index: index of the string in the list of strings
+- * @out_string: pointer to null terminated return string, modified only if
+- * return value is 0.
+- *
+- * Search for a property in a device tree node and retrieve a null
+- * terminated string value (pointer to data, not a copy) in the list of strings
+- * contained in that property.
+- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+- * property does not have a value, and -EILSEQ if the string is not
+- * null-terminated within the length of the property data.
+- *
+- * The out_string pointer is modified only if a valid string can be decoded.
+- */
+-int of_property_read_string_index(struct device_node *np, const char *propname,
+- int index, const char **output)
+-{
+- struct property *prop = of_find_property(np, propname, NULL);
+- int i = 0;
+- size_t l = 0, total = 0;
+- const char *p;
+-
+- if (!prop)
+- return -EINVAL;
+- if (!prop->value)
+- return -ENODATA;
+- if (strnlen(prop->value, prop->length) >= prop->length)
+- return -EILSEQ;
+-
+- p = prop->value;
+-
+- for (i = 0; total < prop->length; total += l, p += l) {
+- l = strlen(p) + 1;
+- if (i++ == index) {
+- *output = p;
+- return 0;
+- }
+- }
+- return -ENODATA;
+-}
+-EXPORT_SYMBOL_GPL(of_property_read_string_index);
+-
+-/**
+ * of_property_match_string() - Find string in a list and return index
+ * @np: pointer to node containing string list property
+ * @propname: string list property name
+@@ -1188,7 +1142,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ end = p + prop->length;
+
+ for (i = 0; p < end; i++, p += l) {
+- l = strlen(p) + 1;
++ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ pr_debug("comparing %s with %s\n", string, p);
+@@ -1200,39 +1154,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_match_string);
+
+ /**
+- * of_property_count_strings - Find and return the number of strings from a
+- * multiple strings property.
++ * of_property_read_string_util() - Utility helper for parsing string properties
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
++ * @out_strs: output array of string pointers.
++ * @sz: number of array elements to read.
++ * @skip: Number of strings to skip over at beginning of list.
+ *
+- * Search for a property in a device tree node and retrieve the number of null
+- * terminated string contain in it. Returns the number of strings on
+- * success, -EINVAL if the property does not exist, -ENODATA if property
+- * does not have a value, and -EILSEQ if the string is not null-terminated
+- * within the length of the property data.
++ * Don't call this function directly. It is a utility helper for the
++ * of_property_read_string*() family of functions.
+ */
+-int of_property_count_strings(struct device_node *np, const char *propname)
++int of_property_read_string_helper(struct device_node *np, const char *propname,
++ const char **out_strs, size_t sz, int skip)
+ {
+ struct property *prop = of_find_property(np, propname, NULL);
+- int i = 0;
+- size_t l = 0, total = 0;
+- const char *p;
++ int l = 0, i = 0;
++ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+- if (strnlen(prop->value, prop->length) >= prop->length)
+- return -EILSEQ;
+-
+ p = prop->value;
++ end = p + prop->length;
+
+- for (i = 0; total < prop->length; total += l, p += l, i++)
+- l = strlen(p) + 1;
+-
+- return i;
++ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
++ l = strnlen(p, end - p) + 1;
++ if (p + l > end)
++ return -EILSEQ;
++ if (out_strs && i >= skip)
++ *out_strs++ = p;
++ }
++ i -= skip;
++ return i <= 0 ? -ENODATA : i;
+ }
+-EXPORT_SYMBOL_GPL(of_property_count_strings);
++EXPORT_SYMBOL_GPL(of_property_read_string_helper);
+
+ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
+ {
+diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
+index 6643d19..70c61d7 100644
+--- a/drivers/of/selftest.c
++++ b/drivers/of/selftest.c
+@@ -132,8 +132,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ }
+
+-static void __init of_selftest_property_match_string(void)
++static void __init of_selftest_property_string(void)
+ {
++ const char *strings[4];
+ struct device_node *np;
+ int rc;
+
+@@ -150,13 +151,66 @@ static void __init of_selftest_property_match_string(void)
+ rc = of_property_match_string(np, "phandle-list-names", "third");
+ selftest(rc == 2, "third expected:0 got:%i\n", rc);
+ rc = of_property_match_string(np, "phandle-list-names", "fourth");
+- selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
++ selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
+ rc = of_property_match_string(np, "missing-property", "blah");
+- selftest(rc == -EINVAL, "missing property; rc=%i", rc);
++ selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
+ rc = of_property_match_string(np, "empty-property", "blah");
+- selftest(rc == -ENODATA, "empty property; rc=%i", rc);
++ selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
+ rc = of_property_match_string(np, "unterminated-string", "blah");
+- selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++
++ /* of_property_count_strings() tests */
++ rc = of_property_count_strings(np, "string-property");
++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "phandle-list-names");
++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "unterminated-string");
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "unterminated-string-list");
++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++
++ /* of_property_read_string_index() tests */
++ rc = of_property_read_string_index(np, "string-property", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "string-property", 1, strings);
++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[1] = NULL;
++
++ /* of_property_read_string_array() tests */
++ rc = of_property_read_string_array(np, "string-property", strings, 4);
++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++ /* -- An incorrectly formed string should cause a failure */
++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++ /* -- parsing the correctly formed strings should still work: */
++ strings[2] = NULL;
++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
++ selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
++ strings[1] = NULL;
++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
++ selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
+ }
+
+ static void __init of_selftest_parse_interrupts(void)
+@@ -379,7 +433,7 @@ static int __init of_selftest(void)
+
+ pr_info("start of selftest - you will see error messages\n");
+ of_selftest_parse_phandle_with_args();
+- of_selftest_property_match_string();
++ of_selftest_property_string();
+ of_selftest_parse_interrupts();
+ of_selftest_parse_interrupts_extended();
+ of_selftest_match_node();
+diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
+index 0007d3c..eedee37 100644
+--- a/drivers/of/testcase-data/tests-phandle.dtsi
++++ b/drivers/of/testcase-data/tests-phandle.dtsi
+@@ -32,7 +32,9 @@
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider3 0>;
+ empty-property;
++ string-property = "foobar";
+ unterminated-string = [40 41 42 43];
++ unterminated-string-list = "first", "second", [40 41 42 43];
+ };
+ };
+ };
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 39a207a..a943c6c 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -186,9 +186,9 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(modalias);
+
+-static ssize_t enabled_store(struct device *dev,
+- struct device_attribute *attr, const char *buf,
+- size_t count)
++static ssize_t enable_store(struct device *dev,
++ struct device_attribute *attr, const char *buf,
++ size_t count)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+@@ -212,15 +212,15 @@ static ssize_t enabled_store(struct device *dev,
+ return result < 0 ? result : count;
+ }
+
+-static ssize_t enabled_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
++static ssize_t enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev (dev);
+ return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
+ }
+-static DEVICE_ATTR_RW(enabled);
++static DEVICE_ATTR_RW(enable);
+
+ #ifdef CONFIG_NUMA
+ static ssize_t
+@@ -526,7 +526,7 @@ static struct attribute *pci_dev_attrs[] = {
+ #endif
+ &dev_attr_dma_mask_bits.attr,
+ &dev_attr_consistent_dma_mask_bits.attr,
+- &dev_attr_enabled.attr,
++ &dev_attr_enable.attr,
+ &dev_attr_broken_parity_status.attr,
+ &dev_attr_msi_bus.attr,
+ #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
+diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
+index 665b96b..eb9f190 100644
+--- a/drivers/pinctrl/pinctrl-baytrail.c
++++ b/drivers/pinctrl/pinctrl-baytrail.c
+@@ -263,7 +263,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
+ spin_lock_irqsave(&vg->lock, flags);
+
+ reg_val = readl(reg) | BYT_DIR_MASK;
+- reg_val &= ~BYT_OUTPUT_EN;
++ reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
+
+ if (value)
+ writel(reg_val | BYT_LEVEL, reg);
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index c91f69b3..dcfcaea 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -570,6 +570,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+ },
+ },
++ {
++ /*
++ * Note no video_set_backlight_video_vendor, we must use the
++ * acer interface, as there is no native backlight interface.
++ */
++ .ident = "Acer KAV80",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
+index 9e4dab4..ef1f4c9 100644
+--- a/drivers/power/charger-manager.c
++++ b/drivers/power/charger-manager.c
+@@ -1720,6 +1720,11 @@ static int charger_manager_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (!desc->psy_fuel_gauge) {
++ dev_err(&pdev->dev, "No fuel gauge power supply defined\n");
++ return -EINVAL;
++ }
++
+ /* Counting index only */
+ while (desc->psy_charger_stat[i])
+ i++;
+diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
+index 5fb899f..24c926bf 100644
+--- a/drivers/regulator/max77693.c
++++ b/drivers/regulator/max77693.c
+@@ -232,7 +232,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
+ struct max77693_pmic_dev *max77693_pmic;
+ struct max77693_regulator_data *rdata = NULL;
+ int num_rdata, i;
+- struct regulator_config config;
++ struct regulator_config config = { };
+
+ num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
+ if (!rdata || num_rdata <= 0) {
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 788c4fe..9d81f76 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -707,7 +707,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+ node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+- WARN_ON(node && (node != se_nacl));
++ if (WARN_ON(node && (node != se_nacl))) {
++ /*
++ * The nacl no longer matches what we think it should be.
++ * Most likely a new dynamic acl has been added while
++ * someone dropped the hardware lock. It clearly is a
++ * bug elsewhere, but this bit can't make things worse.
++ */
++ btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
++ node, GFP_ATOMIC);
++ }
+
+ pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ se_nacl, nacl->nport_wwnn, nacl->nport_id);
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index a253920..a5db6f9 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -45,7 +45,7 @@
+
+ #define SPI_TCR 0x08
+
+-#define SPI_CTAR(x) (0x0c + (x * 4))
++#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
+ #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
+ #define SPI_CTAR_CPOL(x) ((x) << 26)
+ #define SPI_CTAR_CPHA(x) ((x) << 25)
+@@ -69,7 +69,7 @@
+
+ #define SPI_PUSHR 0x34
+ #define SPI_PUSHR_CONT (1 << 31)
+-#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28)
++#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
+ #define SPI_PUSHR_EOQ (1 << 27)
+ #define SPI_PUSHR_CTCNT (1 << 26)
+ #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index 2789b45..971855e 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -1075,7 +1075,7 @@ err_rxdesc:
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ err_tx_sgmap:
+ dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+- pl022->sgt_tx.nents, DMA_FROM_DEVICE);
++ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ err_rx_sgmap:
+ sg_free_table(&pl022->sgt_tx);
+ err_alloc_tx_sg:
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index ced9ecf..7ab3ccb 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1280,7 +1280,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
+ if (status != 0)
+ return status;
+ write_SSCR0(0, drv_data->ioaddr);
+- clk_disable_unprepare(ssp->clk);
++
++ if (!pm_runtime_suspended(dev))
++ clk_disable_unprepare(ssp->clk);
+
+ return 0;
+ }
+@@ -1294,7 +1296,8 @@ static int pxa2xx_spi_resume(struct device *dev)
+ pxa2xx_spi_dma_resume(drv_data);
+
+ /* Enable the SSP clock */
+- clk_prepare_enable(ssp->clk);
++ if (!pm_runtime_suspended(dev))
++ clk_prepare_enable(ssp->clk);
+
+ /* Restore LPSS private register bits */
+ lpss_ssp_setup(drv_data);
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 2b96665..97d4b3f 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = AD5933_REG_TEMP_DATA,
++ .scan_index = -1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 14,
+@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "real_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+- BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "real",
+ .address = AD5933_REG_REAL_DATA,
+ .scan_index = 0,
+ .scan_type = {
+@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "imag_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+- BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "imag",
+ .address = AD5933_REG_IMAG_DATA,
+ .scan_index = 1,
+ .scan_type = {
+@@ -748,14 +745,14 @@ static int ad5933_probe(struct i2c_client *client,
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5933_channels;
+- indio_dev->num_channels = 1; /* only register temp0_input */
++ indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
+
+ ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+- /* skip temp0_input, register in0_(real|imag)_raw */
+- ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
++ ret = iio_buffer_register(indio_dev, ad5933_channels,
++ ARRAY_SIZE(ad5933_channels));
+ if (ret)
+ goto error_unreg_ring;
+
+diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
+index 0731820..e8c98cf 100644
+--- a/drivers/staging/iio/meter/ade7758.h
++++ b/drivers/staging/iio/meter/ade7758.h
+@@ -119,7 +119,6 @@ struct ade7758_state {
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+- const struct iio_chan_spec *ade7758_ring_channels;
+ struct spi_transfer ring_xfer[4];
+ struct spi_message ring_msg;
+ /*
+diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
+index cba183e..94d9914 100644
+--- a/drivers/staging/iio/meter/ade7758_core.c
++++ b/drivers/staging/iio/meter/ade7758_core.c
+@@ -630,9 +630,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
+ .scan_index = 0,
+ .scan_type = {
+@@ -644,9 +641,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
+ .scan_index = 1,
+ .scan_type = {
+@@ -658,9 +652,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "apparent_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "apparent",
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
+ .scan_index = 2,
+ .scan_type = {
+@@ -672,9 +664,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "active_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "active",
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
+ .scan_index = 3,
+ .scan_type = {
+@@ -686,9 +676,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "reactive_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "reactive",
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
+ .scan_index = 4,
+ .scan_type = {
+@@ -700,9 +688,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
+ .scan_index = 5,
+ .scan_type = {
+@@ -714,9 +699,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
+ .scan_index = 6,
+ .scan_type = {
+@@ -728,9 +710,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "apparent_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "apparent",
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
+ .scan_index = 7,
+ .scan_type = {
+@@ -742,9 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "active_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "active",
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
+ .scan_index = 8,
+ .scan_type = {
+@@ -756,9 +734,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "reactive_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "reactive",
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
+ .scan_index = 9,
+ .scan_type = {
+@@ -770,9 +746,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
+ .scan_index = 10,
+ .scan_type = {
+@@ -784,9 +757,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
+ .scan_index = 11,
+ .scan_type = {
+@@ -798,9 +768,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "apparent_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "apparent",
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
+ .scan_index = 12,
+ .scan_type = {
+@@ -812,9 +780,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "active_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "active",
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
+ .scan_index = 13,
+ .scan_type = {
+@@ -826,9 +792,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "reactive_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "reactive",
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
+ .scan_index = 14,
+ .scan_type = {
+@@ -869,13 +833,14 @@ static int ade7758_probe(struct spi_device *spi)
+ goto error_free_rx;
+ }
+ st->us = spi;
+- st->ade7758_ring_channels = &ade7758_channels[0];
+ mutex_init(&st->buf_lock);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ade7758_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
++ indio_dev->channels = ade7758_channels;
++ indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
+
+ ret = ade7758_configure_ring(indio_dev);
+ if (ret)
+diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
+index c0accf8..6e90064 100644
+--- a/drivers/staging/iio/meter/ade7758_ring.c
++++ b/drivers/staging/iio/meter/ade7758_ring.c
+@@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
+ **/
+ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
+ {
+- struct ade7758_state *st = iio_priv(indio_dev);
+ unsigned channel;
+
+- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
++ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ return -EINVAL;
+
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+
+ ade7758_write_waveform_type(&indio_dev->dev,
+- st->ade7758_ring_channels[channel].address);
++ indio_dev->channels[channel].address);
+
+ return 0;
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 6ea95d2..38b4be2 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1409,7 +1409,8 @@ int core_dev_add_initiator_node_lun_acl(
+ * Check to see if there are any existing persistent reservation APTPL
+ * pre-registrations that need to be enabled for this LUN ACL..
+ */
+- core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
++ core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
++ lacl->mapped_lun);
+ return 0;
+ }
+
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 3013287..1205dbd 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -944,10 +944,10 @@ int core_scsi3_check_aptpl_registration(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+- struct se_lun_acl *lun_acl)
++ struct se_node_acl *nacl,
++ u32 mapped_lun)
+ {
+- struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+- struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
++ struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ return 0;
+diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
+index 2ee2936..749fd7b 100644
+--- a/drivers/target/target_core_pr.h
++++ b/drivers/target/target_core_pr.h
+@@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
+ unsigned char *, u16, u32, int, int, u8);
+ extern int core_scsi3_check_aptpl_registration(struct se_device *,
+ struct se_portal_group *, struct se_lun *,
+- struct se_lun_acl *);
++ struct se_node_acl *, u32);
+ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+ struct se_node_acl *);
+ extern void core_scsi3_free_all_registrations(struct se_device *);
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index c036595..fb8a1a1 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -40,6 +40,7 @@
+ #include <target/target_core_fabric.h>
+
+ #include "target_core_internal.h"
++#include "target_core_pr.h"
+
+ extern struct se_device *g_lun0_dev;
+
+@@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
+
+ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg);
++ /*
++ * Check to see if there are any existing persistent reservation
++ * APTPL pre-registrations that need to be enabled for this dynamic
++ * LUN ACL now..
++ */
++ core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
++ lun->unpacked_lun);
+ spin_lock(&tpg->tpg_lun_lock);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 24f5279..9232c773 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1855,8 +1855,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+- if (ret)
+- goto out;
++ goto out;
+ }
+
+ switch (cmd->data_direction) {
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 25b8f68..27b5554 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -353,7 +353,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
+ * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
+ * Die! Die! Die!
+ */
+- if (baud == 38400)
++ if (try == 0 && baud == 38400)
+ baud = altbaud;
+
+ /*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index d3448a9..25d0741 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1701,6 +1701,7 @@ int tty_release(struct inode *inode, struct file *filp)
+ int pty_master, tty_closing, o_tty_closing, do_sleep;
+ int idx;
+ char buf[64];
++ long timeout = 0;
+
+ if (tty_paranoia_check(tty, inode, __func__))
+ return 0;
+@@ -1785,7 +1786,11 @@ int tty_release(struct inode *inode, struct file *filp)
+ __func__, tty_name(tty, buf));
+ tty_unlock_pair(tty, o_tty);
+ mutex_unlock(&tty_mutex);
+- schedule();
++ schedule_timeout_killable(timeout);
++ if (timeout < 120 * HZ)
++ timeout = 2 * timeout + 1;
++ else
++ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ /*
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index eabccd4..331f06a 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -965,11 +965,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ /* FIXME: Needs to clear unsupported bits in the termios */
+ acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+- if (!newline.dwDTERate) {
++ if (C_BAUD(tty) == B0) {
+ newline.dwDTERate = acm->line.dwDTERate;
+ newctrl &= ~ACM_CTRL_DTR;
+- } else
++ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+ newctrl |= ACM_CTRL_DTR;
++ }
+
+ if (newctrl != acm->ctrlout)
+ acm_set_control(acm, acm->ctrlout = newctrl);
+@@ -1672,6 +1673,7 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
++ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ },
+ /* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 2518c32..ef6ec13b 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2057,6 +2057,8 @@ int usb_alloc_streams(struct usb_interface *interface,
+ return -EINVAL;
+ if (dev->speed != USB_SPEED_SUPER)
+ return -EINVAL;
++ if (dev->state < USB_STATE_CONFIGURED)
++ return -ENODEV;
+
+ /* Streams only apply to bulk endpoints. */
+ for (i = 0; i < num_eps; i++)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 445d62a..d2bd9d7 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4378,6 +4378,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
+ struct usb_qualifier_descriptor *qual;
+ int status;
+
++ if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
++ return;
++
+ qual = kmalloc (sizeof *qual, GFP_KERNEL);
+ if (qual == NULL)
+ return;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 5144d11..c854593 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -93,6 +93,16 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* Elan Touchscreen */
++ { USB_DEVICE(0x04f3, 0x0089), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
++ { USB_DEVICE(0x04f3, 0x009b), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
++ { USB_DEVICE(0x04f3, 0x016f), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
+ /* Roland SC-8820 */
+ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 21a3520..0985ff7 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -251,7 +251,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+
+ /* stall is always issued on EP0 */
+ dep = dwc->eps[0];
+- __dwc3_gadget_ep_set_halt(dep, 1);
++ __dwc3_gadget_ep_set_halt(dep, 1, false);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
+
+@@ -461,7 +461,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ return -EINVAL;
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+ break;
+- ret = __dwc3_gadget_ep_set_halt(dep, set);
++ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
+ return -EINVAL;
+ break;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 09e9619..d90c70c 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -532,12 +532,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ if (!usb_endpoint_xfer_isoc(desc))
+ return 0;
+
+- memset(&trb_link, 0, sizeof(trb_link));
+-
+ /* Link TRB for ISOC. The HWO bit is never reset */
+ trb_st_hw = &dep->trb_pool[0];
+
+ trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
++ memset(trb_link, 0, sizeof(*trb_link));
+
+ trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+@@ -588,7 +587,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+
+ /* make sure HW endpoint isn't stalled */
+ if (dep->flags & DWC3_EP_STALL)
+- __dwc3_gadget_ep_set_halt(dep, 0);
++ __dwc3_gadget_ep_set_halt(dep, 0, false);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+@@ -1186,7 +1185,7 @@ out0:
+ return ret;
+ }
+
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+ {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc = dep->dwc;
+@@ -1195,6 +1194,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ memset(&params, 0x00, sizeof(params));
+
+ if (value) {
++ if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
++ (!list_empty(&dep->req_queued) ||
++ !list_empty(&dep->request_list)))) {
++ dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
++ dep->name);
++ return -EAGAIN;
++ }
++
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETSTALL, &params);
+ if (ret)
+@@ -1234,7 +1241,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+ goto out;
+ }
+
+- ret = __dwc3_gadget_ep_set_halt(dep, value);
++ ret = __dwc3_gadget_ep_set_halt(dep, value, false);
+ out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -1254,7 +1261,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
+ if (dep->number == 0 || dep->number == 1)
+ return dwc3_gadget_ep0_set_halt(ep, 1);
+ else
+- return dwc3_gadget_ep_set_halt(ep, 1);
++ return __dwc3_gadget_ep_set_halt(dep, 1, false);
+ }
+
+ /* -------------------------------------------------------------------------- */
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index a0ee75b..ac625582 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -85,7 +85,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc);
+ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags);
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+
+ /**
+ * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
+index ab1065a..3384486 100644
+--- a/drivers/usb/gadget/f_acm.c
++++ b/drivers/usb/gadget/f_acm.c
+@@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ if (acm->notify->driver_data) {
+ VDBG(cdev, "reset acm control interface %d\n", intf);
+ usb_ep_disable(acm->notify);
+- } else {
+- VDBG(cdev, "init acm ctrl interface %d\n", intf);
++ }
++
++ if (!acm->notify->desc)
+ if (config_ep_by_speed(cdev->gadget, f, acm->notify))
+ return -EINVAL;
+- }
++
+ usb_ep_enable(acm->notify);
+ acm->notify->driver_data = acm;
+
+diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
+index 5bcf7d0..afd0a15 100644
+--- a/drivers/usb/gadget/f_fs.c
++++ b/drivers/usb/gadget/f_fs.c
+@@ -1995,8 +1995,6 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
+ func->conf = c;
+ func->gadget = c->cdev->gadget;
+
+- ffs_data_get(func->ffs);
+-
+ /*
+ * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+ * configurations are bound in sequence with list_for_each_entry,
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index 27768a7..9ce0b13 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -456,6 +456,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
+ {
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
++ if (!udc->driver) {
++ dev_err(dev, "soft-connect without a gadget driver\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (sysfs_streq(buf, "connect")) {
+ usb_gadget_udc_start(udc->gadget, udc->driver);
+ usb_gadget_connect(udc->gadget);
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index c2d5afc..1d29bbf 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -190,7 +190,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+ }
+ }
+
+- if (!list_empty(&controller->early_tx_list)) {
++ if (!list_empty(&controller->early_tx_list) &&
++ !hrtimer_is_queued(&controller->early_tx)) {
+ ret = HRTIMER_RESTART;
+ hrtimer_forward_now(&controller->early_tx,
+ ktime_set(0, 150 * NSEC_PER_USEC));
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 85f5215..865243e 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -733,7 +733,9 @@ static int dsps_resume(struct device *dev)
+ dsps_writel(mbase, wrp->mode, glue->context.mode);
+ dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+- setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
++ if (musb->xceiv->state == OTG_STATE_B_IDLE &&
++ musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
++ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
+
+ return 0;
+ }
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 8afa813..0180eef 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -229,6 +229,9 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
+ phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
+ if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
+ dev_dbg(dev, "unable to find transceiver\n");
++ if (!IS_ERR(phy))
++ phy = ERR_PTR(-ENODEV);
++
+ goto err0;
+ }
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 63b2af2..3beae72 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
++ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 3614620..a523ada 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -145,6 +145,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+ * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
+ */
+ static const struct usb_device_id id_table_combined[] = {
++ { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
+@@ -674,6 +675,8 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5937b2d..6786b70 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -30,6 +30,12 @@
+
+ /*** third-party PIDs (using FTDI_VID) ***/
+
++/*
++ * Certain versions of the official Windows FTDI driver reprogrammed
++ * counterfeit FTDI devices to PID 0. Support these devices anyway.
++ */
++#define FTDI_BRICK_PID 0x0000
++
+ #define FTDI_LUMEL_PD12_PID 0x6002
+
+ /*
+@@ -143,8 +149,12 @@
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+ #define XSENS_VID 0x2639
+-#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
++#define XSENS_AWINDA_STATION_PID 0x0101
++#define XSENS_AWINDA_DONGLE_PID 0x0102
+ #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
++#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
++
++/* Xsens devices using FTDI VID */
+ #define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
+ #define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
+ #define XSENS_CONVERTER_2_PID 0xD38A
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 618c1c1..5cdb32b 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ port->interrupt_out_urb->transfer_buffer_length = length;
+
+ priv->cur_pos = priv->cur_pos + length;
+- result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
++ result = usb_submit_urb(port->interrupt_out_urb,
++ GFP_ATOMIC);
+ dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
+ todo = priv->filled - priv->cur_pos;
+
+@@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
+ priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
+ result = usb_submit_urb(port->interrupt_in_urb,
+- GFP_NOIO);
++ GFP_ATOMIC);
+ dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
+ }
+ }
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index 4856fb7..4b7bfb3 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+
+ /* The connected devices do not have a bulk write endpoint,
+ * to transmit data to de barcode device the control endpoint is used */
+- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
++ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!dr) {
+ count = -ENOMEM;
+ goto error_no_dr;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e47aabe..8b34841 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
+ #define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE920 0x1200
++#define TELIT_PRODUCT_LE910 0x1201
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -361,6 +362,7 @@ static void option_instat_callback(struct urb *urb);
+
+ /* Haier products */
+ #define HAIER_VENDOR_ID 0x201e
++#define HAIER_PRODUCT_CE81B 0x10f8
+ #define HAIER_PRODUCT_CE100 0x2009
+
+ /* Cinterion (formerly Siemens) products */
+@@ -588,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info telit_le910_blacklist = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(2),
++};
++
+ static const struct option_blacklist_info telit_le920_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(5),
+@@ -1137,6 +1144,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
++ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+@@ -1612,6 +1621,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
+ /* Pirelli */
+ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 22c7d43..b1d815e 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ */
+ if (result == USB_STOR_XFER_LONG)
+ fake_sense = 1;
++
++ /*
++ * Sometimes a device will mistakenly skip the data phase
++ * and go directly to the status phase without sending a
++ * zero-length packet. If we get a 13-byte response here,
++ * check whether it really is a CSW.
++ */
++ if (result == USB_STOR_XFER_SHORT &&
++ srb->sc_data_direction == DMA_FROM_DEVICE &&
++ transfer_length - scsi_get_resid(srb) ==
++ US_BULK_CS_WRAP_LEN) {
++ struct scatterlist *sg = NULL;
++ unsigned int offset = 0;
++
++ if (usb_stor_access_xfer_buf((unsigned char *) bcs,
++ US_BULK_CS_WRAP_LEN, srb, &sg,
++ &offset, FROM_XFER_BUF) ==
++ US_BULK_CS_WRAP_LEN &&
++ bcs->Signature ==
++ cpu_to_le32(US_BULK_CS_SIGN)) {
++ usb_stor_dbg(us, "Device skipped data phase\n");
++ scsi_set_resid(srb, transfer_length);
++ goto skipped_data_phase;
++ }
++ }
+ }
+
+ /* See flow chart on pg 15 of the Bulk Only Transport spec for
+@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
++ skipped_data_phase:
+ /* check bulk status */
+ residue = le32_to_cpu(bcs->Residue);
+ usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182b..dbfe4ee 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
+ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ int bottom_only)
+ {
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ unsigned int cw = vc->vc_font.width;
+ unsigned int ch = vc->vc_font.height;
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bs = info->var.yres - bh;
+ struct fb_fillrect region;
+
+- region.color = attr_bgcol_ec(bgshift, vc, info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
+index 41b32ae..5a3cbf6 100644
+--- a/drivers/video/console/fbcon_ccw.c
++++ b/drivers/video/console/fbcon_ccw.c
+@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ unsigned int bs = vc->vc_rows*ch;
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
+index a93670e..e7ee44d 100644
+--- a/drivers/video/console/fbcon_cw.c
++++ b/drivers/video/console/fbcon_cw.c
+@@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ unsigned int rs = info->var.yres - rw;
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
+index ff0872c..19e3714 100644
+--- a/drivers/video/console/fbcon_ud.c
++++ b/drivers/video/console/fbcon_ud.c
+@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
+index a416f9b..827b5f8 100644
+--- a/drivers/virtio/virtio_pci.c
++++ b/drivers/virtio/virtio_pci.c
+@@ -791,6 +791,7 @@ static int virtio_pci_restore(struct device *dev)
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
++ unsigned status = 0;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+@@ -801,14 +802,40 @@ static int virtio_pci_restore(struct device *dev)
+ return ret;
+
+ pci_set_master(pci_dev);
++ /* We always start by resetting the device, in case a previous
++ * driver messed it up. */
++ vp_reset(&vp_dev->vdev);
++
++ /* Acknowledge that we've seen the device. */
++ status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
++ vp_set_status(&vp_dev->vdev, status);
++
++ /* Maybe driver failed before freeze.
++ * Restore the failed status, for debugging. */
++ status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
++ vp_set_status(&vp_dev->vdev, status);
++
++ if (!drv)
++ return 0;
++
++ /* We have a driver! */
++ status |= VIRTIO_CONFIG_S_DRIVER;
++ vp_set_status(&vp_dev->vdev, status);
++
+ vp_finalize_features(&vp_dev->vdev);
+
+- if (drv && drv->restore)
++ if (drv->restore) {
+ ret = drv->restore(&vp_dev->vdev);
++ if (ret) {
++ status |= VIRTIO_CONFIG_S_FAILED;
++ vp_set_status(&vp_dev->vdev, status);
++ return ret;
++ }
++ }
+
+ /* Finally, tell the device we're all set */
+- if (!ret)
+- vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
++ status |= VIRTIO_CONFIG_S_DRIVER_OK;
++ vp_set_status(&vp_dev->vdev, status);
+
+ return ret;
+ }
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index ca248b0..196b089 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -423,7 +423,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ ret = 0;
+ fail:
+ while (ret < 0 && !list_empty(&tmplist)) {
+- sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
++ sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 71e2d0e..4d06a57 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2077,6 +2077,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ struct page *page, void *fsdata)
+ {
+ struct inode *inode = mapping->host;
++ loff_t old_size = inode->i_size;
+ int i_size_changed = 0;
+
+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+@@ -2096,6 +2097,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ unlock_page(page);
+ page_cache_release(page);
+
++ if (old_size < pos)
++ pagecache_isize_extended(inode, old_size, pos);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+@@ -2313,6 +2316,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
+ err = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
++
++ if (unlikely(fatal_signal_pending(current))) {
++ err = -EINTR;
++ goto out;
++ }
+ }
+
+ /* page covers the boundary, find the boundary offset */
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 58d57da..4366127 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2824,6 +2824,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
+ * the beginning of the name. The sequence number check at the caller will
+ * retry it again when a d_move() does happen. So any garbage in the buffer
+ * due to mismatched pointer and length will be discarded.
++ *
++ * Data dependency barrier is needed to make sure that we see that terminating
++ * NUL. Alpha strikes again, film at 11...
+ */
+ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ {
+@@ -2831,6 +2834,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ u32 dlen = ACCESS_ONCE(name->len);
+ char *p;
+
++ smp_read_barrier_depends();
++
+ *buflen -= dlen + 1;
+ if (*buflen < 0)
+ return -ENAMETOOLONG;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 37fd31e..0498390 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -1354,13 +1354,6 @@ set_qf_format:
+ "not specified.");
+ return 0;
+ }
+- } else {
+- if (sbi->s_jquota_fmt) {
+- ext3_msg(sb, KERN_ERR, "error: journaled quota format "
+- "specified with no journaling "
+- "enabled.");
+- return 0;
+- }
+ }
+ #endif
+ return 1;
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index 3285aa5..b610779 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ __u32 provided, calculated;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
+@@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ __u32 csum;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+@@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
+@@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ __u32 csum;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 62f024c..2a6830a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2110,6 +2110,7 @@ int do_journal_get_write_access(handle_t *handle,
+ #define CONVERT_INLINE_DATA 2
+
+ extern struct inode *ext4_iget(struct super_block *, unsigned long);
++extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+ extern int ext4_write_inode(struct inode *, struct writeback_control *);
+ extern int ext4_setattr(struct dentry *, struct iattr *);
+ extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+@@ -2340,10 +2341,18 @@ extern int ext4_register_li_request(struct super_block *sb,
+ static inline int ext4_has_group_desc_csum(struct super_block *sb)
+ {
+ return EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
++ EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
++ (EXT4_SB(sb)->s_chksum_driver != NULL);
+ }
+
++static inline int ext4_has_metadata_csum(struct super_block *sb)
++{
++ WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
++ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
++ !EXT4_SB(sb)->s_chksum_driver);
++
++ return (EXT4_SB(sb)->s_chksum_driver != NULL);
++}
+ static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
+ {
+ return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 4718891..96a1ce15 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -74,8 +74,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
+ {
+ struct ext4_extent_tail *et;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ et = find_ext4_extent_tail(eh);
+@@ -89,8 +88,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
+ {
+ struct ext4_extent_tail *et;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ et = find_ext4_extent_tail(eh);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 64bb32f1..a8d1a64 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -864,6 +864,10 @@ got:
+ struct buffer_head *block_bitmap_bh;
+
+ block_bitmap_bh = ext4_read_block_bitmap(sb, group);
++ if (!block_bitmap_bh) {
++ err = -EIO;
++ goto out;
++ }
+ BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
+ err = ext4_journal_get_write_access(handle, block_bitmap_bh);
+ if (err) {
+@@ -988,8 +992,7 @@ got:
+ spin_unlock(&sbi->s_next_gen_lock);
+
+ /* Precompute checksum seed for inode metadata */
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(sb)) {
+ __u32 csum;
+ __le32 inum = cpu_to_le32(inode->i_ino);
+ __le32 gen = cpu_to_le32(inode->i_generation);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 82edf5b..8c03b74 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1128,8 +1128,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
+ memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
+ inline_size - EXT4_INLINE_DOTDOT_SIZE);
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ inode->i_size = inode->i_sb->s_blocksize;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index b56062d..3a7e034 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
+
+ if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+ cpu_to_le32(EXT4_OS_LINUX) ||
+- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ !ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ provided = le16_to_cpu(raw->i_checksum_lo);
+@@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+
+ if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+ cpu_to_le32(EXT4_OS_LINUX) ||
+- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ !ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ csum = ext4_inode_csum(inode, raw, ei);
+@@ -2633,6 +2631,20 @@ static int ext4_nonda_switch(struct super_block *sb)
+ return 0;
+ }
+
++/* We always reserve for an inode update; the superblock could be there too */
++static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
++{
++ if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
++ return 1;
++
++ if (pos + len <= 0x7fffffffULL)
++ return 1;
++
++ /* We might need to update the superblock to set LARGE_FILE */
++ return 2;
++}
++
+ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+@@ -2683,7 +2695,8 @@ retry_grab:
+ * of file which has an already mapped buffer.
+ */
+ retry_journal:
+- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
++ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
++ ext4_da_write_credits(inode, pos, len));
+ if (IS_ERR(handle)) {
+ page_cache_release(page);
+ return PTR_ERR(handle);
+@@ -4061,8 +4074,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ ei->i_extra_isize = 0;
+
+ /* Precompute checksum seed for inode metadata */
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(sb)) {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ __u32 csum;
+ __le32 inum = cpu_to_le32(inode->i_ino);
+@@ -4250,6 +4262,13 @@ bad_inode:
+ return ERR_PTR(ret);
+ }
+
++struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
++{
++ if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
++ return ERR_PTR(-EIO);
++ return ext4_iget(sb, ino);
++}
++
+ static int ext4_inode_blocks_set(handle_t *handle,
+ struct ext4_inode *raw_inode,
+ struct ext4_inode_info *ei)
+@@ -4645,8 +4664,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ ext4_orphan_del(NULL, inode);
+ goto err_out;
+ }
+- } else
++ } else {
++ loff_t oldsize = inode->i_size;
++
+ i_size_write(inode, attr->ia_size);
++ pagecache_isize_extended(inode, oldsize, inode->i_size);
++ }
+
+ /*
+ * Blocks are going to be removed from the inode. Wait
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index a2a837f..dfe982d 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -343,8 +343,7 @@ flags_out:
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(inode->i_sb)) {
+ ext4_warning(sb, "Setting inode version is not "
+ "supported with metadata_csum enabled.");
+ return -ENOTTY;
+@@ -544,9 +543,17 @@ group_add_out:
+ }
+
+ case EXT4_IOC_SWAP_BOOT:
++ {
++ int err;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+- return swap_inode_boot_loader(sb, inode);
++ err = mnt_want_write_file(filp);
++ if (err)
++ return err;
++ err = swap_inode_boot_loader(sb, inode);
++ mnt_drop_write_file(filp);
++ return err;
++ }
+
+ case EXT4_IOC_RESIZE_FS: {
+ ext4_fsblk_t n_blocks_count;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 04434ad..1268a1b 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+
+ int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
+@@ -29,8 +28,7 @@ int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+
+ void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index d050e04..2dcbfb6 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -123,8 +123,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ "directory leaf block found instead of index block");
+ return ERR_PTR(-EIO);
+ }
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
++ if (!ext4_has_metadata_csum(inode->i_sb) ||
+ buffer_verified(bh))
+ return bh;
+
+@@ -339,8 +338,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
+ {
+ struct ext4_dir_entry_tail *t;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ t = get_dirent_tail(inode, dirent);
+@@ -361,8 +359,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
+ {
+ struct ext4_dir_entry_tail *t;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ t = get_dirent_tail(inode, dirent);
+@@ -437,8 +434,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
+ struct dx_tail *t;
+ int count_offset, limit, count;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ c = get_dx_countlimit(inode, dirent, &count_offset);
+@@ -467,8 +463,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
+ struct dx_tail *t;
+ int count_offset, limit, count;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ c = get_dx_countlimit(inode, dirent, &count_offset);
+@@ -556,8 +551,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
+ unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+ EXT4_DIR_REC_LEN(2) - infosize;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ entry_space -= sizeof(struct dx_tail);
+ return entry_space / sizeof(struct dx_entry);
+ }
+@@ -566,8 +560,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
+ {
+ unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ entry_space -= sizeof(struct dx_tail);
+ return entry_space / sizeof(struct dx_entry);
+ }
+@@ -1429,7 +1422,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
+ dentry);
+ return ERR_PTR(-EIO);
+ }
+- inode = ext4_iget(dir->i_sb, ino);
++ inode = ext4_iget_normal(dir->i_sb, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ EXT4_ERROR_INODE(dir,
+ "deleted inode referenced: %u",
+@@ -1460,7 +1453,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
+ return ERR_PTR(-EIO);
+ }
+
+- return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
++ return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
+ }
+
+ /*
+@@ -1534,8 +1527,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ int csum_size = 0;
+ int err = 0, i;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ bh2 = ext4_append(handle, dir, &newblock);
+@@ -1704,8 +1696,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
+ int csum_size = 0;
+ int err;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ if (!de) {
+@@ -1772,8 +1763,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+ struct fake_dirent *fde;
+ int csum_size = 0;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ blocksize = dir->i_sb->s_blocksize;
+@@ -1889,8 +1879,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ ext4_lblk_t block, blocks;
+ int csum_size = 0;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ sb = dir->i_sb;
+@@ -2152,8 +2141,7 @@ static int ext4_delete_entry(handle_t *handle,
+ return err;
+ }
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ BUFFER_TRACE(bh, "get_write_access");
+@@ -2372,8 +2360,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+ int csum_size = 0;
+ int err;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index f3b84cd..2400ad1 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1071,7 +1071,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
+ break;
+
+ if (meta_bg == 0)
+- backup_block = group * bpg + blk_off;
++ backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
+ else
+ backup_block = (ext4_group_first_block_no(sb, group) +
+ ext4_bg_has_super(sb, group));
+@@ -1200,8 +1200,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
+ {
+ struct buffer_head *bh;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 0;
+
+ bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a46030d..9fb3e6c 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -140,8 +140,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
+ int ext4_superblock_csum_verify(struct super_block *sb,
+ struct ext4_super_block *es)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ return es->s_checksum == ext4_superblock_csum(sb, es);
+@@ -151,8 +150,7 @@ void ext4_superblock_csum_set(struct super_block *sb)
+ {
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ es->s_checksum = ext4_superblock_csum(sb, es);
+@@ -996,7 +994,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
+ * Currently we don't know the generation for parent directory, so
+ * a generation of 0 means "accept any"
+ */
+- inode = ext4_iget(sb, ino);
++ inode = ext4_iget_normal(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
+@@ -1706,13 +1704,6 @@ static int parse_options(char *options, struct super_block *sb,
+ "not specified");
+ return 0;
+ }
+- } else {
+- if (sbi->s_jquota_fmt) {
+- ext4_msg(sb, KERN_ERR, "journaled quota format "
+- "specified with no journaling "
+- "enabled");
+- return 0;
+- }
+ }
+ #endif
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+@@ -2010,8 +2001,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ __u16 crc = 0;
+ __le32 le_group = cpu_to_le32(block_group);
+
+- if ((sbi->s_es->s_feature_ro_compat &
+- cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
++ if (ext4_has_metadata_csum(sbi->s_sb)) {
+ /* Use new metadata_csum algorithm */
+ __le16 save_csum;
+ __u32 csum32;
+@@ -2029,6 +2019,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ }
+
+ /* old crc16 code */
++ if (!(sbi->s_es->s_feature_ro_compat &
++ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
++ return 0;
++
+ offset = offsetof(struct ext4_group_desc, bg_checksum);
+
+ crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+@@ -3167,8 +3161,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ int compat, incompat;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(sb)) {
+ /* journal checksum v3 */
+ compat = 0;
+ incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+@@ -3475,8 +3468,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ }
+
+ /* Precompute checksum seed for all metadata */
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(sb))
+ sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+ sizeof(es->s_uuid));
+
+@@ -3494,6 +3486,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ #ifdef CONFIG_EXT4_FS_POSIX_ACL
+ set_opt(sb, POSIX_ACL);
+ #endif
++ /* don't forget to enable journal_csum when metadata_csum is enabled. */
++ if (ext4_has_metadata_csum(sb))
++ set_opt(sb, JOURNAL_CHECKSUM);
++
+ if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
+ set_opt(sb, JOURNAL_DATA);
+ else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 55e611c..8825154 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -141,8 +141,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
+ sector_t block_nr,
+ struct ext4_xattr_header *hdr)
+ {
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
++ if (ext4_has_metadata_csum(inode->i_sb) &&
+ (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+ return 0;
+ return 1;
+@@ -152,8 +151,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode,
+ sector_t block_nr,
+ struct ext4_xattr_header *hdr)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+@@ -189,14 +187,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ }
+
+ static int
+-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
++ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
++ void *value_start)
+ {
+- while (!IS_LAST_ENTRY(entry)) {
+- struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
++ struct ext4_xattr_entry *e = entry;
++
++ while (!IS_LAST_ENTRY(e)) {
++ struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
+ if ((void *)next >= end)
+ return -EIO;
+- entry = next;
++ e = next;
+ }
++
++ while (!IS_LAST_ENTRY(entry)) {
++ if (entry->e_value_size != 0 &&
++ (value_start + le16_to_cpu(entry->e_value_offs) <
++ (void *)e + sizeof(__u32) ||
++ value_start + le16_to_cpu(entry->e_value_offs) +
++ le32_to_cpu(entry->e_value_size) > end))
++ return -EIO;
++ entry = EXT4_XATTR_NEXT(entry);
++ }
++
+ return 0;
+ }
+
+@@ -213,7 +225,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
+ return -EIO;
+ if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+ return -EIO;
+- error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
++ error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
++ bh->b_data);
+ if (!error)
+ set_buffer_verified(bh);
+ return error;
+@@ -329,7 +342,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+- error = ext4_xattr_check_names(entry, end);
++ error = ext4_xattr_check_names(entry, end, entry);
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_find_entry(&entry, name_index, name,
+@@ -457,7 +470,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ raw_inode = ext4_raw_inode(&iloc);
+ header = IHDR(inode, raw_inode);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+- error = ext4_xattr_check_names(IFIRST(header), end);
++ error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_list_entries(dentry, IFIRST(header),
+@@ -972,7 +985,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ is->s.here = is->s.first;
+ is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+- error = ext4_xattr_check_names(IFIRST(header), is->s.end);
++ error = ext4_xattr_check_names(IFIRST(header), is->s.end,
++ IFIRST(header));
+ if (error)
+ return error;
+ /* Find the named attribute. */
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 9b329b5..bcbef08 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal,
+ !jbd2_descr_block_csum_verify(journal,
+ bh->b_data)) {
+ err = -EIO;
++ brelse(bh);
+ goto failed;
+ }
+
+diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
+index 413ef89..046fee8 100644
+--- a/fs/jffs2/jffs2_fs_sb.h
++++ b/fs/jffs2/jffs2_fs_sb.h
+@@ -134,8 +134,6 @@ struct jffs2_sb_info {
+ struct rw_semaphore wbuf_sem; /* Protects the write buffer */
+
+ struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+- int wbuf_queued; /* non-zero delayed work is queued */
+- spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */
+
+ unsigned char *oobbuf;
+ int oobavail; /* How many bytes are available for JFFS2 in OOB */
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index a6597d6..09ed551 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
+ struct jffs2_sb_info *c = work_to_sb(work);
+ struct super_block *sb = OFNI_BS_2SFFJ(c);
+
+- spin_lock(&c->wbuf_dwork_lock);
+- c->wbuf_queued = 0;
+- spin_unlock(&c->wbuf_dwork_lock);
+-
+ if (!(sb->s_flags & MS_RDONLY)) {
+ jffs2_dbg(1, "%s()\n", __func__);
+ jffs2_flush_wbuf_gc(c, 0);
+@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+- spin_lock(&c->wbuf_dwork_lock);
+- if (!c->wbuf_queued) {
++ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
++ if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
+ jffs2_dbg(1, "%s()\n", __func__);
+- delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+- queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+- c->wbuf_queued = 1;
+- }
+- spin_unlock(&c->wbuf_dwork_lock);
+ }
+
+ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+
+ /* Initialise write buffer */
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ c->wbuf_pagesize = c->mtd->writesize;
+ c->wbuf_ofs = 0xFFFFFFFF;
+@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ c->wbuf_pagesize = c->mtd->erasesize;
+
+@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
+ c->wbuf_pagesize = c->mtd->writesize;
+@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
+ return 0;
+
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
+ c->wbuf_pagesize = c->mtd->writesize;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 1812f02..6ae664b 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+
+ msg.rpc_proc = &clnt->cl_procinfo[proc];
+ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++ if (status == -ECONNREFUSED) {
++ dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
++ status);
++ rpc_force_rebind(clnt);
++ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++ }
+ if (status < 0)
+ dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ status);
+diff --git a/fs/namei.c b/fs/namei.c
+index dd2f2c5..0dd72c8 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3128,7 +3128,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
+ if (error)
+ goto out2;
+ audit_inode(pathname, nd->path.dentry, 0);
+- error = may_open(&nd->path, op->acc_mode, op->open_flag);
++ /* Don't check for other permissions, the inode was just created */
++ error = may_open(&nd->path, MAY_OPEN, op->open_flag);
+ if (error)
+ goto out2;
+ file->f_path.mnt = nd->path.mnt;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index c7d4a0a..d9bf3ef 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2831,6 +2831,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ /* make sure we can reach put_old from new_root */
+ if (!is_path_reachable(old_mnt, old.dentry, &new))
+ goto out4;
++ /* make certain new is below the root */
++ if (!is_path_reachable(new_mnt, new.dentry, &root))
++ goto out4;
+ root_mp->m_count++; /* pin it so it won't go away */
+ lock_mount_hash();
+ detach_mnt(new_mnt, &parent_path);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index f23a6ca..86f5d3e 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1243,7 +1243,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
+ */
+ if (argp->opcnt == resp->opcnt)
+ return false;
+-
++ if (next->opnum == OP_ILLEGAL)
++ return false;
+ nextd = OPDESC(next);
+ /*
+ * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 1282384..14120a3 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -319,10 +319,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
+ compressed ? ".enc.z" : "");
+ break;
+ case PSTORE_TYPE_CONSOLE:
+- sprintf(name, "console-%s", psname);
++ sprintf(name, "console-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_FTRACE:
+- sprintf(name, "ftrace-%s", psname);
++ sprintf(name, "ftrace-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_MCE:
+ sprintf(name, "mce-%s-%lld", psname, id);
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index ce87c90..89da957 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -637,7 +637,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ dqstats_inc(DQST_LOOKUPS);
+ err = sb->dq_op->write_dquot(dquot);
+ if (!ret && err)
+- err = ret;
++ ret = err;
+ dqput(dquot);
+ spin_lock(&dq_list_lock);
+ }
+diff --git a/fs/super.c b/fs/super.c
+index 7624267..88a6bc6 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -81,6 +81,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
+ inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
+ dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+ total_objects = dentries + inodes + fs_objects + 1;
++ if (!total_objects)
++ total_objects = 1;
+
+ /* proportion the scan between the caches */
+ dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
+diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
+index ff82293..26b69b2 100644
+--- a/fs/ubifs/commit.c
++++ b/fs/ubifs/commit.c
+@@ -166,15 +166,10 @@ static int do_commit(struct ubifs_info *c)
+ err = ubifs_orphan_end_commit(c);
+ if (err)
+ goto out;
+- old_ltail_lnum = c->ltail_lnum;
+- err = ubifs_log_end_commit(c, new_ltail_lnum);
+- if (err)
+- goto out;
+ err = dbg_check_old_index(c, &zroot);
+ if (err)
+ goto out;
+
+- mutex_lock(&c->mst_mutex);
+ c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
+ c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
+ c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
+@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+ else
+ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
+- err = ubifs_write_master(c);
+- mutex_unlock(&c->mst_mutex);
++
++ old_ltail_lnum = c->ltail_lnum;
++ err = ubifs_log_end_commit(c, new_ltail_lnum);
+ if (err)
+ goto out;
+
+diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
+index a902c59..8d59de8 100644
+--- a/fs/ubifs/log.c
++++ b/fs/ubifs/log.c
+@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
+ h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
+ t = (long long)c->ltail_lnum * c->leb_size;
+
+- if (h >= t)
++ if (h > t)
+ return c->log_bytes - h + t;
+- else
++ else if (h != t)
+ return t - h;
++ else if (c->lhead_lnum != c->ltail_lnum)
++ return 0;
++ else
++ return c->log_bytes;
+ }
+
+ /**
+@@ -447,9 +451,9 @@ out:
+ * @ltail_lnum: new log tail LEB number
+ *
+ * This function is called on when the commit operation was finished. It
+- * moves log tail to new position and unmaps LEBs which contain obsolete data.
+- * Returns zero in case of success and a negative error code in case of
+- * failure.
++ * moves log tail to new position and updates the master node so that it stores
++ * the new log tail LEB number. Returns zero in case of success and a negative
++ * error code in case of failure.
+ */
+ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ {
+@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ spin_unlock(&c->buds_lock);
+
+ err = dbg_check_bud_bytes(c);
++ if (err)
++ goto out;
+
++ err = ubifs_write_master(c);
++
++out:
+ mutex_unlock(&c->log_mutex);
+ return err;
+ }
+diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
+index ab83ace..1a4bb9e 100644
+--- a/fs/ubifs/master.c
++++ b/fs/ubifs/master.c
+@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
+ * ubifs_write_master - write master node.
+ * @c: UBIFS file-system description object
+ *
+- * This function writes the master node. The caller has to take the
+- * @c->mst_mutex lock before calling this function. Returns zero in case of
+- * success and a negative error code in case of failure. The master node is
+- * written twice to enable recovery.
++ * This function writes the master node. Returns zero in case of success and a
++ * negative error code in case of failure. The master node is written twice to
++ * enable recovery.
+ */
+ int ubifs_write_master(struct ubifs_info *c)
+ {
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 5ded849..94d9a64 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1957,7 +1957,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
+ mutex_init(&c->lp_mutex);
+ mutex_init(&c->tnc_mutex);
+ mutex_init(&c->log_mutex);
+- mutex_init(&c->mst_mutex);
+ mutex_init(&c->umount_mutex);
+ mutex_init(&c->bu_mutex);
+ mutex_init(&c->write_reserve_mutex);
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index e8c8cfe..7ab9c71 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1042,7 +1042,6 @@ struct ubifs_debug_info;
+ *
+ * @mst_node: master node
+ * @mst_offs: offset of valid master node
+- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ *
+ * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+@@ -1282,7 +1281,6 @@ struct ubifs_info {
+
+ struct ubifs_mst_node *mst_node;
+ int mst_offs;
+- struct mutex mst_mutex;
+
+ int max_bu_buf_len;
+ struct mutex bu_mutex;
+diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
+index c6ff3cf5..0eaaa2d 100644
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -321,7 +321,6 @@ reread:
+ * Initialize the mount structure from the superblock.
+ */
+ xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
+- xfs_sb_quota_from_disk(sbp);
+
+ /*
+ * If we haven't validated the superblock, do so now before we try
+diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
+index 1e11679..4afd393 100644
+--- a/fs/xfs/xfs_sb.c
++++ b/fs/xfs/xfs_sb.c
+@@ -397,10 +397,11 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp)
+ }
+ }
+
+-void
+-xfs_sb_from_disk(
++static void
++__xfs_sb_from_disk(
+ struct xfs_sb *to,
+- xfs_dsb_t *from)
++ xfs_dsb_t *from,
++ bool convert_xquota)
+ {
+ to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
+ to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
+@@ -456,6 +457,17 @@ xfs_sb_from_disk(
+ to->sb_pad = 0;
+ to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
+ to->sb_lsn = be64_to_cpu(from->sb_lsn);
++ /* Convert on-disk flags to in-memory flags? */
++ if (convert_xquota)
++ xfs_sb_quota_from_disk(to);
++}
++
++void
++xfs_sb_from_disk(
++ struct xfs_sb *to,
++ xfs_dsb_t *from)
++{
++ __xfs_sb_from_disk(to, from, true);
+ }
+
+ static inline void
+@@ -571,7 +583,11 @@ xfs_sb_verify(
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_sb sb;
+
+- xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
++ /*
++ * Use call variant which doesn't convert quota flags from disk
++ * format, because xfs_mount_validate_sb checks the on-disk flags.
++ */
++ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);
+
+ /*
+ * Only check the in progress field for the primary superblock as
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index bcec4c4..ca52de5 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -74,7 +74,6 @@
+ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+- {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 4afa4f8..a693c6d 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1232,10 +1232,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
+ static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+ {
+ unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+- unsigned int alignment = (sector << 9) & (granularity - 1);
++ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+
+- return (granularity + lim->alignment_offset - alignment)
+- & (granularity - 1);
++ return (granularity + lim->alignment_offset - alignment) % granularity;
+ }
+
+ static inline int bdev_alignment_offset(struct block_device *bdev)
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 31b9d29..00c88fc 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -286,6 +286,7 @@ struct hid_item {
+ #define HID_QUIRK_HIDINPUT_FORCE 0x00000080
+ #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
+ #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
++#define HID_QUIRK_ALWAYS_POLL 0x00000400
+ #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
+ #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
+ #define HID_QUIRK_NO_INIT_REPORTS 0x20000000
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index c1b7414..0a0b024 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1123,6 +1123,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+
+ extern void truncate_pagecache(struct inode *inode, loff_t new);
+ extern void truncate_setsize(struct inode *inode, loff_t newsize);
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
+ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
+ int truncate_inode_page(struct address_space *mapping, struct page *page);
+ int generic_error_remove_page(struct address_space *mapping, struct page *page);
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 435cb99..3f8144d 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -215,14 +215,12 @@ extern int of_property_read_u64(const struct device_node *np,
+ extern int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string);
+-extern int of_property_read_string_index(struct device_node *np,
+- const char *propname,
+- int index, const char **output);
+ extern int of_property_match_string(struct device_node *np,
+ const char *propname,
+ const char *string);
+-extern int of_property_count_strings(struct device_node *np,
+- const char *propname);
++extern int of_property_read_string_helper(struct device_node *np,
++ const char *propname,
++ const char **out_strs, size_t sz, int index);
+ extern int of_device_is_compatible(const struct device_node *device,
+ const char *);
+ extern int of_device_is_available(const struct device_node *device);
+@@ -422,15 +420,9 @@ static inline int of_property_read_string(struct device_node *np,
+ return -ENOSYS;
+ }
+
+-static inline int of_property_read_string_index(struct device_node *np,
+- const char *propname, int index,
+- const char **out_string)
+-{
+- return -ENOSYS;
+-}
+-
+-static inline int of_property_count_strings(struct device_node *np,
+- const char *propname)
++static inline int of_property_read_string_helper(struct device_node *np,
++ const char *propname,
++ const char **out_strs, size_t sz, int index)
+ {
+ return -ENOSYS;
+ }
+@@ -536,6 +528,70 @@ static inline struct device_node *of_find_matching_node(
+ }
+
+ /**
++ * of_property_read_string_array() - Read an array of strings from a multiple
++ * strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_strs: output array of string pointers.
++ * @sz: number of array elements to read.
++ *
++ * Search for a property in a device tree node and retrieve a list of
++ * null-terminated string values (pointer to data, not a copy) in that property.
++ *
++ * If @out_strs is NULL, the number of strings in the property is returned.
++ */
++static inline int of_property_read_string_array(struct device_node *np,
++ const char *propname, const char **out_strs,
++ size_t sz)
++{
++ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
++}
++
++/**
++ * of_property_count_strings() - Find and return the number of strings from a
++ * multiple strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ *
++ * Search for a property in a device tree node and retrieve the number of null
++ * terminated strings contained in it. Returns the number of strings on
++ * success, -EINVAL if the property does not exist, -ENODATA if property
++ * does not have a value, and -EILSEQ if the string is not null-terminated
++ * within the length of the property data.
++ */
++static inline int of_property_count_strings(struct device_node *np,
++ const char *propname)
++{
++ return of_property_read_string_helper(np, propname, NULL, 0, 0);
++}
++
++/**
++ * of_property_read_string_index() - Find and read a string from a multiple
++ * strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @index: index of the string in the list of strings
++ * @out_string: pointer to null terminated return string, modified only if
++ * return value is 0.
++ *
++ * Search for a property in a device tree node and retrieve a null
++ * terminated string value (pointer to data, not a copy) in the list of strings
++ * contained in that property.
++ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
++ * property does not have a value, and -EILSEQ if the string is not
++ * null-terminated within the length of the property data.
++ *
++ * The out_string pointer is modified only if a valid string can be decoded.
++ */
++static inline int of_property_read_string_index(struct device_node *np,
++ const char *propname,
++ int index, const char **output)
++{
++ int rc = of_property_read_string_helper(np, propname, output, 1, index);
++ return rc < 0 ? rc : 0;
++}
++
++/**
+ * of_property_read_bool - Findfrom a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 4cd6267..17f0949 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
+ extern unsigned long oom_badness(struct task_struct *p,
+ struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ unsigned long totalpages);
++
++extern int oom_kills_count(void);
++extern void note_oom_kill(void);
+ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+ struct mem_cgroup *memcg, nodemask_t *nodemask,
+diff --git a/include/linux/string.h b/include/linux/string.h
+index ac889c5..0ed878d 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+- const void *from, size_t available);
++ const void *from, size_t available);
+
+ /**
+ * strstarts - does @str start with @prefix?
+@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
+
+-extern size_t memweight(const void *ptr, size_t bytes);
++size_t memweight(const void *ptr, size_t bytes);
++void memzero_explicit(void *s, size_t count);
+
+ /**
+ * kbasename - return the last part of a pathname.
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index 8097b9d..51009d2 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -340,6 +340,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
+ #define XPRT_CONNECTION_ABORT (7)
+ #define XPRT_CONNECTION_CLOSE (8)
+ #define XPRT_CONGESTED (9)
++#define XPRT_CONNECTION_REUSE (10)
+
+ static inline void xprt_set_connected(struct rpc_xprt *xprt)
+ {
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 49587dc..8b96ae2 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -33,4 +33,7 @@
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+
++/* device can't handle device_qualifier descriptor requests */
++#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 9ac6578..a60948d 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -660,6 +660,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+
++void ipv6_proxy_select_ident(struct sk_buff *skb);
++
+ int ip6_dst_hoplimit(struct dst_entry *dst);
+
+ /*
+diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
+index 87792a5..33b7395 100644
+--- a/include/uapi/drm/vmwgfx_drm.h
++++ b/include/uapi/drm/vmwgfx_drm.h
+@@ -29,7 +29,7 @@
+ #define __VMWGFX_DRM_H__
+
+ #ifndef __KERNEL__
+-#include <drm.h>
++#include <drm/drm.h>
+ #endif
+
+ #define DRM_VMW_MAX_SURFACE_FACES 6
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index aa6a8aa..8f9279b 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
+ if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
+ return false;
+
++ if (test_thread_flag(TIF_MEMDIE))
++ return false;
++
+ if (pm_nosig_freezing || cgroup_freezing(p))
+ return true;
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 6716a1f..1d679a6 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1841,7 +1841,9 @@ static void free_module(struct module *mod)
+
+ /* We leave it in list to prevent duplicate loads, but make sure
+ * that noone uses it while it's being deconstructed. */
++ mutex_lock(&module_mutex);
+ mod->state = MODULE_STATE_UNFORMED;
++ mutex_unlock(&module_mutex);
+
+ /* Remove dynamic debug info */
+ ddebug_remove_module(mod->name);
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 424c2d4..77e6b83 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -634,6 +634,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ goto out;
+ }
+ } else {
++ memset(&event.sigev_value, 0, sizeof(event.sigev_value));
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+ event.sigev_value.sival_int = new_timer->it_id;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 37170d4..126586a 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -492,8 +492,14 @@ int hibernation_restore(int platform_mode)
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+ error = resume_target_kernel(platform_mode);
+- dpm_resume_end(PMSG_RECOVER);
++ /*
++ * The above should either succeed and jump to the new kernel,
++ * or return with an error. Otherwise things are just
++ * undefined, so let's be paranoid.
++ */
++ BUG_ON(!error);
+ }
++ dpm_resume_end(PMSG_RECOVER);
+ pm_restore_gfp_mask();
+ ftrace_start();
+ resume_console();
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 14f9a8d..f1fe7ec 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -107,6 +107,28 @@ static int try_to_freeze_tasks(bool user_only)
+ return todo ? -EBUSY : 0;
+ }
+
++/*
++ * Returns true if all freezable tasks (except for current) are frozen already
++ */
++static bool check_frozen_processes(void)
++{
++ struct task_struct *g, *p;
++ bool ret = true;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ if (p != current && !freezer_should_skip(p) &&
++ !frozen(p)) {
++ ret = false;
++ goto done;
++ }
++ }
++done:
++ read_unlock(&tasklist_lock);
++
++ return ret;
++}
++
+ /**
+ * freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen. The same process that calls
+@@ -117,6 +139,7 @@ static int try_to_freeze_tasks(bool user_only)
+ int freeze_processes(void)
+ {
+ int error;
++ int oom_kills_saved;
+
+ error = __usermodehelper_disable(UMH_FREEZING);
+ if (error)
+@@ -130,12 +153,27 @@ int freeze_processes(void)
+
+ printk("Freezing user space processes ... ");
+ pm_freezing = true;
++ oom_kills_saved = oom_kills_count();
+ error = try_to_freeze_tasks(true);
+ if (!error) {
+- printk("done.");
+ __usermodehelper_set_disable_depth(UMH_DISABLED);
+ oom_killer_disable();
++
++ /*
++ * There might have been an OOM kill while we were
++ * freezing tasks and the killed task might still be
++ * on its way out, so we have to double-check for the race.
++ */
++ if (oom_kills_count() != oom_kills_saved &&
++ !check_frozen_processes()) {
++ __usermodehelper_set_disable_depth(UMH_ENABLED);
++ printk("OOM in progress.");
++ error = -EBUSY;
++ goto done;
++ }
++ printk("done.");
+ }
++done:
+ printk("\n");
+ BUG_ON(in_atomic());
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 677ebad..9a3f3c4 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1895,6 +1895,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
+ #ifdef CONFIG_SMP
+ inline struct dl_bw *dl_bw_of(int i)
+ {
++ rcu_lockdep_assert(rcu_read_lock_sched_held(),
++ "sched RCU must be held");
+ return &cpu_rq(i)->rd->dl_bw;
+ }
+
+@@ -1903,6 +1905,8 @@ static inline int dl_bw_cpus(int i)
+ struct root_domain *rd = cpu_rq(i)->rd;
+ int cpus = 0;
+
++ rcu_lockdep_assert(rcu_read_lock_sched_held(),
++ "sched RCU must be held");
+ for_each_cpu_and(i, rd->span, cpu_active_mask)
+ cpus++;
+
+@@ -3937,13 +3941,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+ * root_domain.
+ */
+ #ifdef CONFIG_SMP
+- if (task_has_dl_policy(p)) {
+- const struct cpumask *span = task_rq(p)->rd->span;
+-
+- if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
++ if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
++ rcu_read_lock();
++ if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
+ retval = -EBUSY;
++ rcu_read_unlock();
+ goto out_unlock;
+ }
++ rcu_read_unlock();
+ }
+ #endif
+ again:
+@@ -7458,6 +7463,8 @@ static int sched_dl_global_constraints(void)
+ int cpu, ret = 0;
+ unsigned long flags;
+
++ rcu_read_lock();
++
+ /*
+ * Here we want to check the bandwidth not being set to some
+ * value smaller than the currently allocated bandwidth in
+@@ -7479,6 +7486,8 @@ static int sched_dl_global_constraints(void)
+ break;
+ }
+
++ rcu_read_unlock();
++
+ return ret;
+ }
+
+@@ -7494,6 +7503,7 @@ static void sched_dl_do_global(void)
+ if (global_rt_runtime() != RUNTIME_INF)
+ new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+
++ rcu_read_lock();
+ /*
+ * FIXME: As above...
+ */
+@@ -7504,6 +7514,7 @@ static void sched_dl_do_global(void)
+ dl_b->bw = new_bw;
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ }
++ rcu_read_unlock();
+ }
+
+ static int sched_rt_global_validate(void)
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index 759d5e0..7e3cd7a 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+ int size;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+
+ /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ int syscall_nr;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+
+ /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ int size;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+ if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
+ return;
+@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ int size;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+ if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
+ return;
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 06f7e4f..e5c4ebe 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
+ lower = src[off + k];
+ if (left && off + k == lim - 1)
+ lower &= mask;
+- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
++ dst[k] = lower >> rem;
++ if (rem)
++ dst[k] |= upper << (BITS_PER_LONG - rem);
+ if (left && k == lim - 1)
+ dst[k] &= mask;
+ }
+@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
+ upper = src[k];
+ if (left && k == lim - 1)
+ upper &= (1UL << left) - 1;
+- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
++ dst[k + off] = upper << rem;
++ if (rem)
++ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
+ if (left && k + off == lim - 1)
+ dst[k + off] &= (1UL << left) - 1;
+ }
+diff --git a/lib/string.c b/lib/string.c
+index e5878de..43d0781 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ * keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it just invokes memset() internally.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++ memset(s, 0, count);
++ OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+ * memcpy - Copy one area of memory to another
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 718bfa1..331faa5 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -199,7 +199,7 @@ retry:
+ preempt_disable();
+ if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+ preempt_enable();
+- __free_page(zero_page);
++ __free_pages(zero_page, compound_order(zero_page));
+ goto retry;
+ }
+
+@@ -231,7 +231,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+ if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ struct page *zero_page = xchg(&huge_zero_page, NULL);
+ BUG_ON(zero_page == NULL);
+- __free_page(zero_page);
++ __free_pages(zero_page, compound_order(zero_page));
+ return HPAGE_PMD_NR;
+ }
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 9b35da2..b58d4fb 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -292,6 +292,9 @@ struct mem_cgroup {
+ /* vmpressure notifications */
+ struct vmpressure vmpressure;
+
++ /* css_online() has been completed */
++ int initialized;
++
+ /*
+ * the counter to account for mem+swap usage.
+ */
+@@ -1127,9 +1130,21 @@ skip_node:
+ * skipping css reference should be safe.
+ */
+ if (next_css) {
+- if ((next_css == &root->css) ||
+- ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+- return mem_cgroup_from_css(next_css);
++ struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
++
++ if (next_css == &root->css)
++ return memcg;
++
++ if (css_tryget(next_css)) {
++ /*
++ * Make sure the memcg is initialized:
++ * mem_cgroup_css_online() orders the
++ * initialization against setting the flag.
++ */
++ if (smp_load_acquire(&memcg->initialized))
++ return memcg;
++ css_put(next_css);
++ }
+
+ prev_css = next_css;
+ goto skip_node;
+@@ -6538,6 +6553,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
+ {
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
++ int ret;
+
+ if (css->cgroup->id > MEM_CGROUP_ID_MAX)
+ return -ENOSPC;
+@@ -6574,7 +6590,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
+ }
+ mutex_unlock(&memcg_create_mutex);
+
+- return memcg_init_kmem(memcg, &mem_cgroup_subsys);
++ ret = memcg_init_kmem(memcg, &mem_cgroup_subsys);
++ if (ret)
++ return ret;
++
++ /*
++ * Make sure the memcg is initialized: mem_cgroup_iter()
++ * orders reading memcg->initialized against its callers
++ * reading the memcg members.
++ */
++ smp_store_release(&memcg->initialized, 1);
++
++ return 0;
+ }
+
+ /*
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 3291e82..171c00f 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -406,6 +406,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ dump_tasks(memcg, nodemask);
+ }
+
++/*
++ * Number of OOM killer invocations (including memcg OOM killer).
++ * Primarily used by PM freezer to check for potential races with
++ * OOM killed frozen task.
++ */
++static atomic_t oom_kills = ATOMIC_INIT(0);
++
++int oom_kills_count(void)
++{
++ return atomic_read(&oom_kills);
++}
++
++void note_oom_kill(void)
++{
++ atomic_inc(&oom_kills);
++}
++
+ #define K(x) ((x) << (PAGE_SHIFT-10))
+ /*
+ * Must be called while holding a reference to p, which will be released upon
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ff0f6b1..7b2611a 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1957,7 +1957,7 @@ zonelist_scan:
+ if (alloc_flags & ALLOC_FAIR) {
+ if (!zone_local(preferred_zone, zone))
+ continue;
+- if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
++ if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0)
+ continue;
+ }
+ /*
+@@ -2196,6 +2196,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ }
+
+ /*
++ * PM-freezer should be notified that there might be an OOM killer on
++ * its way to kill and wake somebody up. This is too early and we might
++ * end up not killing anything but false positives are acceptable.
++ * See freeze_processes.
++ */
++ note_oom_kill();
++
++ /*
+ * Go through the zonelist yet one more time, keep very high watermark
+ * here, this is only to catch a parallel oom killing, we must fail if
+ * we're still under heavy pressure.
+@@ -5662,9 +5670,8 @@ static void __setup_per_zone_wmarks(void)
+ zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH,
+- high_wmark_pages(zone) -
+- low_wmark_pages(zone) -
+- zone_page_state(zone, NR_ALLOC_BATCH));
++ high_wmark_pages(zone) - low_wmark_pages(zone) -
++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
+
+ setup_zone_migrate_reserve(zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
+index cfd1628..0e9a319 100644
+--- a/mm/page_cgroup.c
++++ b/mm/page_cgroup.c
+@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
+ sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+
+ BUG_ON(PageReserved(page));
++ kmemleak_free(addr);
+ free_pages_exact(addr, table_size);
+ }
+ }
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 8cd4308..a2a54a8 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1917,8 +1917,6 @@ void __init setup_per_cpu_areas(void)
+
+ if (pcpu_setup_first_chunk(ai, fc) < 0)
+ panic("Failed to initialize percpu areas.");
+-
+- pcpu_free_alloc_info(ai);
+ }
+
+ #endif /* CONFIG_SMP */
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 353b683..ac18edc 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -20,6 +20,7 @@
+ #include <linux/buffer_head.h> /* grr. try_to_release_page,
+ do_invalidatepage */
+ #include <linux/cleancache.h>
++#include <linux/rmap.h>
+ #include "internal.h"
+
+
+@@ -613,12 +614,67 @@ EXPORT_SYMBOL(truncate_pagecache);
+ */
+ void truncate_setsize(struct inode *inode, loff_t newsize)
+ {
++ loff_t oldsize = inode->i_size;
++
+ i_size_write(inode, newsize);
++ if (newsize > oldsize)
++ pagecache_isize_extended(inode, oldsize, newsize);
+ truncate_pagecache(inode, newsize);
+ }
+ EXPORT_SYMBOL(truncate_setsize);
+
+ /**
++ * pagecache_isize_extended - update pagecache after extension of i_size
++ * @inode: inode for which i_size was extended
++ * @from: original inode size
++ * @to: new inode size
++ *
++ * Handle extension of inode size either caused by extending truncate or by
++ * write starting after current i_size. We mark the page straddling current
++ * i_size RO so that page_mkwrite() is called on the nearest write access to
++ * the page. This way the filesystem can be sure that page_mkwrite() is called on
++ * the page before user writes to the page via mmap after the i_size has been
++ * changed.
++ *
++ * The function must be called after i_size is updated so that page fault
++ * coming after we unlock the page will already see the new i_size.
++ * The function must be called while we still hold i_mutex - this not only
++ * makes sure i_size is stable but also that userspace cannot observe new
++ * i_size value before we are prepared to store mmap writes at new inode size.
++ */
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
++{
++ int bsize = 1 << inode->i_blkbits;
++ loff_t rounded_from;
++ struct page *page;
++ pgoff_t index;
++
++ WARN_ON(to > inode->i_size);
++
++ if (from >= to || bsize == PAGE_CACHE_SIZE)
++ return;
++ /* Page straddling @from will not have any hole block created? */
++ rounded_from = round_up(from, bsize);
++ if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
++ return;
++
++ index = from >> PAGE_CACHE_SHIFT;
++ page = find_lock_page(inode->i_mapping, index);
++ /* Page not cached? Nothing to do */
++ if (!page)
++ return;
++ /*
++ * See clear_page_dirty_for_io() for details why set_page_dirty()
++ * is needed.
++ */
++ if (page_mkclean(page))
++ set_page_dirty(page);
++ unlock_page(page);
++ page_cache_release(page);
++}
++EXPORT_SYMBOL(pagecache_isize_extended);
++
++/**
+ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+ * @inode: inode
+ * @lstart: offset of beginning of hole
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 0a31298..2e87eec 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -291,7 +291,11 @@ int ceph_msgr_init(void)
+ if (ceph_msgr_slab_init())
+ return -ENOMEM;
+
+- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
++ /*
++ * The number of active work items is limited by the number of
++ * connections, so leave @max_active at default.
++ */
++ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
+ if (ceph_msgr_wq)
+ return 0;
+
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 9d43468..017fa5e 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -535,7 +535,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
+ return 1;
+
+ attrlen = rtnh_attrlen(rtnh);
+- if (attrlen < 0) {
++ if (attrlen > 0) {
+ struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 2d24f29..8c8493e 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -50,7 +50,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+
+- ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
++ ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
+ if (unlikely(ghl < sizeof(*greh)))
+ goto out;
+
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index ed88d78..844323b 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1487,6 +1487,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ struct sk_buff *nskb;
+ struct sock *sk;
+ struct inet_sock *inet;
++ int err;
+
+ if (ip_options_echo(&replyopts.opt.opt, skb))
+ return;
+@@ -1525,8 +1526,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ sock_net_set(sk, net);
+ __skb_queue_head_init(&sk->sk_write_queue);
+ sk->sk_sndbuf = sysctl_wmem_default;
+- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
+- &ipc, &rt, MSG_DONTWAIT);
++ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
++ len, 0, &ipc, &rt, MSG_DONTWAIT);
++ if (unlikely(err)) {
++ ip_flush_pending_frames(sk);
++ goto out;
++ }
++
+ nskb = skb_peek(&sk->sk_write_queue);
+ if (nskb) {
+ if (arg->csumoffset >= 0)
+@@ -1538,7 +1544,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
+ ip_push_pending_frames(sk, &fl4);
+ }
+-
++out:
+ put_cpu_var(unicast_sock);
+
+ ip_rt_put(rt);
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index 65b664d..791a419 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ skb_pull_rcsum(skb, hdr_len);
+
+ if (inner_proto == htons(ETH_P_TEB)) {
+- struct ethhdr *eh = (struct ethhdr *)skb->data;
++ struct ethhdr *eh;
+
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ return -ENOMEM;
+
++ eh = (struct ethhdr *)skb->data;
+ if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+ skb->protocol = eh->h_proto;
+ else
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index f7d71ec..29d240b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2954,61 +2954,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
+ #endif
+
+ #ifdef CONFIG_TCP_MD5SIG
+-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
++static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
+ static DEFINE_MUTEX(tcp_md5sig_mutex);
+-
+-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+- struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+-
+- if (p->md5_desc.tfm)
+- crypto_free_hash(p->md5_desc.tfm);
+- }
+- free_percpu(pool);
+-}
++static bool tcp_md5sig_pool_populated = false;
+
+ static void __tcp_alloc_md5sig_pool(void)
+ {
+ int cpu;
+- struct tcp_md5sig_pool __percpu *pool;
+-
+- pool = alloc_percpu(struct tcp_md5sig_pool);
+- if (!pool)
+- return;
+
+ for_each_possible_cpu(cpu) {
+- struct crypto_hash *hash;
+-
+- hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+- if (IS_ERR_OR_NULL(hash))
+- goto out_free;
++ if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
++ struct crypto_hash *hash;
+
+- per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
++ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
++ if (IS_ERR_OR_NULL(hash))
++ return;
++ per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
++ }
+ }
+- /* before setting tcp_md5sig_pool, we must commit all writes
+- * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
++ /* before setting tcp_md5sig_pool_populated, we must commit all writes
++ * to memory. See smp_rmb() in tcp_get_md5sig_pool()
+ */
+ smp_wmb();
+- tcp_md5sig_pool = pool;
+- return;
+-out_free:
+- __tcp_free_md5sig_pool(pool);
++ tcp_md5sig_pool_populated = true;
+ }
+
+ bool tcp_alloc_md5sig_pool(void)
+ {
+- if (unlikely(!tcp_md5sig_pool)) {
++ if (unlikely(!tcp_md5sig_pool_populated)) {
+ mutex_lock(&tcp_md5sig_mutex);
+
+- if (!tcp_md5sig_pool)
++ if (!tcp_md5sig_pool_populated)
+ __tcp_alloc_md5sig_pool();
+
+ mutex_unlock(&tcp_md5sig_mutex);
+ }
+- return tcp_md5sig_pool != NULL;
++ return tcp_md5sig_pool_populated;
+ }
+ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+@@ -3022,13 +3003,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+ */
+ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
+ {
+- struct tcp_md5sig_pool __percpu *p;
+-
+ local_bh_disable();
+- p = ACCESS_ONCE(tcp_md5sig_pool);
+- if (p)
+- return __this_cpu_ptr(p);
+
++ if (tcp_md5sig_pool_populated) {
++ /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
++ smp_rmb();
++ return this_cpu_ptr(&tcp_md5sig_pool);
++ }
+ local_bh_enable();
+ return NULL;
+ }
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 798eb0f..ae4a06b 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -3,10 +3,43 @@
+ * not configured or static. These functions are needed by GSO/GRO implementation.
+ */
+ #include <linux/export.h>
++#include <net/ip.h>
+ #include <net/ipv6.h>
+ #include <net/ip6_fib.h>
+ #include <net/addrconf.h>
+
++/* This function exists only for tap drivers that must support broken
++ * clients requesting UFO without specifying an IPv6 fragment ID.
++ *
++ * This is similar to ipv6_select_ident() but we use an independent hash
++ * seed to limit information leakage.
++ *
++ * The network header must be set before calling this.
++ */
++void ipv6_proxy_select_ident(struct sk_buff *skb)
++{
++ static u32 ip6_proxy_idents_hashrnd __read_mostly;
++ struct in6_addr buf[2];
++ struct in6_addr *addrs;
++ u32 hash, id;
++
++ addrs = skb_header_pointer(skb,
++ skb_network_offset(skb) +
++ offsetof(struct ipv6hdr, saddr),
++ sizeof(buf), buf);
++ if (!addrs)
++ return;
++
++ net_get_random_once(&ip6_proxy_idents_hashrnd,
++ sizeof(ip6_proxy_idents_hashrnd));
++
++ hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
++ hash = __ipv6_addr_jhash(&addrs[0], hash);
++
++ id = ip_idents_reserve(hash, 1);
++ skb_shinfo(skb)->ip6_frag_id = htonl(id);
++}
++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index 22b223f..74350c3 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -462,7 +462,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
+ */
+ if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ u32 basic_rates = vif->bss_conf.basic_rates;
+- s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
++ s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
+
+ rate = &sband->bitrates[rates[0].idx];
+
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index c375d73..7c177bc 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -707,7 +707,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+ * after validation, the socket and the ring may only be used by a
+ * single process, otherwise we fall back to copying.
+ */
+- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
++ if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+ atomic_read(&nlk->mapped) > 1)
+ excl = false;
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 3ea5cda..5ff8b87 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -533,6 +533,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
+
+ if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
+ clnt->cl_autobind = 1;
++ if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
++ clnt->cl_noretranstimeo = 1;
+ if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
+ clnt->cl_discrtry = 1;
+ if (!(args->flags & RPC_CLNT_CREATE_QUIET))
+@@ -571,6 +573,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
+ /* Turn off autobind on clones */
+ new->cl_autobind = 0;
+ new->cl_softrtry = clnt->cl_softrtry;
++ new->cl_noretranstimeo = clnt->cl_noretranstimeo;
+ new->cl_discrtry = clnt->cl_discrtry;
+ new->cl_chatty = clnt->cl_chatty;
+ return new;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 0addefc..41c2f9d 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -842,6 +842,8 @@ static void xs_error_report(struct sock *sk)
+ dprintk("RPC: xs_error_report client %p, error=%d...\n",
+ xprt, -err);
+ trace_rpc_socket_error(xprt, sk->sk_socket, err);
++ if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state))
++ goto out;
+ xprt_wake_pending_tasks(xprt, err);
+ out:
+ read_unlock_bh(&sk->sk_callback_lock);
+@@ -2251,7 +2253,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
+ &xprt->state);
+ /* "close" the socket, preserving the local port */
++ set_bit(XPRT_CONNECTION_REUSE, &xprt->state);
+ xs_tcp_reuse_connection(transport);
++ clear_bit(XPRT_CONNECTION_REUSE, &xprt->state);
+
+ if (abort_and_exit)
+ goto out_eagain;
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 3c5cbb9..7e71e06 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -269,6 +269,13 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ goto out;
+ }
+ evm_status = evm_verify_current_integrity(dentry);
++ if (evm_status == INTEGRITY_NOXATTRS) {
++ struct integrity_iint_cache *iint;
++
++ iint = integrity_iint_find(dentry->d_inode);
++ if (iint && (iint->flags & IMA_NEW_FILE))
++ return 0;
++ }
+ out:
+ if (evm_status != INTEGRITY_PASS)
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+@@ -296,9 +303,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ {
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+- if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+- && (xattr_data->type == EVM_XATTR_HMAC))
+- return -EPERM;
++ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
++ if (!xattr_value_len)
++ return -EINVAL;
++ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
++ return -EPERM;
++ }
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ }
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index e294b86..47b5c69 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -470,6 +470,7 @@ next_inode:
+ list_entry(sbsec->isec_head.next,
+ struct inode_security_struct, list);
+ struct inode *inode = isec->inode;
++ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+ inode = igrab(inode);
+ if (inode) {
+@@ -478,7 +479,6 @@ next_inode:
+ iput(inode);
+ }
+ spin_lock(&sbsec->isec_lock);
+- list_del_init(&isec->list);
+ goto next_inode;
+ }
+ spin_unlock(&sbsec->isec_lock);
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index af49721..c4ac3c1 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -206,6 +206,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
+ if (err < 0)
+ return err;
+
++ if (clear_user(src, sizeof(*src)))
++ return -EFAULT;
+ if (put_user(status.state, &src->state) ||
+ compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+ compat_put_timespec(&status.tstamp, &src->tstamp) ||
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7ec9142..103e85a 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -4027,6 +4027,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ /* BayTrail */
+ { PCI_DEVICE(0x8086, 0x0f04),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
++ /* Braswell */
++ { PCI_DEVICE(0x8086, 0x2284),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ /* ICH */
+ { PCI_DEVICE(0x8086, 0x2668),
+ .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 8253b48..611110a 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3317,6 +3317,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
++{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
+ {} /* terminator */
+ };
+@@ -3373,6 +3374,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862807");
+ MODULE_ALIAS("snd-hda-codec-id:80862808");
+ MODULE_ALIAS("snd-hda-codec-id:80862880");
+ MODULE_ALIAS("snd-hda-codec-id:80862882");
++MODULE_ALIAS("snd-hda-codec-id:80862883");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+
+ MODULE_LICENSE("GPL");
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index eb241c6..fd53d37 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -1121,6 +1121,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
+ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
+ {
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
++ unsigned int pll_c, pll_d;
+ int ret;
+
+ if (power) {
+@@ -1138,6 +1139,18 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
+ /* Sync reg_cache with the hardware */
+ regcache_cache_only(aic3x->regmap, false);
+ regcache_sync(aic3x->regmap);
++
++ /* Rewrite paired PLL D registers in case cached sync skipped
++ * writing one of them and thus caused other one also not
++ * being written
++ */
++ pll_c = snd_soc_read(codec, AIC3X_PLL_PROGC_REG);
++ pll_d = snd_soc_read(codec, AIC3X_PLL_PROGD_REG);
++ if (pll_c == aic3x_reg[AIC3X_PLL_PROGC_REG].def ||
++ pll_d == aic3x_reg[AIC3X_PLL_PROGD_REG].def) {
++ snd_soc_write(codec, AIC3X_PLL_PROGC_REG, pll_c);
++ snd_soc_write(codec, AIC3X_PLL_PROGD_REG, pll_d);
++ }
+ } else {
+ /*
+ * Do soft reset to this codec instance in order to clear
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 731d47b..e4da224 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -689,9 +689,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ int shared;
+ struct snd_kcontrol *kcontrol;
+ bool wname_in_long_name, kcname_in_long_name;
+- char *long_name;
++ char *long_name = NULL;
+ const char *name;
+- int ret;
++ int ret = 0;
+
+ if (dapm->codec)
+ prefix = dapm->codec->name_prefix;
+@@ -756,15 +756,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+
+ kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
+ prefix);
+- kfree(long_name);
+- if (!kcontrol)
+- return -ENOMEM;
++ if (!kcontrol) {
++ ret = -ENOMEM;
++ goto exit_free;
++ }
++
+ kcontrol->private_free = dapm_kcontrol_free;
+
+ ret = dapm_kcontrol_data_alloc(w, kcontrol);
+ if (ret) {
+ snd_ctl_free_one(kcontrol);
+- return ret;
++ goto exit_free;
+ }
+
+ ret = snd_ctl_add(card, kcontrol);
+@@ -772,17 +774,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ dev_err(dapm->dev,
+ "ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
+ w->name, name, ret);
+- return ret;
++ goto exit_free;
+ }
+ }
+
+ ret = dapm_kcontrol_add_widget(kcontrol, w);
+- if (ret)
+- return ret;
++ if (ret == 0)
++ w->kcontrols[kci] = kcontrol;
+
+- w->kcontrols[kci] = kcontrol;
++exit_free:
++ kfree(long_name);
+
+- return 0;
++ return ret;
+ }
+
+ /* create new dapm mixer control */
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index af19560..ab433a0 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -586,18 +586,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ {
+ struct snd_card *card;
+ struct list_head *p;
++ bool was_shutdown;
+
+ if (chip == (void *)-1L)
+ return;
+
+ card = chip->card;
+ down_write(&chip->shutdown_rwsem);
++ was_shutdown = chip->shutdown;
+ chip->shutdown = 1;
+ up_write(&chip->shutdown_rwsem);
+
+ mutex_lock(&register_mutex);
+- chip->num_interfaces--;
+- if (chip->num_interfaces <= 0) {
++ if (!was_shutdown) {
+ struct snd_usb_endpoint *ep;
+
+ snd_card_disconnect(card);
+@@ -617,6 +618,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ list_for_each(p, &chip->mixer_list) {
+ snd_usb_mixer_disconnect(p);
+ }
++ }
++
++ chip->num_interfaces--;
++ if (chip->num_interfaces <= 0) {
+ usb_chip[chip->index] = NULL;
+ mutex_unlock(&register_mutex);
+ snd_card_free_when_closed(card);
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 714b949..1f0dc1e 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ gfn_t base_gfn, unsigned long npages);
+
+ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+- unsigned long size)
++ unsigned long npages)
+ {
+ gfn_t end_gfn;
+ pfn_t pfn;
+
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+- end_gfn = gfn + (size >> PAGE_SHIFT);
++ end_gfn = gfn + npages;
+ gfn += 1;
+
+ if (is_error_noslot_pfn(pfn))
+@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ * Pin all pages we are about to map in memory. This is
+ * important because we unmap and unpin in 4kb steps later.
+ */
+- pfn = kvm_pin_pages(slot, gfn, page_size);
++ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
+ if (is_error_noslot_pfn(pfn)) {
+ gfn += 1;
+ continue;
+@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ if (r) {
+ printk(KERN_ERR "kvm_iommu_map_address:"
+ "iommu failed to map pfn=%llx\n", pfn);
+- kvm_unpin_pages(kvm, pfn, page_size);
++ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ goto unmap_pages;
+ }
+
diff --git a/3.14.23/4420_grsecurity-3.0-3.14.23-201411062033.patch b/3.14.24/4420_grsecurity-3.0-3.14.24-201411150026.patch
index 399d2be..b8fbeb3 100644
--- a/3.14.23/4420_grsecurity-3.0-3.14.23-201411062033.patch
+++ b/3.14.24/4420_grsecurity-3.0-3.14.24-201411150026.patch
@@ -292,7 +292,7 @@ index 7116fda..2f71588 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 135a04a..79b5e32 100644
+index 8fd0610..914c673 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -4827,6 +4827,19 @@ index 6c0f684..5faea9d 100644
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
+diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
+index 6e0ed93..c17967f 100644
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
+ sub x1, x1, #2
+ 4: adds x1, x1, #1
+ b.mi 5f
+- strb wzr, [x0]
++USER(9f, strb wzr, [x0] )
+ 5: mov x0, #0
+ ret
+ ENDPROC(__clear_user)
diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
index c3a58a1..78fbf54 100644
--- a/arch/avr32/include/asm/cache.h
@@ -12341,7 +12354,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index e409891..8ec65be 100644
+index 98aa930..d2cef74 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86_64
@@ -14500,7 +14513,7 @@ index 2206757..85cbcfa 100644
err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 4299eb0..c0687a7 100644
+index 92a2e93..9b829fa 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -15,8 +15,10 @@
@@ -14578,7 +14591,7 @@ index 4299eb0..c0687a7 100644
movl %ebp,%ebp /* zero extension */
pushq_cfi $__USER32_DS
/*CFI_REL_OFFSET ss,0*/
-@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
+@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
CFI_REL_OFFSET rsp,0
pushfq_cfi
/*CFI_REL_OFFSET rflags,0*/
@@ -14620,20 +14633,27 @@ index 4299eb0..c0687a7 100644
1: movl (%rbp),%ebp
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_CLOSE_USERLAND
+#endif
+
+ /*
+ * Sysenter doesn't filter flags, so we need to clear NT
+ * ourselves. To save a few cycles, we can check whether
+@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
+ jnz sysenter_fix_flags
+ sysenter_flags_fixed:
+
+- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ GET_THREAD_INFO(%r11)
+ orl $TS_COMPAT,TI_status(%r11)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,15 +209,18 @@ sysenter_do_call:
+@@ -172,15 +218,18 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -14656,7 +14676,7 @@ index 4299eb0..c0687a7 100644
CFI_REGISTER rip,rdx
RESTORE_ARGS 0,24,0,0,0,0
xorq %r8,%r8
-@@ -193,6 +243,9 @@ sysexit_from_sys_call:
+@@ -205,6 +254,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call __audit_syscall_entry
@@ -14666,7 +14686,7 @@ index 4299eb0..c0687a7 100644
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -204,7 +257,7 @@ sysexit_from_sys_call:
+@@ -216,7 +268,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
@@ -14675,7 +14695,7 @@ index 4299eb0..c0687a7 100644
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -215,11 +268,12 @@ sysexit_from_sys_call:
+@@ -227,11 +279,12 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */
call __audit_syscall_exit
@@ -14689,7 +14709,7 @@ index 4299eb0..c0687a7 100644
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
-@@ -237,7 +291,7 @@ sysexit_audit:
+@@ -253,7 +306,7 @@ sysenter_fix_flags:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -14698,7 +14718,7 @@ index 4299eb0..c0687a7 100644
jz sysenter_auditsys
#endif
SAVE_REST
-@@ -249,6 +303,9 @@ sysenter_tracesys:
+@@ -265,6 +318,9 @@ sysenter_tracesys:
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
@@ -14708,7 +14728,7 @@ index 4299eb0..c0687a7 100644
jmp sysenter_do_call
CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
-@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
+@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
@@ -14736,7 +14756,7 @@ index 4299eb0..c0687a7 100644
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
+@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
@@ -14764,7 +14784,7 @@ index 4299eb0..c0687a7 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -319,13 +395,16 @@ cstar_do_call:
+@@ -335,13 +410,16 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -14784,7 +14804,7 @@ index 4299eb0..c0687a7 100644
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
movl EFLAGS-ARGOFFSET(%rsp),%r11d
-@@ -352,7 +431,7 @@ sysretl_audit:
+@@ -368,7 +446,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -14793,7 +14813,7 @@ index 4299eb0..c0687a7 100644
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -366,11 +445,19 @@ cstar_tracesys:
+@@ -382,11 +460,19 @@ cstar_tracesys:
xchgl %ebp,%r9d
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
@@ -14813,7 +14833,7 @@ index 4299eb0..c0687a7 100644
movq $-EFAULT,%rax
jmp ia32_sysret
CFI_ENDPROC
-@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
+@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
@@ -14847,7 +14867,7 @@ index 4299eb0..c0687a7 100644
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -442,6 +536,9 @@ ia32_tracesys:
+@@ -458,6 +551,9 @@ ia32_tracesys:
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
@@ -16612,22 +16632,10 @@ index ced283a..ffe04cc 100644
union {
u64 v64;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index 9c999c1..5718a82 100644
+index 01f15b2..5718a82 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
-@@ -155,8 +155,9 @@ do { \
- #define elf_check_arch(x) \
- ((x)->e_machine == EM_X86_64)
-
--#define compat_elf_check_arch(x) \
-- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
-+#define compat_elf_check_arch(x) \
-+ (elf_check_arch_ia32(x) || \
-+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
-
- #if __USER32_DS != __USER_DS
- # error "The following code assumes __USER32_DS == __USER_DS"
-@@ -243,7 +244,25 @@ extern int force_personality32;
+@@ -244,7 +244,25 @@ extern int force_personality32;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -16653,7 +16661,7 @@ index 9c999c1..5718a82 100644
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
-@@ -296,16 +315,12 @@ do { \
+@@ -297,16 +315,12 @@ do { \
#define ARCH_DLINFO \
do { \
@@ -16672,7 +16680,7 @@ index 9c999c1..5718a82 100644
} while (0)
#define AT_SYSINFO 32
-@@ -320,7 +335,7 @@ else \
+@@ -321,7 +335,7 @@ else \
#endif /* !CONFIG_X86_32 */
@@ -16681,7 +16689,7 @@ index 9c999c1..5718a82 100644
#define VDSO_ENTRY \
((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
-@@ -336,9 +351,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
+@@ -337,9 +351,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -20729,7 +20737,7 @@ index df94598..f3b29bf 100644
bp_int3_handler = handler;
bp_int3_addr = (u8 *)addr + sizeof(int3);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index 7f26c9a..694544e 100644
+index 523f147..7b996e0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -198,7 +198,7 @@ int first_system_vector = 0xfe;
@@ -21105,7 +21113,7 @@ index c67ffa6..f41fbbf 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 8e28bf2..bf5c0d2 100644
+index 3f27f5f..6c575e3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
@@ -27163,7 +27171,7 @@ index 5cdff03..80fa283 100644
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 9e5de68..147c254 100644
+index b88fc86..99a7057 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
@@ -27952,7 +27960,7 @@ index 57409f6..b505597 100644
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index e0d1d7a..db035d4 100644
+index de02906..7353850 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
@@ -28435,7 +28443,7 @@ index e48b674..a451dd9 100644
.read = native_io_apic_read,
.write = native_io_apic_write,
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
-index a4b451c..8dfe1ad 100644
+index dd50e26..6e07dc3 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
@@ -28477,7 +28485,7 @@ index a4b451c..8dfe1ad 100644
if (use_xsave())
err = xsave_user(buf);
else if (use_fxsr())
-@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+@@ -309,6 +310,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
*/
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
@@ -28563,7 +28571,7 @@ index cba218a..1cc1bed 100644
goto error;
walker->ptep_user[walker->level - 1] = ptep_user;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 2de1bc0..22251ee 100644
+index 9643eda6..c9cb765 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3508,7 +3508,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
@@ -28590,7 +28598,7 @@ index 2de1bc0..22251ee 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 3927528..cd7f2ac 100644
+index 0c90f4b..9fca4d7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -441,6 +441,7 @@ struct vcpu_vmx {
@@ -28648,7 +28656,7 @@ index 3927528..cd7f2ac 100644
{
u64 host_tsc, tsc_offset;
-@@ -3024,8 +3033,11 @@ static __init int hardware_setup(void)
+@@ -3027,8 +3036,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -28662,7 +28670,7 @@ index 3927528..cd7f2ac 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -3036,13 +3048,15 @@ static __init int hardware_setup(void)
+@@ -3039,13 +3051,15 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
@@ -28682,7 +28690,7 @@ index 3927528..cd7f2ac 100644
if (nested)
nested_vmx_setup_ctls_msrs();
-@@ -4162,10 +4176,17 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4165,10 +4179,17 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
u32 low32, high32;
unsigned long tmpl;
struct desc_ptr dt;
@@ -28701,7 +28709,7 @@ index 3927528..cd7f2ac 100644
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
#ifdef CONFIG_X86_64
-@@ -4187,7 +4208,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4190,7 +4211,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
@@ -28710,7 +28718,7 @@ index 3927528..cd7f2ac 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -7186,7 +7207,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+@@ -7196,7 +7217,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -28719,7 +28727,7 @@ index 3927528..cd7f2ac 100644
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
-@@ -7207,6 +7228,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7217,6 +7238,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
@@ -28732,7 +28740,7 @@ index 3927528..cd7f2ac 100644
/* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug
-@@ -7265,6 +7292,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7275,6 +7302,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
@@ -28745,7 +28753,7 @@ index 3927528..cd7f2ac 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
-@@ -7317,6 +7350,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7327,6 +7360,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -28757,7 +28765,7 @@ index 3927528..cd7f2ac 100644
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
-@@ -7330,7 +7368,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7340,7 +7378,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
@@ -28766,7 +28774,7 @@ index 3927528..cd7f2ac 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -7339,8 +7377,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7349,8 +7387,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -28788,10 +28796,10 @@ index 3927528..cd7f2ac 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 8fbd1a7..e046eef 100644
+index 51c2851..394306f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1776,8 +1776,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1806,8 +1806,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -28802,7 +28810,7 @@ index 8fbd1a7..e046eef 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2688,6 +2688,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2718,6 +2718,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -28811,7 +28819,16 @@ index 8fbd1a7..e046eef 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -5502,7 +5504,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -4911,7 +4913,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+
+ ++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
+- if (!is_guest_mode(vcpu)) {
++ if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+@@ -5532,7 +5534,7 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
@@ -33416,7 +33433,7 @@ index 461bc82..4e091a3 100644
struct split_state {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index a348868..3c64310 100644
+index fed892d..e380153 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
@@ -36467,7 +36484,7 @@ index dc51f46..d5446a8 100644
(u8 *) pte, count) < count) {
kfree(pte);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index 2648797..92ed21f 100644
+index 4044cf7..555ae4e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
@@ -38476,7 +38493,7 @@ index 0e06f0c..d98cde3 100644
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
-index 89c497c..9c736ae 100644
+index 04a14e0..5b8f0aa 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
@@ -39381,7 +39398,7 @@ index 8320abd..ec48108 100644
if (cmd != SIOCWANDEV)
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 429b75b..58488cc 100644
+index 8a64dbe..58488cc 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -284,9 +284,6 @@
@@ -39427,35 +39444,6 @@ index 429b75b..58488cc 100644
unsigned int add =
((pool_size - entropy_count)*anfrac*3) >> s;
-@@ -1063,8 +1060,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
- * pool while mixing, and hash one final time.
- */
- sha_transform(hash.w, extract, workspace);
-- memset(extract, 0, sizeof(extract));
-- memset(workspace, 0, sizeof(workspace));
-+ memzero_explicit(extract, sizeof(extract));
-+ memzero_explicit(workspace, sizeof(workspace));
-
- /*
- * In case the hash function has some recognizable output
-@@ -1076,7 +1073,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
-- memset(&hash, 0, sizeof(hash));
-+ memzero_explicit(&hash, sizeof(hash));
- }
-
- static ssize_t extract_entropy(struct entropy_store *r, void *buf,
-@@ -1124,7 +1121,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- }
-
- /* Wipe data just returned from memory */
-- memset(tmp, 0, sizeof(tmp));
-+ memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
- }
@@ -1151,7 +1148,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
extract_buf(r, tmp);
@@ -39465,15 +39453,6 @@ index 429b75b..58488cc 100644
ret = -EFAULT;
break;
}
-@@ -1162,7 +1159,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
- }
-
- /* Wipe data just returned from memory */
-- memset(tmp, 0, sizeof(tmp));
-+ memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
- }
@@ -1507,7 +1504,7 @@ EXPORT_SYMBOL(generate_random_uuid);
#include <linux/sysctl.h>
@@ -39705,10 +39684,10 @@ index 18448a7..d5fad43 100644
/* Force all MSRs to the same value */
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 4159236..b850472 100644
+index 4854f81..d9178cb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -1974,7 +1974,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+@@ -1985,7 +1985,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
#endif
mutex_lock(&cpufreq_governor_mutex);
@@ -39717,7 +39696,7 @@ index 4159236..b850472 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2204,7 +2204,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+@@ -2215,7 +2215,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -39726,7 +39705,7 @@ index 4159236..b850472 100644
.notifier_call = cpufreq_cpu_callback,
};
-@@ -2244,13 +2244,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2255,13 +2255,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -39746,7 +39725,7 @@ index 4159236..b850472 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n", __func__,
-@@ -2304,8 +2308,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2315,8 +2319,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -39760,7 +39739,7 @@ index 4159236..b850472 100644
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
-@@ -2320,8 +2327,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2331,8 +2338,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
* Check if driver provides function to enable boost -
* if not, use cpufreq_boost_set_sw as default
*/
@@ -39862,10 +39841,10 @@ index 18d4091..434be15 100644
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index ae52c77..3d8f69b 100644
+index 533a509..4e1860b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
-@@ -125,10 +125,10 @@ struct pstate_funcs {
+@@ -138,10 +138,10 @@ struct pstate_funcs {
struct cpu_defaults {
struct pstate_adjust_policy pid_policy;
struct pstate_funcs funcs;
@@ -39878,7 +39857,7 @@ index ae52c77..3d8f69b 100644
struct perf_limits {
int no_turbo;
-@@ -530,7 +530,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+@@ -566,7 +566,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
cpu->pstate.current_pstate = pstate;
@@ -39887,16 +39866,18 @@ index ae52c77..3d8f69b 100644
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
-@@ -552,12 +552,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+@@ -588,13 +588,13 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
sprintf(cpu->name, "Intel 2nd generation core");
- cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+- cpu->pstate.scaling = pstate_funcs.get_scaling();
+ cpu->pstate.min_pstate = pstate_funcs->get_min();
+ cpu->pstate.max_pstate = pstate_funcs->get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
++ cpu->pstate.scaling = pstate_funcs->get_scaling();
- if (pstate_funcs.get_vid)
- pstate_funcs.get_vid(cpu);
@@ -39905,7 +39886,7 @@ index ae52c77..3d8f69b 100644
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-@@ -844,9 +844,9 @@ static int intel_pstate_msrs_not_valid(void)
+@@ -889,9 +889,9 @@ static int intel_pstate_msrs_not_valid(void)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
@@ -39918,7 +39899,7 @@ index ae52c77..3d8f69b 100644
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
-@@ -860,7 +860,7 @@ static int intel_pstate_msrs_not_valid(void)
+@@ -905,7 +905,7 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
@@ -39927,13 +39908,14 @@ index ae52c77..3d8f69b 100644
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.p_gain_pct = policy->p_gain_pct;
-@@ -872,11 +872,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
+@@ -917,12 +917,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
- pstate_funcs.get_max = funcs->get_max;
- pstate_funcs.get_min = funcs->get_min;
- pstate_funcs.get_turbo = funcs->get_turbo;
+- pstate_funcs.get_scaling = funcs->get_scaling;
- pstate_funcs.set = funcs->set;
- pstate_funcs.get_vid = funcs->get_vid;
+ pstate_funcs = funcs;
@@ -40420,6 +40402,20 @@ index 57ea7f4..af06b76 100644
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index d7d5c8a..6d44568 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
+ _IOC_SIZE(cmd) > sizeof(buffer))
+ return -ENOTTY;
+
+- if (_IOC_DIR(cmd) == _IOC_READ)
+- memset(&buffer, 0, _IOC_SIZE(cmd));
++ memset(&buffer, 0, sizeof(buffer));
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 2c6d5e1..a2cca6b 100644
--- a/drivers/firewire/core-device.c
@@ -46166,6 +46162,20 @@ index 98d24ae..bc22415 100644
return 1;
}
+diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+index 5c45c9d..9c29552 100644
+--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
++++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
++ if (cmd->msg_len > sizeof(b) - 4)
++ return -EINVAL;
++
+ memcpy(&b[4], cmd->msg, cmd->msg_len);
+
+ state->config->send_command(fe, 0x72,
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index fca336b..fb70ab7 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -47763,9 +47773,18 @@ index fbf7dcd..ad71499 100644
};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 0c6adaa..0784e3f 100644
+index f30ceb1..81c589c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
+@@ -422,7 +422,7 @@ static void macvtap_setup(struct net_device *dev)
+ dev->tx_queue_len = TUN_READQ_SIZE;
+ }
+
+-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
++static struct rtnl_link_ops macvtap_link_ops = {
+ .kind = "macvtap",
+ .setup = macvtap_setup,
+ .newlink = macvtap_newlink,
@@ -1018,7 +1018,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
}
@@ -47785,18 +47804,9 @@ index 0c6adaa..0784e3f 100644
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
-index 72ff14b..e860630 100644
+index 5a1897d..e860630 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
-@@ -601,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
- }
-- if (atomic_long_read(&file->f_count) <= 2) {
-+ if (atomic_long_read(&file->f_count) < 2) {
- ppp_release(NULL, file);
- err = 0;
- } else
@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
struct ppp_stats stats;
@@ -47842,10 +47852,10 @@ index 979fe43..1f1230c 100644
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index 26f8635..c237839 100644
+index 2c8b1c2..9942a89 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
-@@ -1876,7 +1876,7 @@ unlock:
+@@ -1883,7 +1883,7 @@ unlock:
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
@@ -47854,7 +47864,7 @@ index 26f8635..c237839 100644
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
-@@ -1889,6 +1889,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+@@ -1896,6 +1896,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned int ifindex;
int ret;
@@ -47991,7 +48001,7 @@ index a2515887..6d13233 100644
/* we will have to manufacture ethernet headers, prepare template */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
-index 841b608..198a8b7 100644
+index 07a3255..4c59b30 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -47,7 +47,7 @@ module_param(gso, bool, 0444);
@@ -48004,59 +48014,10 @@ index 841b608..198a8b7 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index 9b40532..e3294ac 100644
+index 0704a04..4208d2d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -1447,9 +1447,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
- if (!in6_dev)
- goto out;
-
-- if (!pskb_may_pull(skb, skb->len))
-- goto out;
--
- iphdr = ipv6_hdr(skb);
- saddr = &iphdr->saddr;
- daddr = &iphdr->daddr;
-@@ -1770,6 +1767,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
- struct pcpu_sw_netstats *tx_stats, *rx_stats;
- union vxlan_addr loopback;
- union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
-+ struct net_device *dev = skb->dev;
-+ int len = skb->len;
-
- tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
-@@ -1793,16 +1792,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tx_packets++;
-- tx_stats->tx_bytes += skb->len;
-+ tx_stats->tx_bytes += len;
- u64_stats_update_end(&tx_stats->syncp);
-
- if (netif_rx(skb) == NET_RX_SUCCESS) {
- u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
-- rx_stats->rx_bytes += skb->len;
-+ rx_stats->rx_bytes += len;
- u64_stats_update_end(&rx_stats->syncp);
- } else {
-- skb->dev->stats.rx_dropped++;
-+ dev->stats.rx_dropped++;
- }
- }
-
-@@ -1977,7 +1976,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
- return arp_reduce(dev, skb);
- #if IS_ENABLED(CONFIG_IPV6)
- else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
-- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
-+ pskb_may_pull(skb, sizeof(struct ipv6hdr)
-+ + sizeof(struct nd_msg)) &&
- ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
- struct nd_msg *msg;
-
-@@ -2846,7 +2846,7 @@ nla_put_failure:
+@@ -2847,7 +2847,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -48065,7 +48026,7 @@ index 9b40532..e3294ac 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -2893,7 +2893,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -2894,7 +2894,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -49297,7 +49258,7 @@ index fb02fc2..83dc2c3 100644
kfree(msi_dev_attr);
++count;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
-index 39a207a..d1ec78a 100644
+index a943c6c..ad1a3cc 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1112,7 +1112,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
@@ -51402,6 +51363,58 @@ index 236ed66..dd9cd74 100644
ret = -EBUSY;
goto err_busy;
}
+diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
+index 7a6d85e..4c55a18 100644
+--- a/drivers/staging/line6/driver.c
++++ b/drivers/staging/line6/driver.c
+@@ -458,7 +458,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ {
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+- unsigned char len;
++ unsigned char *plen;
+
+ /* query the serial number: */
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+@@ -471,27 +471,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ return ret;
+ }
+
++ plen = kmalloc(1, GFP_KERNEL);
++ if (plen == NULL)
++ return -ENOMEM;
++
+ /* Wait for data length. We'll get 0xff until length arrives. */
+ do {
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+- 0x0012, 0x0000, &len, 1,
++ 0x0012, 0x0000, plen, 1,
+ LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receive length failed (error %d)\n", ret);
++ kfree(plen);
+ return ret;
+ }
+- } while (len == 0xff);
++ } while (*plen == 0xff);
+
+- if (len != datalen) {
++ if (*plen != datalen) {
+ /* should be equal or something went wrong */
+ dev_err(line6->ifcdev,
+ "length mismatch (expected %d, got %d)\n",
+- (int)datalen, (int)len);
++ (int)datalen, (int)*plen);
++ kfree(plen);
+ return -EINVAL;
+ }
++ kfree(plen);
+
+ /* receive the result: */
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index 3f8020c..649fded 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -51903,10 +51916,10 @@ index 24884ca..26c8220 100644
login->tgt_agt = sbp_target_agent_register(login);
if (IS_ERR(login->tgt_agt)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
-index 6ea95d2..88607b4 100644
+index 38b4be2..c68af1c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
-@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+@@ -1526,7 +1526,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
@@ -51916,7 +51929,7 @@ index 6ea95d2..88607b4 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 24f5279..046edc5 100644
+index 9232c773..e42a77a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1154,7 +1154,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -52679,7 +52692,7 @@ index 9cd706d..6ff2de7 100644
if (cfg->uart_flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
-index 25b8f68..3e23c14 100644
+index 27b5554..8131d9d 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1451,7 +1451,7 @@ static void uart_hangup(struct tty_struct *tty)
@@ -53121,10 +53134,10 @@ index ce396ec..04a37be 100644
if (get_user(c, buf))
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index d3448a9..28e8db0 100644
+index 25d0741..36e7237 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
-@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
+@@ -3480,7 +3480,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
void tty_default_fops(struct file_operations *fops)
{
@@ -53559,7 +53572,7 @@ index 9ca7716..a2ccc2e 100644
dev->rawdescriptors[i] + (*ppos - pos),
min(len, alloclen))) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index 2518c32..1c201bb 100644
+index ef6ec13b..5c6e68e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
@@ -53581,7 +53594,7 @@ index 2518c32..1c201bb 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 445d62a..e0657a3 100644
+index d2bd9d7..1ddb53a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
@@ -53592,7 +53605,7 @@ index 445d62a..e0657a3 100644
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-@@ -4551,6 +4552,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+@@ -4554,6 +4555,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
goto done;
return;
}
@@ -53660,19 +53673,6 @@ index 4d11449..f4ccabf 100644
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
-diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
-index 09e9619..d266724 100644
---- a/drivers/usb/dwc3/gadget.c
-+++ b/drivers/usb/dwc3/gadget.c
-@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- if (!usb_endpoint_xfer_isoc(desc))
- return 0;
-
-- memset(&trb_link, 0, sizeof(trb_link));
--
- /* Link TRB for ISOC. The HWO bit is never reset */
- trb_st_hw = &dep->trb_pool[0];
-
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 8cfc319..4868255 100644
--- a/drivers/usb/early/ehci-dbgp.c
@@ -58911,22 +58911,10 @@ index ff286f3..8153a14 100644
.attrs = attrs,
};
diff --git a/fs/buffer.c b/fs/buffer.c
-index 71e2d0e..7e40912 100644
+index 4d06a57..5977df8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -2313,6 +2313,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
- err = 0;
-
- balance_dirty_pages_ratelimited(mapping);
-+
-+ if (unlikely(fatal_signal_pending(current))) {
-+ err = -EINTR;
-+ goto out;
-+ }
- }
-
- /* page covers the boundary, find the boundary offset */
-@@ -3430,7 +3435,7 @@ void __init buffer_init(void)
+@@ -3438,7 +3438,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -59966,7 +59954,7 @@ index a93f7e6..d58bcbe 100644
return 0;
while (nr) {
diff --git a/fs/dcache.c b/fs/dcache.c
-index 58d57da..a3f889f 100644
+index 4366127..581b312 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -250,7 +250,7 @@ static void __d_free(struct rcu_head *head)
@@ -60119,7 +60107,7 @@ index 58d57da..a3f889f 100644
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
-@@ -3313,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -3318,7 +3319,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
@@ -60128,7 +60116,7 @@ index 58d57da..a3f889f 100644
}
}
return D_WALK_CONTINUE;
-@@ -3429,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3434,7 +3435,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -61085,7 +61073,7 @@ index 6ea7b14..8fa16d9 100644
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 62f024c..a6a1a61 100644
+index 2a6830a..d25d59c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1269,19 +1269,19 @@ struct ext4_sb_info {
@@ -61251,10 +61239,10 @@ index 242226a..f3eb6c1 100644
return 0;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
-index 04434ad..6404663 100644
+index 1268a1b..adf949f 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
-@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
const char *function, unsigned int line, const char *msg)
{
@@ -61264,10 +61252,10 @@ index 04434ad..6404663 100644
"MMP failure info: last update time: %llu, last update "
"node: %s, last update device: %s\n",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index a46030d..1477295 100644
+index 9fb3e6c..9a82508 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
-@@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+@@ -1268,7 +1268,7 @@ static ext4_fsblk_t get_sb_block(void **data)
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
@@ -61276,7 +61264,7 @@ index a46030d..1477295 100644
"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
#ifdef CONFIG_QUOTA
-@@ -2448,7 +2448,7 @@ struct ext4_attr {
+@@ -2442,7 +2442,7 @@ struct ext4_attr {
int offset;
int deprecated_val;
} u;
@@ -61286,10 +61274,10 @@ index a46030d..1477295 100644
static int parse_strtoull(const char *buf,
unsigned long long max, unsigned long long *value)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index 55e611c..cfad16d 100644
+index 8825154..af51586 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
-@@ -381,7 +381,7 @@ static int
+@@ -394,7 +394,7 @@ static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
char *buffer, size_t buffer_size)
{
@@ -61298,7 +61286,7 @@ index 55e611c..cfad16d 100644
for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
const struct xattr_handler *handler =
-@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+@@ -411,9 +411,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
buffer += size;
}
rest -= size;
@@ -63063,7 +63051,7 @@ index 4a6cf28..d3a29d3 100644
jffs2_prealloc_raw_node_refs(c, jeb, 1);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
-index a6597d6..41b30ec 100644
+index 09ed551..45684f8 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
@@ -63283,7 +63271,7 @@ index b29e42f..5ea7fdf 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index dd2f2c5..27e6c48 100644
+index 0dd72c8..34dd17d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,34 @@ int generic_permission(struct inode *inode, int mask)
@@ -63626,7 +63614,7 @@ index dd2f2c5..27e6c48 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3180,7 +3285,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3181,7 +3286,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -63635,7 +63623,7 @@ index dd2f2c5..27e6c48 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3198,7 +3303,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3199,7 +3304,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -63644,7 +63632,7 @@ index dd2f2c5..27e6c48 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3298,9 +3403,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3299,9 +3404,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -63658,7 +63646,7 @@ index dd2f2c5..27e6c48 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3352,6 +3459,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3353,6 +3460,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -63679,7 +63667,7 @@ index dd2f2c5..27e6c48 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3414,6 +3535,17 @@ retry:
+@@ -3415,6 +3536,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -63697,7 +63685,7 @@ index dd2f2c5..27e6c48 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3430,6 +3562,8 @@ retry:
+@@ -3431,6 +3563,8 @@ retry:
break;
}
out:
@@ -63706,7 +63694,7 @@ index dd2f2c5..27e6c48 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3482,9 +3616,16 @@ retry:
+@@ -3483,9 +3617,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -63723,7 +63711,7 @@ index dd2f2c5..27e6c48 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3565,6 +3706,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3566,6 +3707,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -63732,7 +63720,7 @@ index dd2f2c5..27e6c48 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3597,10 +3740,21 @@ retry:
+@@ -3598,10 +3741,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -63754,7 +63742,7 @@ index dd2f2c5..27e6c48 100644
exit3:
dput(dentry);
exit2:
-@@ -3690,6 +3844,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3691,6 +3845,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -63763,7 +63751,7 @@ index dd2f2c5..27e6c48 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3716,10 +3872,22 @@ retry_deleg:
+@@ -3717,10 +3873,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -63786,7 +63774,7 @@ index dd2f2c5..27e6c48 100644
exit2:
dput(dentry);
}
-@@ -3807,9 +3975,17 @@ retry:
+@@ -3808,9 +3976,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -63804,7 +63792,7 @@ index dd2f2c5..27e6c48 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3912,6 +4088,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3913,6 +4089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -63812,7 +63800,7 @@ index dd2f2c5..27e6c48 100644
int how = 0;
int error;
-@@ -3935,7 +4112,7 @@ retry:
+@@ -3936,7 +4113,7 @@ retry:
if (error)
return error;
@@ -63821,7 +63809,7 @@ index dd2f2c5..27e6c48 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3947,11 +4124,28 @@ retry:
+@@ -3948,11 +4125,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -63850,7 +63838,7 @@ index dd2f2c5..27e6c48 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4238,6 +4432,12 @@ retry_deleg:
+@@ -4239,6 +4433,12 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -63863,7 +63851,7 @@ index dd2f2c5..27e6c48 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry);
if (error)
-@@ -4245,6 +4445,9 @@ retry_deleg:
+@@ -4246,6 +4446,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode);
@@ -63873,7 +63861,7 @@ index dd2f2c5..27e6c48 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4281,6 +4484,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4282,6 +4485,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -63882,7 +63870,7 @@ index dd2f2c5..27e6c48 100644
int len;
len = PTR_ERR(link);
-@@ -4290,7 +4495,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -4291,7 +4496,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -63899,7 +63887,7 @@ index dd2f2c5..27e6c48 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index c7d4a0a..93207ab 100644
+index d9bf3ef..93207ab 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1371,6 +1371,9 @@ static int do_umount(struct mount *mnt, int flags)
@@ -64017,17 +64005,7 @@ index c7d4a0a..93207ab 100644
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -2831,6 +2855,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
- /* make sure we can reach put_old from new_root */
- if (!is_path_reachable(old_mnt, old.dentry, &new))
- goto out4;
-+ /* make certain new is below the root */
-+ if (!is_path_reachable(new_mnt, new.dentry, &root))
-+ goto out4;
- root_mp->m_count++; /* pin it so it won't go away */
- lock_mount_hash();
- detach_mnt(new_mnt, &parent_path);
-@@ -3062,7 +3089,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
+@@ -3065,7 +3089,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -64074,7 +64052,7 @@ index 15f9d98..082c625 100644
void nfs_fattr_init(struct nfs_fattr *fattr)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index f23a6ca..730ddcc 100644
+index 86f5d3e..ae2d35a 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1169,7 +1169,7 @@ struct nfsd4_operation {
@@ -67191,19 +67169,6 @@ index ae0c3ce..9ee641c 100644
generic_fillattr(inode, stat);
return 0;
-diff --git a/fs/super.c b/fs/super.c
-index 7624267..88a6bc6 100644
---- a/fs/super.c
-+++ b/fs/super.c
-@@ -81,6 +81,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
- inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
- dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
- total_objects = dentries + inodes + fs_objects + 1;
-+ if (!total_objects)
-+ total_objects = 1;
-
- /* proportion the scan between the caches */
- dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index ee0d761..b346c58 100644
--- a/fs/sysfs/dir.c
@@ -67605,6 +67570,28 @@ index 78e62cc..eec3706 100644
copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
goto out_put;
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index f9bb590..af3c389 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -229,7 +229,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
+ * of the compiler which do not like us using do_div in the middle
+ * of large functions.
+ */
+-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
++static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
+ {
+ __u32 mod;
+
+@@ -285,7 +285,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+ return 0;
+ }
+ #else
+-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
++static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
+ {
+ __u32 mod;
+
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
index 0000000..cdaa3ef
@@ -79556,10 +79543,10 @@ index be5fd38..d71192a 100644
if (sizeof(l) == 4)
return fls(l);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 4afa4f8..1ed7824 100644
+index a693c6d..cec897f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -1572,7 +1572,7 @@ struct block_device_operations {
+@@ -1571,7 +1571,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
struct module *owner;
@@ -82551,7 +82538,7 @@ index 5bba088..7ad4ae7 100644
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index c1b7414..5ea2ad8 100644
+index 0a0b024..ebee54f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
@@ -82596,7 +82583,7 @@ index c1b7414..5ea2ad8 100644
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
-@@ -1152,9 +1158,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1153,9 +1159,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
@@ -82609,7 +82596,7 @@ index c1b7414..5ea2ad8 100644
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-@@ -1186,34 +1192,6 @@ int set_page_dirty(struct page *page);
+@@ -1187,34 +1193,6 @@ int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
@@ -82644,7 +82631,7 @@ index c1b7414..5ea2ad8 100644
extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
-@@ -1313,6 +1291,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
+@@ -1314,6 +1292,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
@@ -82660,7 +82647,7 @@ index c1b7414..5ea2ad8 100644
int vma_wants_writenotify(struct vm_area_struct *vma);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-@@ -1331,8 +1318,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+@@ -1332,8 +1319,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
{
return 0;
}
@@ -82676,7 +82663,7 @@ index c1b7414..5ea2ad8 100644
#endif
#ifdef __PAGETABLE_PMD_FOLDED
-@@ -1341,8 +1335,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+@@ -1342,8 +1336,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
{
return 0;
}
@@ -82692,7 +82679,7 @@ index c1b7414..5ea2ad8 100644
#endif
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-@@ -1360,11 +1361,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+@@ -1361,11 +1362,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
NULL: pud_offset(pgd, address);
}
@@ -82716,7 +82703,7 @@ index c1b7414..5ea2ad8 100644
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-@@ -1754,7 +1767,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1755,7 +1768,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
@@ -82725,7 +82712,7 @@ index c1b7414..5ea2ad8 100644
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-@@ -1762,6 +1775,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1763,6 +1776,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
@@ -82733,7 +82720,7 @@ index c1b7414..5ea2ad8 100644
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
-@@ -1790,10 +1804,11 @@ struct vm_unmapped_area_info {
+@@ -1791,10 +1805,11 @@ struct vm_unmapped_area_info {
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
@@ -82747,7 +82734,7 @@ index c1b7414..5ea2ad8 100644
/*
* Search for an unmapped address range.
-@@ -1805,7 +1820,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+@@ -1806,7 +1821,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
*/
static inline unsigned long
@@ -82756,7 +82743,7 @@ index c1b7414..5ea2ad8 100644
{
if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
return unmapped_area(info);
-@@ -1868,6 +1883,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+@@ -1869,6 +1884,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
@@ -82767,7 +82754,7 @@ index c1b7414..5ea2ad8 100644
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1896,15 +1915,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+@@ -1897,15 +1916,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
@@ -82783,7 +82770,7 @@ index c1b7414..5ea2ad8 100644
#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-@@ -1956,6 +1966,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+@@ -1957,6 +1967,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
static inline void vm_stat_account(struct mm_struct *mm,
unsigned long flags, struct file *file, long pages)
{
@@ -82795,7 +82782,7 @@ index c1b7414..5ea2ad8 100644
mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -2037,7 +2052,7 @@ extern int unpoison_memory(unsigned long pfn);
+@@ -2038,7 +2053,7 @@ extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
@@ -82804,7 +82791,7 @@ index c1b7414..5ea2ad8 100644
extern int soft_offline_page(struct page *page, int flags);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-@@ -2072,5 +2087,11 @@ void __init setup_nr_node_ids(void);
+@@ -2073,5 +2088,11 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
@@ -84633,29 +84620,6 @@ index 680f9a3..f13aeb0 100644
__SONET_ITEMS
#undef __HANDLE_ITEM
};
-diff --git a/include/linux/string.h b/include/linux/string.h
-index ac889c5..0ed878d 100644
---- a/include/linux/string.h
-+++ b/include/linux/string.h
-@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
- #endif
-
- extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
-- const void *from, size_t available);
-+ const void *from, size_t available);
-
- /**
- * strstarts - does @str start with @prefix?
-@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
- return strncmp(str, prefix, strlen(prefix)) == 0;
- }
-
--extern size_t memweight(const void *ptr, size_t bytes);
-+size_t memweight(const void *ptr, size_t bytes);
-+void memzero_explicit(void *s, size_t count);
-
- /**
- * kbasename - return the last part of a pathname.
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53..dc934c9 100644
--- a/include/linux/sunrpc/addr.h
@@ -85993,8 +85957,24 @@ index 4a5b9a3..ca27d73 100644
.update = sctp_csum_update,
.combine = sctp_csum_combine,
};
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index a3353f4..ba41e01 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -433,6 +433,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
+ asoc->pmtu_pending = 0;
+ }
+
++static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
++{
++ return !list_empty(&chunk->list);
++}
++
+ /* Walk through a list of TLV parameters. Don't trust the
+ * individual parameter lengths and instead depend on
+ * the chunk length to indicate when to stop. Make sure
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
-index 7f4eeb3..37e8fe1 100644
+index 7f4eeb3..aaa63d9 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
@@ -86006,6 +85986,19 @@ index 7f4eeb3..37e8fe1 100644
/* A naming convention of "sctp_sf_xxx" applies to all the state functions
* currently in use.
+@@ -248,9 +248,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
+ int, __be16);
+ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr);
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+- struct sctp_paramhdr *param_hdr, void *chunk_end,
+- struct sctp_paramhdr **errp);
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, bool addr_param_needed,
++ struct sctp_paramhdr **errp);
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf);
+ int sctp_process_asconf_ack(struct sctp_association *asoc,
@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
__u32 sctp_generate_tsn(const struct sctp_endpoint *);
@@ -89670,7 +89663,7 @@ index 1d96dd0..994ff19 100644
default:
diff --git a/kernel/module.c b/kernel/module.c
-index 6716a1f..acc7443 100644
+index 1d679a6..acc7443 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -61,6 +61,7 @@
@@ -89865,17 +89858,7 @@ index 6716a1f..acc7443 100644
set_memory_ro);
}
}
-@@ -1841,7 +1860,9 @@ static void free_module(struct module *mod)
-
- /* We leave it in list to prevent duplicate loads, but make sure
- * that noone uses it while it's being deconstructed. */
-+ mutex_lock(&module_mutex);
- mod->state = MODULE_STATE_UNFORMED;
-+ mutex_unlock(&module_mutex);
-
- /* Remove dynamic debug info */
- ddebug_remove_module(mod->name);
-@@ -1862,16 +1883,19 @@ static void free_module(struct module *mod)
+@@ -1864,16 +1883,19 @@ static void free_module(struct module *mod)
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
@@ -89898,7 +89881,7 @@ index 6716a1f..acc7443 100644
#ifdef CONFIG_MPU
update_protections(current->mm);
-@@ -1940,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+@@ -1942,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
int ret = 0;
const struct kernel_symbol *ksym;
@@ -89930,7 +89913,7 @@ index 6716a1f..acc7443 100644
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* We compiled with -fno-common. These are not
-@@ -1963,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+@@ -1965,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
ksym = resolve_symbol_wait(mod, info, name);
/* Ok if resolved. */
if (ksym && !IS_ERR(ksym)) {
@@ -89940,7 +89923,7 @@ index 6716a1f..acc7443 100644
break;
}
-@@ -1982,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+@@ -1984,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
secbase = (unsigned long)mod_percpu(mod);
else
secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
@@ -89961,7 +89944,7 @@ index 6716a1f..acc7443 100644
return ret;
}
-@@ -2070,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
+@@ -2072,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
|| s->sh_entsize != ~0UL
|| strstarts(sname, ".init"))
continue;
@@ -89988,7 +89971,7 @@ index 6716a1f..acc7443 100644
}
pr_debug("Init section allocation order:\n");
-@@ -2099,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
+@@ -2101,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
|| s->sh_entsize != ~0UL
|| !strstarts(sname, ".init"))
continue;
@@ -90017,7 +90000,7 @@ index 6716a1f..acc7443 100644
}
}
-@@ -2288,7 +2325,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2290,7 +2325,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
@@ -90026,7 +90009,7 @@ index 6716a1f..acc7443 100644
info->index.sym) | INIT_OFFSET_MASK;
pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
-@@ -2305,13 +2342,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2307,13 +2342,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
}
/* Append room for core symbols at end of core part. */
@@ -90044,7 +90027,7 @@ index 6716a1f..acc7443 100644
info->index.str) | INIT_OFFSET_MASK;
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
-@@ -2329,12 +2366,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2331,12 +2366,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
/* Make sure we get permanent strtab: don't use info->strtab. */
mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
@@ -90061,7 +90044,7 @@ index 6716a1f..acc7443 100644
src = mod->symtab;
for (ndst = i = 0; i < mod->num_symtab; i++) {
if (i == 0 ||
-@@ -2346,6 +2385,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2348,6 +2385,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
}
}
mod->core_num_syms = ndst;
@@ -90070,7 +90053,7 @@ index 6716a1f..acc7443 100644
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
-@@ -2379,17 +2420,33 @@ void * __weak module_alloc(unsigned long size)
+@@ -2381,17 +2420,33 @@ void * __weak module_alloc(unsigned long size)
return vmalloc_exec(size);
}
@@ -90109,7 +90092,7 @@ index 6716a1f..acc7443 100644
mutex_unlock(&module_mutex);
}
return ret;
-@@ -2646,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+@@ -2648,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
mod = (void *)info->sechdrs[info->index.mod].sh_addr;
if (info->index.sym == 0) {
@@ -90125,7 +90108,7 @@ index 6716a1f..acc7443 100644
return ERR_PTR(-ENOEXEC);
}
-@@ -2662,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+@@ -2664,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
const char *modmagic = get_modinfo(info, "vermagic");
@@ -90140,7 +90123,7 @@ index 6716a1f..acc7443 100644
if (flags & MODULE_INIT_IGNORE_VERMAGIC)
modmagic = NULL;
-@@ -2688,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+@@ -2690,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
}
/* Set up license info based on the info section */
@@ -90149,7 +90132,7 @@ index 6716a1f..acc7443 100644
return 0;
}
-@@ -2782,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2784,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
void *ptr;
/* Do the allocs. */
@@ -90158,7 +90141,7 @@ index 6716a1f..acc7443 100644
/*
* The pointer to this block is stored in the module structure
* which is inside the block. Just mark it as not being a
-@@ -2792,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2794,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
if (!ptr)
return -ENOMEM;
@@ -90174,7 +90157,7 @@ index 6716a1f..acc7443 100644
/*
* The pointer to this block is stored in the module structure
* which is inside the block. This block doesn't need to be
-@@ -2805,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2807,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
*/
kmemleak_ignore(ptr);
if (!ptr) {
@@ -90224,7 +90207,7 @@ index 6716a1f..acc7443 100644
/* Transfer each section which specifies SHF_ALLOC */
pr_debug("final section addresses:\n");
-@@ -2822,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2824,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
@@ -90277,7 +90260,7 @@ index 6716a1f..acc7443 100644
pr_debug("\t0x%lx %s\n",
(long)shdr->sh_addr, info->secstrings + shdr->sh_name);
}
-@@ -2888,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
+@@ -2890,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
* Do it before processing of module parameters, so the module
* can provide parameter accessor functions of its own.
*/
@@ -90296,7 +90279,7 @@ index 6716a1f..acc7443 100644
set_fs(old_fs);
}
-@@ -2950,8 +3082,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
+@@ -2952,8 +3082,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
@@ -90309,7 +90292,7 @@ index 6716a1f..acc7443 100644
}
int __weak module_finalize(const Elf_Ehdr *hdr,
-@@ -2964,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -2966,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
static int post_relocation(struct module *mod, const struct load_info *info)
{
/* Sort exception table now relocations are done. */
@@ -90319,7 +90302,7 @@ index 6716a1f..acc7443 100644
/* Copy relocated percpu area over. */
percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
-@@ -3018,16 +3154,16 @@ static int do_init_module(struct module *mod)
+@@ -3020,16 +3154,16 @@ static int do_init_module(struct module *mod)
MODULE_STATE_COMING, mod);
/* Set RO and NX regions for core */
@@ -90344,7 +90327,7 @@ index 6716a1f..acc7443 100644
do_mod_ctors(mod);
/* Start the module */
-@@ -3088,11 +3224,12 @@ static int do_init_module(struct module *mod)
+@@ -3090,11 +3224,12 @@ static int do_init_module(struct module *mod)
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
@@ -90362,7 +90345,7 @@ index 6716a1f..acc7443 100644
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
-@@ -3235,9 +3372,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3237,9 +3372,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (err)
goto free_unload;
@@ -90401,7 +90384,7 @@ index 6716a1f..acc7443 100644
/* Fix up syms, so that st_value is a pointer to location. */
err = simplify_symbols(mod, info);
if (err < 0)
-@@ -3253,13 +3419,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3255,13 +3419,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
flush_module_icache(mod);
@@ -90415,7 +90398,7 @@ index 6716a1f..acc7443 100644
dynamic_debug_setup(info->debug, info->num_debug);
/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
-@@ -3297,11 +3456,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3299,11 +3456,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup:
dynamic_debug_remove(info->debug);
synchronize_sched();
@@ -90428,7 +90411,7 @@ index 6716a1f..acc7443 100644
free_unload:
module_unload_free(mod);
unlink_mod:
-@@ -3384,10 +3542,16 @@ static const char *get_ksymbol(struct module *mod,
+@@ -3386,10 +3542,16 @@ static const char *get_ksymbol(struct module *mod,
unsigned long nextval;
/* At worse, next value is at end of module */
@@ -90448,7 +90431,7 @@ index 6716a1f..acc7443 100644
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
-@@ -3638,7 +3802,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3640,7 +3802,7 @@ static int m_show(struct seq_file *m, void *p)
return 0;
seq_printf(m, "%s %u",
@@ -90457,7 +90440,7 @@ index 6716a1f..acc7443 100644
print_unload_info(m, mod);
/* Informative for users. */
-@@ -3647,7 +3811,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3649,7 +3811,7 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading":
"Live");
/* Used by oprofile and other similar tools. */
@@ -90466,7 +90449,7 @@ index 6716a1f..acc7443 100644
/* Taints info */
if (mod->taints)
-@@ -3683,7 +3847,17 @@ static const struct file_operations proc_modules_operations = {
+@@ -3685,7 +3847,17 @@ static const struct file_operations proc_modules_operations = {
static int __init proc_modules_init(void)
{
@@ -90484,7 +90467,7 @@ index 6716a1f..acc7443 100644
return 0;
}
module_init(proc_modules_init);
-@@ -3744,14 +3918,14 @@ struct module *__module_address(unsigned long addr)
+@@ -3746,14 +3918,14 @@ struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -90502,7 +90485,7 @@ index 6716a1f..acc7443 100644
return mod;
}
return NULL;
-@@ -3786,11 +3960,20 @@ bool is_module_text_address(unsigned long addr)
+@@ -3788,11 +3960,20 @@ bool is_module_text_address(unsigned long addr)
*/
struct module *__module_text_address(unsigned long addr)
{
@@ -90727,7 +90710,7 @@ index 3b89464..5e38379 100644
.clock_get = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 424c2d4..679242f 100644
+index 77e6b83..fc021bd 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -43,6 +43,7 @@
@@ -90828,7 +90811,7 @@ index 424c2d4..679242f 100644
int it_id_set = IT_ID_NOT_SET;
if (!kc)
-@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+@@ -1012,6 +1013,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
if (copy_from_user(&new_tp, tp, sizeof (*tp)))
return -EFAULT;
@@ -90856,7 +90839,7 @@ index 2fac9cc..56fef29 100644
select LZO_COMPRESS
select LZO_DECOMPRESS
diff --git a/kernel/power/process.c b/kernel/power/process.c
-index 14f9a8d..98ee610 100644
+index f1fe7ec..7d4e641 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
@@ -91904,7 +91887,7 @@ index a63f4dc..349bbb0 100644
unsigned long timeout)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 677ebad..e39b352 100644
+index 9a3f3c4..943fa11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1775,7 +1775,7 @@ void set_numabalancing_state(bool enabled)
@@ -91916,7 +91899,7 @@ index 677ebad..e39b352 100644
int err;
int state = numabalancing_enabled;
-@@ -2251,8 +2251,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
+@@ -2255,8 +2255,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
@@ -91928,7 +91911,7 @@ index 677ebad..e39b352 100644
if (!prev->mm) {
prev->active_mm = NULL;
-@@ -3049,6 +3051,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -3053,6 +3055,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
@@ -91937,7 +91920,7 @@ index 677ebad..e39b352 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -3082,7 +3086,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -3086,7 +3090,8 @@ SYSCALL_DEFINE1(nice, int, increment)
if (nice > 19)
nice = 19;
@@ -91947,7 +91930,7 @@ index 677ebad..e39b352 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -3355,6 +3360,7 @@ recheck:
+@@ -3359,6 +3364,7 @@ recheck:
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
@@ -91955,7 +91938,7 @@ index 677ebad..e39b352 100644
/* can't increase priority */
if (attr->sched_priority > p->rt_priority &&
attr->sched_priority > rlim_rtprio)
-@@ -4727,8 +4733,10 @@ void idle_task_exit(void)
+@@ -4732,8 +4738,10 @@ void idle_task_exit(void)
BUG_ON(cpu_online(smp_processor_id()));
@@ -91967,7 +91950,7 @@ index 677ebad..e39b352 100644
mmdrop(mm);
}
-@@ -4806,7 +4814,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+@@ -4811,7 +4819,7 @@ static void migrate_tasks(unsigned int dead_cpu)
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -91976,7 +91959,7 @@ index 677ebad..e39b352 100644
{
.procname = "sched_domain",
.mode = 0555,
-@@ -4823,17 +4831,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -4828,17 +4836,17 @@ static struct ctl_table sd_ctl_root[] = {
{}
};
@@ -91998,7 +91981,7 @@ index 677ebad..e39b352 100644
/*
* In the intermediate directories, both the child directory and
-@@ -4841,22 +4849,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -4846,22 +4854,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
@@ -92030,7 +92013,7 @@ index 677ebad..e39b352 100644
const char *procname, void *data, int maxlen,
umode_t mode, proc_handler *proc_handler,
bool load_idx)
-@@ -4876,7 +4887,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -4881,7 +4892,7 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
@@ -92039,7 +92022,7 @@ index 677ebad..e39b352 100644
if (table == NULL)
return NULL;
-@@ -4911,9 +4922,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -4916,9 +4927,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
@@ -92051,7 +92034,7 @@ index 677ebad..e39b352 100644
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
-@@ -4940,11 +4951,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -4945,11 +4956,13 @@ static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
@@ -92066,7 +92049,7 @@ index 677ebad..e39b352 100644
if (entry == NULL)
return;
-@@ -4967,8 +4980,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -4972,8 +4985,12 @@ static void unregister_sched_domain_sysctl(void)
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
@@ -93493,36 +93476,9 @@ index e6be585..d73ae5e 100644
local_irq_save(flags);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
-index 759d5e0..5156a5fe 100644
+index 7e3cd7a..5156a5fe 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
-@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
- int size;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
-
- /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
-@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
- int syscall_nr;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
-
- /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
-@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
- int size;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
- if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
- return;
@@ -602,6 +602,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
int num;
@@ -93541,15 +93497,6 @@ index 759d5e0..5156a5fe 100644
mutex_lock(&syscall_trace_lock);
sys_perf_refcount_enter--;
-@@ -641,7 +645,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
- int size;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
- if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
- return;
@@ -674,6 +678,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
int num;
@@ -93749,32 +93696,10 @@ index 114d1be..ab0350c 100644
(val << avg->factor)) >> avg->weight :
(val << avg->factor);
diff --git a/lib/bitmap.c b/lib/bitmap.c
-index 06f7e4f..9078e42 100644
+index e5c4ebe..9078e42 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
-@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
- lower = src[off + k];
- if (left && off + k == lim - 1)
- lower &= mask;
-- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
-+ dst[k] = lower >> rem;
-+ if (rem)
-+ dst[k] |= upper << (BITS_PER_LONG - rem);
- if (left && k == lim - 1)
- dst[k] &= mask;
- }
-@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
- upper = src[k];
- if (left && k == lim - 1)
- upper &= (1UL << left) - 1;
-- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
-+ dst[k + off] = upper << rem;
-+ if (rem)
-+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
- if (left && k + off == lim - 1)
- dst[k + off] &= (1UL << left) - 1;
- }
-@@ -422,7 +426,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+@@ -426,7 +426,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
@@ -93783,7 +93708,7 @@ index 06f7e4f..9078e42 100644
bitmap_zero(maskp, nmaskbits);
-@@ -507,7 +511,7 @@ int bitmap_parse_user(const char __user *ubuf,
+@@ -511,7 +511,7 @@ int bitmap_parse_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -93792,7 +93717,7 @@ index 06f7e4f..9078e42 100644
ulen, 1, maskp, nmaskbits);
}
-@@ -598,7 +602,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+@@ -602,7 +602,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
{
unsigned a, b;
int c, old_c, totaldigits;
@@ -93801,7 +93726,7 @@ index 06f7e4f..9078e42 100644
int exp_digit, in_range;
totaldigits = c = 0;
-@@ -698,7 +702,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+@@ -702,7 +702,7 @@ int bitmap_parselist_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -94382,33 +94307,10 @@ index 0922579..9d7adb9 100644
#endif
}
diff --git a/lib/string.c b/lib/string.c
-index e5878de..64941b2 100644
+index 43d0781..64941b2 100644
--- a/lib/string.c
+++ b/lib/string.c
-@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
- EXPORT_SYMBOL(memset);
- #endif
-
-+/**
-+ * memzero_explicit - Fill a region of memory (e.g. sensitive
-+ * keying data) with 0s.
-+ * @s: Pointer to the start of the area.
-+ * @count: The size of the area.
-+ *
-+ * memzero_explicit() doesn't need an arch-specific version as
-+ * it just invokes the one of memset() implicitly.
-+ */
-+void memzero_explicit(void *s, size_t count)
-+{
-+ memset(s, 0, count);
-+ OPTIMIZER_HIDE_VAR(s);
-+}
-+EXPORT_SYMBOL(memzero_explicit);
-+
- #ifndef __HAVE_ARCH_MEMCPY
- /**
- * memcpy - Copy one area of memory to another
-@@ -789,9 +805,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
+@@ -805,9 +805,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
return check_bytes8(start, value, bytes);
value64 = value;
@@ -97717,7 +97619,7 @@ index 9f45f87..749bfd8 100644
unsigned long bg_thresh,
unsigned long dirty,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index ff0f6b1..8a67124 100644
+index 7b2611a..4407637 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -97813,7 +97715,16 @@ index ff0f6b1..8a67124 100644
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
-@@ -2414,7 +2454,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
+@@ -1957,7 +1997,7 @@ zonelist_scan:
+ if (alloc_flags & ALLOC_FAIR) {
+ if (!zone_local(preferred_zone, zone))
+ continue;
+- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0)
++ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0)
+ continue;
+ }
+ /*
+@@ -2422,7 +2462,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
continue;
mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -97822,7 +97733,16 @@ index ff0f6b1..8a67124 100644
}
}
-@@ -6606,4 +6646,4 @@ void dump_page(struct page *page, char *reason)
+@@ -5671,7 +5711,7 @@ static void __setup_per_zone_wmarks(void)
+
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH,
+ high_wmark_pages(zone) - low_wmark_pages(zone) -
+- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
++ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
+
+ setup_zone_migrate_reserve(zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+@@ -6613,4 +6653,4 @@ void dump_page(struct page *page, char *reason)
{
dump_page_badflags(page, reason, 0);
}
@@ -97842,7 +97762,7 @@ index 7c59ef6..1358905 100644
};
diff --git a/mm/percpu.c b/mm/percpu.c
-index 8cd4308..ab22f17 100644
+index a2a54a8..43ecb68 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
@@ -100314,8 +100234,325 @@ index b543470..d2ddae2 100644
if (!can_dir) {
printk(KERN_INFO "can: failed to create /proc/net/can . "
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index 6e7a236..06f19b9 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+
+ static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
+
++/*
++ * Should be used for buffers allocated with ceph_kvmalloc().
++ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
++ * in-buffer (msg front).
++ *
++ * Dispose of @sgt with teardown_sgtable().
++ *
++ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
++ * in cases where a single sg is sufficient. No attempt to reduce the
++ * number of sgs by squeezing physically contiguous pages together is
++ * made though, for simplicity.
++ */
++static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
++ const void *buf, unsigned int buf_len)
++{
++ struct scatterlist *sg;
++ const bool is_vmalloc = is_vmalloc_addr(buf);
++ unsigned int off = offset_in_page(buf);
++ unsigned int chunk_cnt = 1;
++ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
++ int i;
++ int ret;
++
++ if (buf_len == 0) {
++ memset(sgt, 0, sizeof(*sgt));
++ return -EINVAL;
++ }
++
++ if (is_vmalloc) {
++ chunk_cnt = chunk_len >> PAGE_SHIFT;
++ chunk_len = PAGE_SIZE;
++ }
++
++ if (chunk_cnt > 1) {
++ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
++ if (ret)
++ return ret;
++ } else {
++ WARN_ON(chunk_cnt != 1);
++ sg_init_table(prealloc_sg, 1);
++ sgt->sgl = prealloc_sg;
++ sgt->nents = sgt->orig_nents = 1;
++ }
++
++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
++ struct page *page;
++ unsigned int len = min(chunk_len - off, buf_len);
++
++ if (is_vmalloc)
++ page = vmalloc_to_page(buf);
++ else
++ page = virt_to_page(buf);
++
++ sg_set_page(sg, page, len, off);
++
++ off = 0;
++ buf += len;
++ buf_len -= len;
++ }
++ WARN_ON(buf_len != 0);
++
++ return 0;
++}
++
++static void teardown_sgtable(struct sg_table *sgt)
++{
++ if (sgt->orig_nents > 1)
++ sg_free_table(sgt);
++}
++
+ static int ceph_aes_encrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[2], sg_out[1];
++ struct scatterlist sg_in[2], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+
+ *dst_len = src_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], src, src_len);
+ sg_set_buf(&sg_in[1], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len)
+ {
+- struct scatterlist sg_in[3], sg_out[1];
++ struct scatterlist sg_in[3], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+
+ *dst_len = src1_len + src2_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 3);
+ sg_set_buf(&sg_in[0], src1, src1_len);
+ sg_set_buf(&sg_in[1], src2, src2_len);
+ sg_set_buf(&sg_in[2], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src1_len + src2_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt2 failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[2];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[2], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+- sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+- sg_set_buf(sg_in, src, src_len);
+ sg_set_buf(&sg_out[0], dst, *dst_len);
+ sg_set_buf(&sg_out[1], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst_len)
+@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt2(const void *key, int key_len,
+@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ void *dst2, size_t *dst2_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[3];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[3], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- sg_init_table(sg_in, 1);
+- sg_set_buf(sg_in, src, src_len);
+ sg_init_table(sg_out, 3);
+ sg_set_buf(&sg_out[0], dst1, *dst1_len);
+ sg_set_buf(&sg_out[1], dst2, *dst2_len);
+ sg_set_buf(&sg_out[2], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst1_len)
+@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ dst2, *dst2_len, 1);
+ */
+
+- return 0;
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
-index 0a31298..6301eb0 100644
+index 2e87eec..6301eb0 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
@@ -100336,19 +100573,6 @@ index 0a31298..6301eb0 100644
s = addr_str[i];
switch (ss->ss_family) {
-@@ -291,7 +291,11 @@ int ceph_msgr_init(void)
- if (ceph_msgr_slab_init())
- return -ENOMEM;
-
-- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
-+ /*
-+ * The number of active work items is limited by the number of
-+ * connections, so leave @max_active at default.
-+ */
-+ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
- if (ceph_msgr_wq)
- return 0;
-
diff --git a/net/compat.c b/net/compat.c
index cbc1a2a..ab7644e 100644
--- a/net/compat.c
@@ -100578,7 +100802,7 @@ index 3ed11a5..c177c8f 100644
}
EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
-index cf999e0..c59a975 100644
+index cf999e0..c59a9754 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
@@ -101468,7 +101692,7 @@ index c7539e2..b455e51 100644
break;
case NETDEV_DOWN:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
-index 9d43468..ffa28cc 100644
+index 017fa5e..d61ebac 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
@@ -101481,7 +101705,7 @@ index 9d43468..ffa28cc 100644
return nh->nh_saddr;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index 2d24f29..70fee98 100644
+index 8c8493e..d5214a4 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -56,13 +56,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
@@ -101664,43 +101888,6 @@ index 3d4da2c..40f9c29 100644
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_PROT_UNREACH, 0);
}
-diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
-index ed88d78..844323b 100644
---- a/net/ipv4/ip_output.c
-+++ b/net/ipv4/ip_output.c
-@@ -1487,6 +1487,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- struct sk_buff *nskb;
- struct sock *sk;
- struct inet_sock *inet;
-+ int err;
-
- if (ip_options_echo(&replyopts.opt.opt, skb))
- return;
-@@ -1525,8 +1526,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- sock_net_set(sk, net);
- __skb_queue_head_init(&sk->sk_write_queue);
- sk->sk_sndbuf = sysctl_wmem_default;
-- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
-- &ipc, &rt, MSG_DONTWAIT);
-+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
-+ len, 0, &ipc, &rt, MSG_DONTWAIT);
-+ if (unlikely(err)) {
-+ ip_flush_pending_frames(sk);
-+ goto out;
-+ }
-+
- nskb = skb_peek(&sk->sk_write_queue);
- if (nskb) {
- if (arg->csumoffset >= 0)
-@@ -1538,7 +1544,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
- ip_push_pending_frames(sk, &fl4);
- }
--
-+out:
- put_cpu_var(unicast_sock);
-
- ip_rt_put(rt);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 580dd96..9fcef7e 100644
--- a/net/ipv4/ip_sockglue.c
@@ -101724,24 +101911,6 @@ index 580dd96..9fcef7e 100644
msg.msg_controllen = len;
msg.msg_flags = flags;
-diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
-index 65b664d..791a419 100644
---- a/net/ipv4/ip_tunnel_core.c
-+++ b/net/ipv4/ip_tunnel_core.c
-@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
- skb_pull_rcsum(skb, hdr_len);
-
- if (inner_proto == htons(ETH_P_TEB)) {
-- struct ethhdr *eh = (struct ethhdr *)skb->data;
-+ struct ethhdr *eh;
-
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- return -ENOMEM;
-
-+ eh = (struct ethhdr *)skb->data;
- if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
- skb->protocol = eh->h_proto;
- else
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e4a8f76..dd8ad72 100644
--- a/net/ipv4/ip_vti.c
@@ -103772,7 +103941,7 @@ index d478b88..8c8d157 100644
suspend:
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
-index 22b223f..ab70070 100644
+index 74350c3..512e9f5 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
@@ -104415,7 +104584,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index c375d73..d4abd23 100644
+index 7c177bc..d4abd23 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk)
@@ -104427,15 +104596,6 @@ index c375d73..d4abd23 100644
}
static void netlink_rcv_wake(struct sock *sk)
-@@ -707,7 +707,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
- * after validation, the socket and the ring may only be used by a
- * single process, otherwise we fall back to copying.
- */
-- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
-+ if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
- atomic_read(&nlk->mapped) > 1)
- excl = false;
-
@@ -3003,7 +3003,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb_running,
@@ -105053,6 +105213,87 @@ index f226709..0e735a8 100644
_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 5d97d8f..d477d47 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1627,6 +1627,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+ * ack chunk whose serial number matches that of the request.
+ */
+ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
++ if (sctp_chunk_pending(ack))
++ continue;
+ if (ack->subh.addip_hdr->serial == serial) {
+ sctp_chunk_hold(ack);
+ return ack;
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 0e85291..fb7976a 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
+ list_add(&cur_key->key_list, sh_keys);
+
+ cur_key->key = key;
+- sctp_auth_key_hold(key);
+-
+ return 0;
+ nomem:
+ if (!replace)
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 4de12af..7e8a16c 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -140,18 +140,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ } else {
+ /* Nothing to do. Next chunk in the packet, please. */
+ ch = (sctp_chunkhdr_t *) chunk->chunk_end;
+-
+ /* Force chunk->skb->data to chunk->chunk_end. */
+- skb_pull(chunk->skb,
+- chunk->chunk_end - chunk->skb->data);
+-
+- /* Verify that we have at least chunk headers
+- * worth of buffer left.
+- */
+- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+- sctp_chunk_free(chunk);
+- chunk = queue->in_progress = NULL;
+- }
++ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
++ /* We are guaranteed to pull a SCTP header. */
+ }
+ }
+
+@@ -187,24 +178,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+ chunk->subh.v = NULL; /* Subheader is no longer valid. */
+
+- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
++ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
++ skb_tail_pointer(chunk->skb)) {
+ /* This is not a singleton */
+ chunk->singleton = 0;
+ } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+- /* RFC 2960, Section 6.10 Bundling
+- *
+- * Partial chunks MUST NOT be placed in an SCTP packet.
+- * If the receiver detects a partial chunk, it MUST drop
+- * the chunk.
+- *
+- * Since the end of the chunk is past the end of our buffer
+- * (which contains the whole packet, we can freely discard
+- * the whole packet.
+- */
+- sctp_chunk_free(chunk);
+- chunk = queue->in_progress = NULL;
+-
+- return NULL;
++ /* Discard inside state machine. */
++ chunk->pdiscard = 1;
++ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+ } else {
+ /* We are at the end of the packet, so mark the chunk
+ * in case we need to send a SACK.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2b1738e..a9d0fc9 100644
--- a/net/sctp/ipv6.c
@@ -105127,6 +105368,182 @@ index a62a215..0976540 100644
}
static int sctp_v4_protosw_init(void)
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index fee5552..43abb64 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2609,6 +2609,9 @@ do_addr_param:
+ addr_param = param.v + sizeof(sctp_addip_param_t);
+
+ af = sctp_get_af_specific(param_type2af(param.p->type));
++ if (af == NULL)
++ break;
++
+ af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0);
+
+@@ -3110,50 +3113,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ return SCTP_ERROR_NO_ERROR;
+ }
+
+-/* Verify the ASCONF packet before we process it. */
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+- struct sctp_paramhdr *param_hdr, void *chunk_end,
+- struct sctp_paramhdr **errp) {
+- sctp_addip_param_t *asconf_param;
++/* Verify the ASCONF packet before we process it. */
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, bool addr_param_needed,
++ struct sctp_paramhdr **errp)
++{
++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
+ union sctp_params param;
+- int length, plen;
++ bool addr_param_seen = false;
++
++ sctp_walk_params(param, addip, addip_hdr.params) {
++ size_t length = ntohs(param.p->length);
+
+- param.v = (sctp_paramhdr_t *) param_hdr;
+- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+- length = ntohs(param.p->length);
+ *errp = param.p;
+-
+- if (param.v > chunk_end - length ||
+- length < sizeof(sctp_paramhdr_t))
+- return 0;
+-
+ switch (param.p->type) {
++ case SCTP_PARAM_ERR_CAUSE:
++ break;
++ case SCTP_PARAM_IPV4_ADDRESS:
++ if (length != sizeof(sctp_ipv4addr_param_t))
++ return false;
++ addr_param_seen = true;
++ break;
++ case SCTP_PARAM_IPV6_ADDRESS:
++ if (length != sizeof(sctp_ipv6addr_param_t))
++ return false;
++ addr_param_seen = true;
++ break;
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_PARAM_DEL_IP:
+ case SCTP_PARAM_SET_PRIMARY:
+- asconf_param = (sctp_addip_param_t *)param.v;
+- plen = ntohs(asconf_param->param_hdr.length);
+- if (plen < sizeof(sctp_addip_param_t) +
+- sizeof(sctp_paramhdr_t))
+- return 0;
++ /* In ASCONF chunks, these need to be first. */
++ if (addr_param_needed && !addr_param_seen)
++ return false;
++ length = ntohs(param.addip->param_hdr.length);
++ if (length < sizeof(sctp_addip_param_t) +
++ sizeof(sctp_paramhdr_t))
++ return false;
+ break;
+ case SCTP_PARAM_SUCCESS_REPORT:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ if (length != sizeof(sctp_addip_param_t))
+- return 0;
+-
++ return false;
+ break;
+ default:
+- break;
++			/* This is unknown to us, reject! */
++ return false;
+ }
+-
+- param.v += WORD_ROUND(length);
+ }
+
+- if (param.v != chunk_end)
+- return 0;
++ /* Remaining sanity checks. */
++ if (addr_param_needed && !addr_param_seen)
++ return false;
++ if (!addr_param_needed && addr_param_seen)
++ return false;
++ if (param.v != chunk->chunk_end)
++ return false;
+
+- return 1;
++ return true;
+ }
+
+ /* Process an incoming ASCONF chunk with the next expected serial no. and
+@@ -3162,16 +3178,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf)
+ {
++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
++ bool all_param_pass = true;
++ union sctp_params param;
+ sctp_addiphdr_t *hdr;
+ union sctp_addr_param *addr_param;
+ sctp_addip_param_t *asconf_param;
+ struct sctp_chunk *asconf_ack;
+-
+ __be16 err_code;
+ int length = 0;
+ int chunk_len;
+ __u32 serial;
+- int all_param_pass = 1;
+
+ chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
+ hdr = (sctp_addiphdr_t *)asconf->skb->data;
+@@ -3199,9 +3216,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ goto done;
+
+ /* Process the TLVs contained within the ASCONF chunk. */
+- while (chunk_len > 0) {
++ sctp_walk_params(param, addip, addip_hdr.params) {
++		/* Skip preceding address parameters. */
++ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
++ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
++ continue;
++
+ err_code = sctp_process_asconf_param(asoc, asconf,
+- asconf_param);
++ param.addip);
+ /* ADDIP 4.1 A7)
+ * If an error response is received for a TLV parameter,
+ * all TLVs with no response before the failed TLV are
+@@ -3209,28 +3231,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ * the failed response are considered unsuccessful unless
+ * a specific success indication is present for the parameter.
+ */
+- if (SCTP_ERROR_NO_ERROR != err_code)
+- all_param_pass = 0;
+-
++ if (err_code != SCTP_ERROR_NO_ERROR)
++ all_param_pass = false;
+ if (!all_param_pass)
+- sctp_add_asconf_response(asconf_ack,
+- asconf_param->crr_id, err_code,
+- asconf_param);
++ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
++ err_code, param.addip);
+
+ /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ * an IP address sends an 'Out of Resource' in its response, it
+ * MUST also fail any subsequent add or delete requests bundled
+ * in the ASCONF.
+ */
+- if (SCTP_ERROR_RSRC_LOW == err_code)
++ if (err_code == SCTP_ERROR_RSRC_LOW)
+ goto done;
+-
+- /* Move to the next ASCONF param. */
+- length = ntohs(asconf_param->param_hdr.length);
+- asconf_param = (void *)asconf_param + length;
+- chunk_len -= length;
+ }
+-
+ done:
+ asoc->peer.addip_serial++;
+
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fef2acd..c705c4f 100644
--- a/net/sctp/sm_sideeffect.c
@@ -105140,6 +105557,61 @@ index fef2acd..c705c4f 100644
NULL,
sctp_generate_t1_cookie_event,
sctp_generate_t1_init_event,
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 7194fe85..3e287a3 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -170,6 +170,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
+ {
+ __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+
++ /* Previously already marked? */
++ if (unlikely(chunk->pdiscard))
++ return 0;
+ if (unlikely(chunk_length < required_length))
+ return 0;
+
+@@ -3591,9 +3594,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+ struct sctp_chunk *asconf_ack = NULL;
+ struct sctp_paramhdr *err_param = NULL;
+ sctp_addiphdr_t *hdr;
+- union sctp_addr_param *addr_param;
+ __u32 serial;
+- int length;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+@@ -3618,17 +3619,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+ hdr = (sctp_addiphdr_t *)chunk->skb->data;
+ serial = ntohl(hdr->serial);
+
+- addr_param = (union sctp_addr_param *)hdr->params;
+- length = ntohs(addr_param->p.length);
+- if (length < sizeof(sctp_paramhdr_t))
+- return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+- (void *)addr_param, commands);
+-
+ /* Verify the ASCONF chunk before processing it. */
+- if (!sctp_verify_asconf(asoc,
+- (sctp_paramhdr_t *)((void *)addr_param + length),
+- (void *)chunk->chunk_end,
+- &err_param))
++ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
+@@ -3745,10 +3737,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+ rcvd_serial = ntohl(addip_hdr->serial);
+
+ /* Verify the ASCONF-ACK chunk before processing it. */
+- if (!sctp_verify_asconf(asoc,
+- (sctp_paramhdr_t *)addip_hdr->params,
+- (void *)asconf_ack->chunk_end,
+- &err_param))
++ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 604a6ac..f87f0a3 100644
--- a/net/sctp/socket.c
@@ -105597,10 +106069,10 @@ index ae333c1..18521f0 100644
goto out_nomem;
cd->u.procfs.channel_ent = NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 3ea5cda..bfb3e08 100644
+index 5ff8b87..35af642 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
-@@ -1415,7 +1415,9 @@ call_start(struct rpc_task *task)
+@@ -1418,7 +1418,9 @@ call_start(struct rpc_task *task)
(RPC_IS_ASYNC(task) ? "async" : "sync"));
/* Increment call count */
@@ -106787,6 +107259,45 @@ index 152d4d2..791684c 100644
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 1395760..e4f4ac4 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -82,6 +82,16 @@ echo ""
+ fi
+
+ echo "%install"
++echo 'chmod -f 0500 /boot'
++echo 'if [ -d /lib/modules ]; then'
++echo 'chmod -f 0500 /lib/modules'
++echo 'fi'
++echo 'if [ -d /lib32/modules ]; then'
++echo 'chmod -f 0500 /lib32/modules'
++echo 'fi'
++echo 'if [ -d /lib64/modules ]; then'
++echo 'chmod -f 0500 /lib64/modules'
++echo 'fi'
+ echo 'KBUILD_IMAGE=$(make image_name)'
+ echo "%ifarch ia64"
+ echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
+@@ -139,7 +149,7 @@ echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm
+ echo "fi"
+ echo ""
+ echo "%files"
+-echo '%defattr (-, root, root)'
++echo '%defattr (400, root, root, 500)'
+ echo "%dir /lib/modules"
+ echo "/lib/modules/$KERNELRELEASE"
+ echo "%exclude /lib/modules/$KERNELRELEASE/build"
+@@ -152,7 +162,7 @@ echo '%defattr (-, root, root)'
+ echo "/usr/include"
+ echo ""
+ echo "%files devel"
+-echo '%defattr (-, root, root)'
++echo '%defattr (400, root, root, 500)'
+ echo "/usr/src/kernels/$KERNELRELEASE"
+ echo "/lib/modules/$KERNELRELEASE/build"
+ echo "/lib/modules/$KERNELRELEASE/source"
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 68bb4ef..2f419e1 100644
--- a/scripts/pnmtologo.c
@@ -108183,7 +108694,7 @@ index fc3e662..7844c60 100644
lock = &avc_cache.slots_lock[hvalue];
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index e294b86..4fc9b7f 100644
+index 47b5c69..4fc9b7f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -95,8 +95,6 @@
@@ -108195,22 +108706,6 @@ index e294b86..4fc9b7f 100644
/* SECMARK reference count */
static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
-@@ -470,6 +468,7 @@ next_inode:
- list_entry(sbsec->isec_head.next,
- struct inode_security_struct, list);
- struct inode *inode = isec->inode;
-+ list_del_init(&isec->list);
- spin_unlock(&sbsec->isec_lock);
- inode = igrab(inode);
- if (inode) {
-@@ -478,7 +477,6 @@ next_inode:
- iput(inode);
- }
- spin_lock(&sbsec->isec_lock);
-- list_del_init(&isec->list);
- goto next_inode;
- }
- spin_unlock(&sbsec->isec_lock);
@@ -5759,7 +5757,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
#endif
@@ -108613,7 +109108,7 @@ index 4c1cc51..16040040 100644
}
} else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
-index af49721..e85058e 100644
+index c4ac3c1..5266261 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
@@ -116973,10 +117468,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..2f37382
+index 0000000..d14887a6
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5996 @@
+@@ -0,0 +1,6033 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
@@ -117343,6 +117838,7 @@ index 0000000..2f37382
+userspace_status_4004 userspace_status 4 4004 NULL
+xfs_check_block_4005 xfs_check_block 4 4005 NULL nohasharray
+mei_write_4005 mei_write 3 4005 &xfs_check_block_4005
++gfs2_dir_get_existing_buffer_4007 gfs2_dir_get_existing_buffer 0 4007 NULL
+snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
+blk_end_request_4024 blk_end_request 3 4024 NULL
+ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
@@ -117350,6 +117846,7 @@ index 0000000..2f37382
+mtip_hw_read_registers_4037 mtip_hw_read_registers 3 4037 NULL
+read_file_queues_4078 read_file_queues 3 4078 NULL
+fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
++C_SYSC_rt_sigpending_4114 C_SYSC_rt_sigpending 2 4114 NULL
+tm6000_read_4151 tm6000_read 3 4151 NULL
+mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
+msg_bits_4158 msg_bits 0-3-4 4158 NULL
@@ -117490,6 +117987,7 @@ index 0000000..2f37382
+ll_statahead_one_5962 ll_statahead_one 3 5962 NULL
+__apu_get_register_5967 __apu_get_register 0 5967 NULL
+ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
++SyS_semop_5980 SyS_semop 3 5980 NULL
+alloc_msg_6072 alloc_msg 1 6072 NULL
+sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
+rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL
@@ -117521,6 +118019,7 @@ index 0000000..2f37382
+mei_dbgfs_read_devstate_6352 mei_dbgfs_read_devstate 3 6352 NULL
+_proc_do_string_6376 _proc_do_string 2 6376 NULL
+osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
++gfs2_dir_read_stuffed_6380 gfs2_dir_read_stuffed 3 6380 NULL
+xfs_bmap_extents_to_btree_6387 xfs_bmap_extents_to_btree 0 6387 NULL
+posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL
+add_transaction_credits_6422 add_transaction_credits 2-3 6422 NULL
@@ -117538,6 +118037,7 @@ index 0000000..2f37382
+SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL
+xfs_iozero_6573 xfs_iozero 0 6573 NULL
+ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
++xfs_do_div_6649 xfs_do_div 0-2 6649 NULL
+process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
+btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL
+ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
@@ -117568,6 +118068,7 @@ index 0000000..2f37382
+acm_alloc_minor_6911 acm_alloc_minor 0 6911 &spi_show_regs_6911
+__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
+lops_scan_elements_6916 lops_scan_elements 0 6916 NULL
++do_msgrcv_6921 do_msgrcv 3 6921 NULL
+cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
+ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
+qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
@@ -117661,6 +118162,7 @@ index 0000000..2f37382
+qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 NULL
+venus_lookup_8121 venus_lookup 4 8121 NULL
+ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
++xfs_file_fallocate_8150 xfs_file_fallocate 3-4 8150 NULL
+__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
+ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
+recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
@@ -118152,6 +118654,7 @@ index 0000000..2f37382
+biovec_create_pool_13079 biovec_create_pool 2 13079 NULL
+xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
+ttm_dma_pool_alloc_new_pages_13105 ttm_dma_pool_alloc_new_pages 3 13105 NULL
++SyS_msgrcv_13109 SyS_msgrcv 3 13109 NULL
+snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
+bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
+blk_update_request_13146 blk_update_request 3 13146 NULL
@@ -118191,6 +118694,7 @@ index 0000000..2f37382
+sb_init_dio_done_wq_13482 sb_init_dio_done_wq 0 13482 NULL
+data_read_13494 data_read 3 13494 NULL
+ioat_chansts_32_13506 ioat_chansts_32 0 13506 NULL
++ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 0-2 13512 NULL
+core_status_13515 core_status 4 13515 NULL
+smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
+bm_init_13529 bm_init 2 13529 NULL
@@ -118207,6 +118711,7 @@ index 0000000..2f37382
+blk_msg_write_13655 blk_msg_write 3 13655 NULL
+cache_downcall_13666 cache_downcall 3 13666 NULL
+ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0 13682 NULL
++nv94_aux_13689 nv94_aux 2-5 13689 NULL
+usb_get_string_13693 usb_get_string 0 13693 NULL
+fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
+audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
@@ -118355,6 +118860,7 @@ index 0000000..2f37382
+smscore_load_firmware_family2_15086 smscore_load_firmware_family2 3 15086 NULL
+xfs_btree_insrec_15090 xfs_btree_insrec 0 15090 NULL
+btrfs_readpage_15094 btrfs_readpage 0 15094 NULL
++compat_SyS_pwritev_15118 compat_SyS_pwritev 3 15118 NULL
+hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
+start_port_15124 start_port 0 15124 NULL
+ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
@@ -118687,6 +119193,7 @@ index 0000000..2f37382
+ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
+SyS_lsetxattr_18776 SyS_lsetxattr 4 18776 NULL
+alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
++prealloc_18800 prealloc 0 18800 NULL
+dm_stats_print_18815 dm_stats_print 7 18815 NULL
+sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
+mtf_test_write_18844 mtf_test_write 3 18844 NULL
@@ -118920,6 +119427,7 @@ index 0000000..2f37382
+use_debug_keys_read_21251 use_debug_keys_read 3 21251 NULL
+fru_length_21257 fru_length 0 21257 NULL
+rtw_set_wps_beacon_21262 rtw_set_wps_beacon 3 21262 NULL
++ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
+xfs_alloc_ag_vextent_size_21276 xfs_alloc_ag_vextent_size 0 21276 NULL
+do_msg_fill_21307 do_msg_fill 3 21307 NULL
+add_res_range_21310 add_res_range 4 21310 NULL
@@ -118954,6 +119462,7 @@ index 0000000..2f37382
+ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
+filemap_get_page_21606 filemap_get_page 2 21606 NULL
+gfs2_glock_nq_init_21624 gfs2_glock_nq_init 0 21624 NULL
++ocfs2_refcount_cow_hunk_21630 ocfs2_refcount_cow_hunk 3-4 21630 NULL
+__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
+atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
+ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
@@ -119011,6 +119520,7 @@ index 0000000..2f37382
+mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
+lov_setstripe_22307 lov_setstripe 2 22307 NULL
+udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
++C_SYSC_msgrcv_22320 C_SYSC_msgrcv 3 22320 NULL
+atomic_read_22342 atomic_read 0 22342 NULL
+ll_lazystatfs_seq_write_22353 ll_lazystatfs_seq_write 3 22353 NULL
+snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
@@ -119035,6 +119545,7 @@ index 0000000..2f37382
+wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
+pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
+iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
++compat_SyS_msgrcv_22661 compat_SyS_msgrcv 3 22661 NULL
+ext4_ext_direct_IO_22679 ext4_ext_direct_IO 4 22679 NULL
+l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
+bch_dump_read_22685 bch_dump_read 3 22685 NULL
@@ -119072,7 +119583,7 @@ index 0000000..2f37382
+remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
+viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
+cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
-+ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0 23029 NULL
++ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0-6-7 23029 NULL
+st_status_23032 st_status 5 23032 NULL
+nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
+comedi_buf_write_n_available_23057 comedi_buf_write_n_available 0 23057 NULL
@@ -119209,6 +119720,7 @@ index 0000000..2f37382
+reserve_metadata_bytes_24313 reserve_metadata_bytes 0 24313 NULL
+ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
+si476x_radio_read_acf_blob_24336 si476x_radio_read_acf_blob 3 24336 NULL
++C_SYSC_pwritev_24345 C_SYSC_pwritev 3 24345 NULL
+prepare_pages_24349 prepare_pages 0 24349 NULL
+kzalloc_node_24352 kzalloc_node 1 24352 NULL
+qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
@@ -119255,6 +119767,7 @@ index 0000000..2f37382
+simple_attr_read_24738 simple_attr_read 3 24738 NULL
+qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
+get_dma_residue_24749 get_dma_residue 0 24749 NULL
++ocfs2_cow_file_pos_24751 ocfs2_cow_file_pos 3 24751 NULL
+kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
+ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
+datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
@@ -119471,7 +119984,7 @@ index 0000000..2f37382
+seq_read_27411 seq_read 3 27411 NULL
+ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL
+ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
-+ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0 27422 NULL
++ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0-3-4 27422 NULL
+cypress_write_27423 cypress_write 4 27423 NULL
+sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
+xfs_btree_lookup_get_block_27448 xfs_btree_lookup_get_block 0 27448 NULL
@@ -119669,7 +120182,7 @@ index 0000000..2f37382
+add_to_page_cache_lru_29534 add_to_page_cache_lru 0 29534 NULL
+ftrace_write_29551 ftrace_write 3 29551 NULL
+idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
-+leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
++leaf_dealloc_29566 leaf_dealloc 3-2 29566 NULL
+kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL
+lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
+security_path_chmod_29578 security_path_chmod 0 29578 NULL
@@ -119728,6 +120241,7 @@ index 0000000..2f37382
+__genwqe_readq_30197 __genwqe_readq 0 30197 NULL
+usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
+read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
++SyS_semop_30227 SyS_semop 3 30227 NULL
+bitmap_file_set_bit_30228 bitmap_file_set_bit 2 30228 NULL
+shmem_unuse_inode_30263 shmem_unuse_inode 0 30263 NULL
+rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
@@ -119770,6 +120284,7 @@ index 0000000..2f37382
+set_le_30581 set_le 4 30581 NULL
+blk_init_tags_30592 blk_init_tags 1 30592 NULL
+sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
++SyS_msgrcv_30611 SyS_msgrcv 3 30611 NULL
+macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
+ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL
+compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
@@ -119969,6 +120484,7 @@ index 0000000..2f37382
+generic_readlink_32654 generic_readlink 3 32654 NULL
+move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
+apei_res_add_32674 apei_res_add 0 32674 NULL
++compat_SyS_preadv_32679 compat_SyS_preadv 3 32679 NULL
+jfs_readpages_32702 jfs_readpages 4 32702 NULL
+xfs_filestream_new_ag_32711 xfs_filestream_new_ag 0 32711 NULL
+rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
@@ -120236,6 +120752,7 @@ index 0000000..2f37382
+ptlrpcd_steal_rqset_35637 ptlrpcd_steal_rqset 0 35637 NULL
+spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
+rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
++compat_sys_kexec_load_35674 compat_sys_kexec_load 2 35674 NULL
+SYSC_pwritev_35690 SYSC_pwritev 3 35690 NULL
+rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
+md_super_write_35703 md_super_write 4 35703 NULL
@@ -120490,7 +121007,8 @@ index 0000000..2f37382
+_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
+xfs_qm_dqrepair_38262 xfs_qm_dqrepair 0 38262 NULL
+mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 NULL nohasharray
-+ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268
++ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268 nohasharray
++SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 &ieee80211_if_read_auto_open_plinks_38268
+xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
+xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
+ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
@@ -120499,6 +121017,7 @@ index 0000000..2f37382
+ida_simple_get_38326 ida_simple_get 0 38326 NULL
+__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
+btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
++xfs_free_file_space_38383 xfs_free_file_space 2-3 38383 NULL
+dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
+ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
+pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
@@ -120575,6 +121094,7 @@ index 0000000..2f37382
+insert_reserved_file_extent_39327 insert_reserved_file_extent 3 39327 NULL
+wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
+ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
++gfs2_dir_write_data_39357 gfs2_dir_write_data 3-4 39357 NULL
+do_write_log_from_user_39362 do_write_log_from_user 3-0 39362 NULL
+vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
+regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
@@ -120654,7 +121174,7 @@ index 0000000..2f37382
+compress_file_range_40225 compress_file_range 3-4 40225 NULL
+osst_read_40237 osst_read 3 40237 NULL
+lpage_info_slot_40243 lpage_info_slot 3-1 40243 NULL
-+ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
++ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4-3 40248 NULL
+rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
+ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
+usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
@@ -120913,6 +121433,7 @@ index 0000000..2f37382
+ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
+calculate_node_totalpages_43118 calculate_node_totalpages 2-3 43118 NULL
+read_file_dfs_43145 read_file_dfs 3 43145 NULL
++gfs2_dir_write_stuffed_43147 gfs2_dir_write_stuffed 0-4 43147 NULL
+cfs_cpt_table_alloc_43159 cfs_cpt_table_alloc 1 43159 NULL
+usb_string_sub_43164 usb_string_sub 0 43164 NULL
+il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
@@ -120938,6 +121459,7 @@ index 0000000..2f37382
+gfs2_rgrp_bh_get_43375 gfs2_rgrp_bh_get 0 43375 NULL
+xfs_btree_new_iroot_43392 xfs_btree_new_iroot 0 43392 NULL
+xenfb_write_43412 xenfb_write 3 43412 NULL
++ext4_xattr_check_names_43422 ext4_xattr_check_names 0 43422 NULL
+__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
+usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
+cifs_writev_43437 cifs_writev 4 43437 NULL
@@ -121022,6 +121544,7 @@ index 0000000..2f37382
+radix_tree_maybe_preload_44346 radix_tree_maybe_preload 0 44346 NULL
+blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 NULL nohasharray
+nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 &blk_queue_init_tags_44355
++alloc_requests_44372 alloc_requests 0 44372 NULL
+rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
+mtip_hw_read_flags_44396 mtip_hw_read_flags 3 44396 NULL
+aoedev_flush_44398 aoedev_flush 2 44398 NULL
@@ -121092,7 +121615,7 @@ index 0000000..2f37382
+cfs_trace_daemon_command_usrstr_45147 cfs_trace_daemon_command_usrstr 2 45147 NULL
+gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
+device_write_45156 device_write 3 45156 NULL nohasharray
-+ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3 45156 &device_write_45156
++ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 &device_write_45156
+tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
+sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
+snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL nohasharray
@@ -121278,6 +121801,7 @@ index 0000000..2f37382
+ablkcipher_next_slow_47274 ablkcipher_next_slow 4-3 47274 NULL
+gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
+vsnprintf_47291 vsnprintf 0 47291 NULL
++SYSC_semop_47292 SYSC_semop 3 47292 NULL
+tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
+xfs_trans_reserve_quota_nblks_47313 xfs_trans_reserve_quota_nblks 0 47313 NULL
+nouveau_fb_create__47316 nouveau_fb_create_ 4 47316 NULL
@@ -121393,6 +121917,7 @@ index 0000000..2f37382
+compat_SyS_preadv64_48469 compat_SyS_preadv64 3 48469 NULL
+ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
+r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
++ocfs2_refcount_cow_48495 ocfs2_refcount_cow 3 48495 NULL
+send_control_msg_48498 send_control_msg 6 48498 NULL
+count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
+diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
@@ -122051,6 +122576,7 @@ index 0000000..2f37382
+gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
+wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
+qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 NULL
++gfs2_dir_read_data_55327 gfs2_dir_read_data 3 55327 NULL
+__vxge_hw_vpath_initialize_55328 __vxge_hw_vpath_initialize 2 55328 NULL
+vme_user_read_55338 vme_user_read 3 55338 NULL
+__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 NULL nohasharray
@@ -122317,6 +122843,7 @@ index 0000000..2f37382
+key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
+ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
+ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
++do_rx_dma_57996 do_rx_dma 5 57996 NULL
+rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
+iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
+io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
@@ -122475,6 +123002,7 @@ index 0000000..2f37382
+cap_inode_need_killpriv_59766 cap_inode_need_killpriv 0 59766 &long_retry_limit_read_59766
+venus_remove_59781 venus_remove 4 59781 NULL
+mei_nfc_recv_59784 mei_nfc_recv 3 59784 NULL
++C_SYSC_preadv_59801 C_SYSC_preadv 3 59801 NULL
+ipw_write_59807 ipw_write 3 59807 NULL
+scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
+ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
@@ -122612,6 +123140,7 @@ index 0000000..2f37382
+f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
+debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
+system_enable_write_61396 system_enable_write 3 61396 NULL
++xfs_zero_remaining_bytes_61423 xfs_zero_remaining_bytes 3 61423 NULL
+unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
+snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
+btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
@@ -122667,6 +123196,7 @@ index 0000000..2f37382
+il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
+squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
+fix_read_error_61965 fix_read_error 4 61965 NULL
++ocfs2_quota_write_61972 ocfs2_quota_write 4-5 61972 NULL
+fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
+cow_file_range_61979 cow_file_range 3 61979 NULL
+set_extent_delalloc_61982 set_extent_delalloc 0 61982 NULL
@@ -122722,6 +123252,7 @@ index 0000000..2f37382
+link_send_sections_long_62557 link_send_sections_long 3 62557 NULL
+compute_bitstructs_62570 compute_bitstructs 0 62570 NULL
+xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
++compat_SyS_rt_sigpending_62580 compat_SyS_rt_sigpending 2 62580 NULL
+get_subdir_62581 get_subdir 3 62581 NULL
+nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 NULL
+tipc_port_recv_sections_62609 tipc_port_recv_sections 3 62609 NULL
@@ -122814,6 +123345,7 @@ index 0000000..2f37382
+spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
+mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
+copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
++prepare_copy_63826 prepare_copy 2 63826 NULL
+sel_write_load_63830 sel_write_load 3 63830 NULL
+ll_readlink_63836 ll_readlink 3 63836 NULL
+proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
@@ -124442,44 +124974,6 @@ index 0a578fe..b81f62d 100644
0; \
})
-diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
-index 714b949..1f0dc1e 100644
---- a/virt/kvm/iommu.c
-+++ b/virt/kvm/iommu.c
-@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages);
-
- static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
-- unsigned long size)
-+ unsigned long npages)
- {
- gfn_t end_gfn;
- pfn_t pfn;
-
- pfn = gfn_to_pfn_memslot(slot, gfn);
-- end_gfn = gfn + (size >> PAGE_SHIFT);
-+ end_gfn = gfn + npages;
- gfn += 1;
-
- if (is_error_noslot_pfn(pfn))
-@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
- * Pin all pages we are about to map in memory. This is
- * important because we unmap and unpin in 4kb steps later.
- */
-- pfn = kvm_pin_pages(slot, gfn, page_size);
-+ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
- if (is_error_noslot_pfn(pfn)) {
- gfn += 1;
- continue;
-@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
- if (r) {
- printk(KERN_ERR "kvm_iommu_map_address:"
- "iommu failed to map pfn=%llx\n", pfn);
-- kvm_unpin_pages(kvm, pfn, page_size);
-+ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
- goto unmap_pages;
- }
-
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6611253..eb4bc0f 100644
--- a/virt/kvm/kvm_main.c
diff --git a/3.14.23/4425_grsec_remove_EI_PAX.patch b/3.14.24/4425_grsec_remove_EI_PAX.patch
index fc51f79..fc51f79 100644
--- a/3.14.23/4425_grsec_remove_EI_PAX.patch
+++ b/3.14.24/4425_grsec_remove_EI_PAX.patch
diff --git a/3.14.23/4427_force_XATTR_PAX_tmpfs.patch b/3.14.24/4427_force_XATTR_PAX_tmpfs.patch
index dcc7fb5..dcc7fb5 100644
--- a/3.14.23/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.14.24/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.14.23/4430_grsec-remove-localversion-grsec.patch b/3.14.24/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.14.23/4430_grsec-remove-localversion-grsec.patch
+++ b/3.14.24/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.14.23/4435_grsec-mute-warnings.patch b/3.14.24/4435_grsec-mute-warnings.patch
index 392cefb..392cefb 100644
--- a/3.14.23/4435_grsec-mute-warnings.patch
+++ b/3.14.24/4435_grsec-mute-warnings.patch
diff --git a/3.14.23/4440_grsec-remove-protected-paths.patch b/3.14.24/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.14.23/4440_grsec-remove-protected-paths.patch
+++ b/3.14.24/4440_grsec-remove-protected-paths.patch
diff --git a/3.14.23/4450_grsec-kconfig-default-gids.patch b/3.14.24/4450_grsec-kconfig-default-gids.patch
index ff7afeb..ff7afeb 100644
--- a/3.14.23/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.24/4450_grsec-kconfig-default-gids.patch
diff --git a/3.14.23/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.24/4465_selinux-avc_audit-log-curr_ip.patch
index f92c155..f92c155 100644
--- a/3.14.23/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.14.24/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.14.23/4470_disable-compat_vdso.patch b/3.14.24/4470_disable-compat_vdso.patch
index d5eed75..d5eed75 100644
--- a/3.14.23/4470_disable-compat_vdso.patch
+++ b/3.14.24/4470_disable-compat_vdso.patch
diff --git a/3.14.23/4475_emutramp_default_on.patch b/3.14.24/4475_emutramp_default_on.patch
index cf88fd9..cf88fd9 100644
--- a/3.14.23/4475_emutramp_default_on.patch
+++ b/3.14.24/4475_emutramp_default_on.patch
diff --git a/3.14.23/0000_README b/3.17.3/0000_README
index 3f5888e..854f93e 100644
--- a/3.14.23/0000_README
+++ b/3.17.3/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.23-201411062033.patch
+Patch: 1002_linux-3.17.3.patch
+From: http://www.kernel.org
+Desc: Linux 3.17.3
+
+Patch: 4420_grsecurity-3.0-3.17.3-201411150027.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.17.3/1002_linux-3.17.3.patch b/3.17.3/1002_linux-3.17.3.patch
new file mode 100644
index 0000000..b3f3caa
--- /dev/null
+++ b/3.17.3/1002_linux-3.17.3.patch
@@ -0,0 +1,11840 @@
+diff --git a/Makefile b/Makefile
+index 390afde..57a45b1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 17
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Shuffling Zombie Juror
+
+diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
+index 4f31b2e..398064c 100644
+--- a/arch/arc/boot/dts/nsimosci.dts
++++ b/arch/arc/boot/dts/nsimosci.dts
+@@ -20,7 +20,7 @@
+ /* this is for console on PGU */
+ /* bootargs = "console=tty0 consoleblank=0"; */
+ /* this is for console on serial */
+- bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
++ bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ };
+
+ aliases {
+diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
+index 372466b..a9cbabe 100644
+--- a/arch/arc/include/asm/arcregs.h
++++ b/arch/arc/include/asm/arcregs.h
+@@ -191,14 +191,6 @@
+ #define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+ #define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+-/* These DPFP regs need to be saved/restored across ctx-sw */
+-struct arc_fpu {
+- struct {
+- unsigned int l, h;
+- } aux_dpfp[2];
+-};
+-#endif
+
+ /*
+ ***************************************************************
+diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
+index b65fca7..fea9316 100644
+--- a/arch/arc/include/asm/kgdb.h
++++ b/arch/arc/include/asm/kgdb.h
+@@ -19,7 +19,7 @@
+ * register API yet */
+ #undef DBG_MAX_REG_NUM
+
+-#define GDB_MAX_REGS 39
++#define GDB_MAX_REGS 87
+
+ #define BREAK_INSTR_SIZE 2
+ #define CACHE_FLUSH_IS_SAFE 1
+@@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
+
+ extern void kgdb_trap(struct pt_regs *regs);
+
+-enum arc700_linux_regnums {
++/* This is the numbering of registers according to the GDB. See GDB's
++ * arc-tdep.h for details.
++ *
++ * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
++enum arc_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+- _BTA = 27,
+- _LP_START = 28,
+- _LP_END = 29,
+- _LP_COUNT = 30,
+- _STATUS32 = 31,
+- _BLINK = 32,
+- _FP = 33,
+- __SP = 34,
+- _EFA = 35,
+- _RET = 36,
+- _ORIG_R8 = 37,
+- _STOP_PC = 38
++ _FP = 27,
++ __SP = 28,
++ _R30 = 30,
++ _BLINK = 31,
++ _LP_COUNT = 60,
++ _STOP_PC = 64,
++ _RET = 64,
++ _LP_START = 65,
++ _LP_END = 66,
++ _STATUS32 = 67,
++ _ECR = 76,
++ _BTA = 82,
+ };
+
+ #else
+diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
+index 82588f3..38175c0 100644
+--- a/arch/arc/include/asm/processor.h
++++ b/arch/arc/include/asm/processor.h
+@@ -20,6 +20,15 @@
+
+ #include <asm/ptrace.h>
+
++#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
++/* These DPFP regs need to be saved/restored across ctx-sw */
++struct arc_fpu {
++ struct {
++ unsigned int l, h;
++ } aux_dpfp[2];
++};
++#endif
++
+ /* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index b11ad54..2f78e54 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -1142,7 +1142,7 @@ config DEBUG_UART_VIRT
+ default 0xf1c28000 if DEBUG_SUNXI_UART0
+ default 0xf1c28400 if DEBUG_SUNXI_UART1
+ default 0xf1f02800 if DEBUG_SUNXI_R_UART
+- default 0xf2100000 if DEBUG_PXA_UART1
++ default 0xf6200000 if DEBUG_PXA_UART1
+ default 0xf4090000 if ARCH_LPC32XX
+ default 0xf4200000 if ARCH_GEMINI
+ default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \
+diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts
+index 41afd9d..229140b 100644
+--- a/arch/arm/boot/dts/zynq-parallella.dts
++++ b/arch/arm/boot/dts/zynq-parallella.dts
+@@ -34,6 +34,10 @@
+ };
+ };
+
++&clkc {
++ fclk-enable = <0xf>;
++};
++
+ &gem0 {
+ status = "okay";
+ phy-mode = "rgmii-id";
+diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
+index bbf9df3..d28fe29 100644
+--- a/arch/arm/mach-pxa/include/mach/addr-map.h
++++ b/arch/arm/mach-pxa/include/mach/addr-map.h
+@@ -39,6 +39,11 @@
+ #define DMEMC_SIZE 0x00100000
+
+ /*
++ * Reserved space for low level debug virtual addresses within
++ * 0xf6200000..0xf6201000
++ */
++
++/*
+ * Internal Memory Controller (PXA27x and later)
+ */
+ #define IMEMC_PHYS 0x58000000
+diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
+index 992aaba..b463f2a 100644
+--- a/arch/mips/include/asm/ftrace.h
++++ b/arch/mips/include/asm/ftrace.h
+@@ -24,7 +24,7 @@ do { \
+ asm volatile ( \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
+- "2:\n" \
++ "2: .insn\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[tmp_err], 1\n" \
+@@ -46,7 +46,7 @@ do { \
+ asm volatile ( \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
+- "2:\n" \
++ "2: .insn\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[tmp_err], 1\n" \
+diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
+index bbcfb8b..91a3d19 100644
+--- a/arch/mips/include/uapi/asm/ptrace.h
++++ b/arch/mips/include/uapi/asm/ptrace.h
+@@ -9,6 +9,8 @@
+ #ifndef _UAPI_ASM_PTRACE_H
+ #define _UAPI_ASM_PTRACE_H
+
++#include <linux/types.h>
++
+ /* 0 - 31 are integer registers, 32 - 63 are fp registers. */
+ #define FPR_BASE 32
+ #define PC 64
+diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
+index a217061..462e34d 100644
+--- a/arch/mips/loongson/lemote-2f/clock.c
++++ b/arch/mips/loongson/lemote-2f/clock.c
+@@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
+
+ int clk_set_rate(struct clk *clk, unsigned long rate)
+ {
++ unsigned int rate_khz = rate / 1000;
+ struct cpufreq_frequency_table *pos;
+ int ret = 0;
+ int regval;
+@@ -107,9 +108,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
+ propagate_rate(clk);
+
+ cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
+- if (rate == pos->frequency)
++ if (rate_khz == pos->frequency)
+ break;
+- if (rate != pos->frequency)
++ if (rate_khz != pos->frequency)
+ return -ENOTSUPP;
+
+ clk->rate = rate;
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 7a47277..51a0fde 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1023,7 +1023,7 @@ emul:
+ goto emul;
+
+ case cop1x_op:
+- if (cpu_has_mips_4_5 || cpu_has_mips64)
++ if (cpu_has_mips_4_5 || cpu_has_mips64 || cpu_has_mips32r2)
+ /* its one of ours */
+ goto emul;
+
+@@ -1068,7 +1068,7 @@ emul:
+ break;
+
+ case cop1x_op:
+- if (!cpu_has_mips_4_5 && !cpu_has_mips64)
++ if (!cpu_has_mips_4_5 && !cpu_has_mips64 && !cpu_has_mips32r2)
+ return SIGILL;
+
+ sig = fpux_emu(xcp, ctx, ir, fault_addr);
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index a08dd53..b5f228e 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1062,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+ struct mips_huge_tlb_info {
+ int huge_pte;
+ int restore_scratch;
++ bool need_reload_pte;
+ };
+
+ static struct mips_huge_tlb_info
+@@ -1076,6 +1077,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
+
+ rv.huge_pte = scratch;
+ rv.restore_scratch = 0;
++ rv.need_reload_pte = false;
+
+ if (check_for_high_segbits) {
+ UASM_i_MFC0(p, tmp, C0_BADVADDR);
+@@ -1264,6 +1266,7 @@ static void build_r4000_tlb_refill_handler(void)
+ } else {
+ htlb_info.huge_pte = K0;
+ htlb_info.restore_scratch = 0;
++ htlb_info.need_reload_pte = true;
+ vmalloc_mode = refill_noscratch;
+ /*
+ * create the plain linear handler
+@@ -1300,7 +1303,8 @@ static void build_r4000_tlb_refill_handler(void)
+ }
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ uasm_l_tlb_huge_update(&l, p);
+- UASM_i_LW(&p, K0, 0, K1);
++ if (htlb_info.need_reload_pte)
++ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
+ build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ htlb_info.restore_scratch);
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 5bbd1bc..0905c8d 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
+ 3:
+ #endif
+ bl save_nvgprs
++ /*
++ * Use a non volatile GPR to save and restore our thread_info flags
++ * across the call to restore_interrupts.
++ */
++ mr r30,r4
+ bl restore_interrupts
++ mr r4,r30
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_notify_resume
+ b ret_from_except
+diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
+index ad4b31d..e4169d6 100644
+--- a/arch/powerpc/platforms/powernv/opal-lpc.c
++++ b/arch/powerpc/platforms/powernv/opal-lpc.c
+@@ -216,14 +216,54 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
+ &data, len);
+ if (rc)
+ return -ENXIO;
++
++ /*
++ * Now there is some trickery with the data returned by OPAL
++ * as it's the desired data right justified in a 32-bit BE
++ * word.
++ *
++ * This is a very bad interface and I'm to blame for it :-(
++ *
++ * So we can't just apply a 32-bit swap to what comes from OPAL,
++ * because user space expects the *bytes* to be in their proper
++ * respective positions (ie, LPC position).
++ *
++ * So what we really want to do here is to shift data right
++ * appropriately on a LE kernel.
++ *
++ * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that
++ * order, we have in memory written to by OPAL at the "data"
++ * pointer:
++ *
++ * Bytes: OPAL "data" LE "data"
++ * 32-bit: B0 B1 B2 B3 B0B1B2B3 B3B2B1B0
++ * 16-bit: B0 B1 0000B0B1 B1B00000
++ * 8-bit: B0 000000B0 B0000000
++ *
++ * So a BE kernel will have the leftmost of the above in the MSB
++ * and rightmost in the LSB and can just then "cast" the u32 "data"
++ * down to the appropriate quantity and write it.
++ *
++ * However, an LE kernel can't. It doesn't need to swap because a
++ * load from data followed by a store to user are going to preserve
++ * the byte ordering which is the wire byte order which is what the
++ * user wants, but in order to "crop" to the right size, we need to
++ * shift right first.
++ */
+ switch(len) {
+ case 4:
+ rc = __put_user((u32)data, (u32 __user *)ubuf);
+ break;
+ case 2:
++#ifdef __LITTLE_ENDIAN__
++ data >>= 16;
++#endif
+ rc = __put_user((u16)data, (u16 __user *)ubuf);
+ break;
+ default:
++#ifdef __LITTLE_ENDIAN__
++ data >>= 24;
++#endif
+ rc = __put_user((u8)data, (u8 __user *)ubuf);
+ break;
+ }
+@@ -263,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf,
+ else if (todo > 1 && (pos & 1) == 0)
+ len = 2;
+ }
++
++ /*
++ * Similarly to the read case, we have some trickery here but
++ * it's different to handle. We need to pass the value to OPAL in
++ * a register whose layout depends on the access size. We want
++ * to reproduce the memory layout of the user, however we aren't
++ * doing a load from user and a store to another memory location
++ * which would achieve that. Here we pass the value to OPAL via
++ * a register which is expected to contain the "BE" interpretation
++ * of the byte sequence. IE: for a 32-bit access, byte 0 should be
++ * in the MSB. So here we *do* need to byteswap on LE.
++ *
++ * User bytes: LE "data" OPAL "data"
++ * 32-bit: B0 B1 B2 B3 B3B2B1B0 B0B1B2B3
++ * 16-bit: B0 B1 0000B1B0 0000B0B1
++ * 8-bit: B0 000000B0 000000B0
++ */
+ switch(len) {
+ case 4:
+ rc = __get_user(data, (u32 __user *)ubuf);
++ data = cpu_to_be32(data);
+ break;
+ case 2:
+ rc = __get_user(data, (u16 __user *)ubuf);
++ data = cpu_to_be16(data);
+ break;
+ default:
+ rc = __get_user(data, (u8 __user *)ubuf);
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index a2450b8..92eb35e 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -379,7 +379,7 @@ static int dlpar_online_cpu(struct device_node *dn)
+ BUG_ON(get_cpu_current_state(cpu)
+ != CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+- rc = cpu_up(cpu);
++ rc = device_online(get_cpu_device(cpu));
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+@@ -462,7 +462,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
+ if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+ set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+- rc = cpu_down(cpu);
++ rc = device_offline(get_cpu_device(cpu));
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
+index 355a16c..b93bed7 100644
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
+
+ static int __init topology_init(void)
+ {
+- if (!MACHINE_HAS_TOPOLOGY) {
++ if (MACHINE_HAS_TOPOLOGY)
++ set_topology_timer();
++ else
+ topology_update_polarization_simple();
+- goto out;
+- }
+- set_topology_timer();
+-out:
+-
+- set_sched_topology(s390_topology);
+-
+ return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
+ }
+ device_initcall(topology_init);
++
++static int __init early_topology_init(void)
++{
++ set_sched_topology(s390_topology);
++ return 0;
++}
++early_initcall(early_topology_init);
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+index 9139d14..538c10d 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
+ };
+
+ static struct resource scif0_resources[] = {
+- DEFINE_RES_MEM(0xfffffe80, 0x100),
++ DEFINE_RES_MEM(0xfffffe80, 0x10),
+ DEFINE_RES_IRQ(evt2irq(0x4e0)),
+ };
+
+@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
+ };
+
+ static struct resource scif1_resources[] = {
+- DEFINE_RES_MEM(0xa4000150, 0x100),
++ DEFINE_RES_MEM(0xa4000150, 0x10),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+ };
+
+@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
+ };
+
+ static struct resource scif2_resources[] = {
+- DEFINE_RES_MEM(0xa4000140, 0x100),
++ DEFINE_RES_MEM(0xa4000140, 0x10),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+ };
+
+diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
+index 3716e69..e8ab93c 100644
+--- a/arch/um/drivers/ubd_kern.c
++++ b/arch/um/drivers/ubd_kern.c
+@@ -1277,7 +1277,7 @@ static void do_ubd_request(struct request_queue *q)
+
+ while(1){
+ struct ubd *dev = q->queuedata;
+- if(dev->end_sg == 0){
++ if(dev->request == NULL){
+ struct request *req = blk_fetch_request(q);
+ if(req == NULL)
+ return;
+@@ -1299,7 +1299,8 @@ static void do_ubd_request(struct request_queue *q)
+ return;
+ }
+ prepare_flush_request(req, io_req);
+- submit_request(io_req, dev);
++ if (submit_request(io_req, dev) == false)
++ return;
+ }
+
+ while(dev->start_sg < dev->end_sg){
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 4299eb0..92a2e93 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
+ 1: movl (%rbp),%ebp
+ _ASM_EXTABLE(1b,ia32_badarg)
+ ASM_CLAC
++
++ /*
++ * Sysenter doesn't filter flags, so we need to clear NT
++ * ourselves. To save a few cycles, we can check whether
++ * NT was set instead of doing an unconditional popfq.
++ */
++ testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
++ jnz sysenter_fix_flags
++sysenter_flags_fixed:
++
+ orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ CFI_REMEMBER_STATE
+@@ -184,6 +194,8 @@ sysexit_from_sys_call:
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS_SYSEXIT32
+
++ CFI_RESTORE_STATE
++
+ #ifdef CONFIG_AUDITSYSCALL
+ .macro auditsys_entry_common
+ movl %esi,%r9d /* 6th arg: 4th syscall arg */
+@@ -226,7 +238,6 @@ sysexit_from_sys_call:
+ .endm
+
+ sysenter_auditsys:
+- CFI_RESTORE_STATE
+ auditsys_entry_common
+ movl %ebp,%r9d /* reload 6th syscall arg */
+ jmp sysenter_dispatch
+@@ -235,6 +246,11 @@ sysexit_audit:
+ auditsys_exit sysexit_from_sys_call
+ #endif
+
++sysenter_fix_flags:
++ pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
++ popfq_cfi
++ jmp sysenter_flags_fixed
++
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 1a055c8..ca3347a 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -160,8 +160,9 @@ do { \
+ #define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+-#define compat_elf_check_arch(x) \
+- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
++#define compat_elf_check_arch(x) \
++ (elf_check_arch_ia32(x) || \
++ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
+
+ #if __USER32_DS != __USER_DS
+ # error "The following code assumes __USER32_DS == __USER_DS"
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 92d3486..0d47ae1 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -991,6 +991,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+
++static inline u64 get_canonical(u64 la)
++{
++ return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++ return get_canonical(la) != la;
++#else
++ return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+@@ -1049,7 +1063,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+
+ void kvm_define_shared_msr(unsigned index, u32 msr);
+-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
++int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+
+ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index 0e79420..990a2fe 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -67,6 +67,7 @@
+ #define EXIT_REASON_EPT_MISCONFIG 49
+ #define EXIT_REASON_INVEPT 50
+ #define EXIT_REASON_PREEMPTION_TIMER 52
++#define EXIT_REASON_INVVPID 53
+ #define EXIT_REASON_WBINVD 54
+ #define EXIT_REASON_XSETBV 55
+ #define EXIT_REASON_APIC_WRITE 56
+@@ -114,6 +115,7 @@
+ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
+ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+ { EXIT_REASON_INVD, "INVD" }, \
++ { EXIT_REASON_INVVPID, "INVVPID" }, \
+ { EXIT_REASON_INVPCID, "INVPCID" }
+
+ #endif /* _UAPIVMX_H */
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index b436fc7..a142e77 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
+
+ /* Don't set up the ACPI SCI because it's already set up */
+ if (acpi_gbl_FADT.sci_interrupt == gsi)
+- return gsi;
++ return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
+
+ trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
+ polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
+@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
+
+ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+ {
+- int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
++ int irq;
+
+- if (irq >= 0) {
++ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
++ *irqp = gsi;
++ } else {
++ irq = mp_map_gsi_to_irq(gsi,
++ IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
++ if (irq < 0)
++ return -1;
+ *irqp = irq;
+- return 0;
+ }
+-
+- return -1;
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
+index af5b08a..7c38324 100644
+--- a/arch/x86/kernel/apb_timer.c
++++ b/arch/x86/kernel/apb_timer.c
+@@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
+
+ irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+ irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
+- /* APB timer irqs are set up as mp_irqs, timer is edge type */
+- __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
+ }
+
+ /* Should be called with per cpu */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 6776027..24b5894 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
+ unsigned int value, queued;
+ int i, j, acked = 0;
+ unsigned long long tsc = 0, ntsc;
+- long long max_loops = cpu_khz;
++ long long max_loops = cpu_khz ? cpu_khz : 1000000;
+
+ if (cpu_has_tsc)
+ rdtscll(tsc);
+@@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
+ break;
+ }
+ if (queued) {
+- if (cpu_has_tsc) {
++ if (cpu_has_tsc && cpu_khz) {
+ rdtscll(ntsc);
+ max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ } else
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index e4ab2b4..3126558 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1184,7 +1184,7 @@ void syscall_init(void)
+ /* Flags to clear on syscall */
+ wrmsrl(MSR_SYSCALL_MASK,
+ X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+- X86_EFLAGS_IOPL|X86_EFLAGS_AC);
++ X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
+ }
+
+ /*
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 50ce751..1ef4562 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -397,6 +397,13 @@ static void init_intel(struct cpuinfo_x86 *c)
+ }
+
+ l2 = init_intel_cacheinfo(c);
++
++ /* Detect legacy cache sizes if init_intel_cacheinfo did not */
++ if (l2 == 0) {
++ cpu_detect_cache_sizes(c);
++ l2 = c->x86_cache_size;
++ }
++
+ if (c->cpuid_level > 9) {
+ unsigned eax = cpuid_eax(10);
+ /* Check for version and the number of counters */
+@@ -500,6 +507,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+ */
+ if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
+ size = 256;
++
++ /*
++ * Intel Quark SoC X1000 contains a 4-way set associative
++ * 16K cache with a 16 byte cache line and 256 lines per tag
++ */
++ if ((c->x86 == 5) && (c->x86_model == 9))
++ size = 16;
+ return size;
+ }
+ #endif
+@@ -701,7 +715,8 @@ static const struct cpu_dev intel_cpu_dev = {
+ [3] = "OverDrive PODP5V83",
+ [4] = "Pentium MMX",
+ [7] = "Mobile Pentium 75 - 200",
+- [8] = "Mobile Pentium MMX"
++ [8] = "Mobile Pentium MMX",
++ [9] = "Quark SoC X1000",
+ }
+ },
+ { .family = 6, .model_names =
+diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
+index 9030e83..c957e11 100644
+--- a/arch/x86/kernel/iosf_mbi.c
++++ b/arch/x86/kernel/iosf_mbi.c
+@@ -26,6 +26,7 @@
+ #include <asm/iosf_mbi.h>
+
+ #define PCI_DEVICE_ID_BAYTRAIL 0x0F00
++#define PCI_DEVICE_ID_BRASWELL 0x2280
+ #define PCI_DEVICE_ID_QUARK_X1000 0x0958
+
+ static DEFINE_SPINLOCK(iosf_mbi_lock);
+@@ -204,6 +205,7 @@ static int iosf_mbi_probe(struct pci_dev *pdev,
+
+ static const struct pci_device_id iosf_mbi_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
+ { 0, },
+ };
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 2851d63..ed37a76 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -675,6 +675,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ * handler too.
+ */
+ regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
++ /*
++ * Ensure the signal handler starts with the new fpu state.
++ */
++ if (used_math())
++ drop_init_fpu(current);
+ }
+ signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
+ }
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index b6025f9..b7e50bb 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1166,14 +1166,17 @@ void __init tsc_init(void)
+
+ x86_init.timers.tsc_pre_init();
+
+- if (!cpu_has_tsc)
++ if (!cpu_has_tsc) {
++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
++ }
+
+ tsc_khz = x86_platform.calibrate_tsc();
+ cpu_khz = tsc_khz;
+
+ if (!tsc_khz) {
+ mark_tsc_unstable("could not calculate TSC khz");
++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ return;
+ }
+
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index 940b142..4c540c4 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -271,8 +271,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
+ return -1;
+
+- drop_init_fpu(tsk); /* trigger finit */
+-
+ return 0;
+ }
+
+@@ -402,8 +400,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ set_used_math();
+ }
+
+- if (use_eager_fpu())
++ if (use_eager_fpu()) {
++ preempt_disable();
+ math_state_restore();
++ preempt_enable();
++ }
+
+ return err;
+ } else {
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 03954f7..77c77fe 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+ masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
+ }
+
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+- register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ u32 limit = get_desc_limit(desc);
+@@ -568,6 +563,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+ return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++ int cs_l)
++{
++ switch (ctxt->op_bytes) {
++ case 2:
++ ctxt->_eip = (u16)dst;
++ break;
++ case 4:
++ ctxt->_eip = (u32)dst;
++ break;
++#ifdef CONFIG_X86_64
++ case 8:
++ if ((cs_l && is_noncanonical_address(dst)) ||
++ (!cs_l && (dst >> 32) != 0))
++ return emulate_gp(ctxt, 0);
++ ctxt->_eip = dst;
++ break;
++#endif
++ default:
++ WARN(1, "unsupported eip assignment size\n");
++ }
++ return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
++}
++
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++ return assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ u16 selector;
+@@ -613,7 +642,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+
+ static int __linearize(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+- unsigned size, bool write, bool fetch,
++ unsigned *max_size, unsigned size,
++ bool write, bool fetch,
+ ulong *linear)
+ {
+ struct desc_struct desc;
+@@ -624,10 +654,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
+ unsigned cpl;
+
+ la = seg_base(ctxt, addr.seg) + addr.ea;
++ *max_size = 0;
+ switch (ctxt->mode) {
+ case X86EMUL_MODE_PROT64:
+ if (((signed long)la << 16) >> 16 != la)
+ return emulate_gp(ctxt, 0);
++
++ *max_size = min_t(u64, ~0u, (1ull << 48) - la);
++ if (size > *max_size)
++ goto bad;
+ break;
+ default:
+ usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+@@ -645,20 +680,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
+ if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
+ (ctxt->d & NoBigReal)) {
+ /* la is between zero and 0xffff */
+- if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
++ if (la > 0xffff)
+ goto bad;
++ *max_size = 0x10000 - la;
+ } else if ((desc.type & 8) || !(desc.type & 4)) {
+ /* expand-up segment */
+- if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
++ if (addr.ea > lim)
+ goto bad;
++ *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
+ } else {
+ /* expand-down segment */
+- if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
++ if (addr.ea <= lim)
+ goto bad;
+ lim = desc.d ? 0xffffffff : 0xffff;
+- if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
++ if (addr.ea > lim)
+ goto bad;
++ *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
+ }
++ if (size > *max_size)
++ goto bad;
+ cpl = ctxt->ops->cpl(ctxt);
+ if (!(desc.type & 8)) {
+ /* data segment */
+@@ -693,7 +733,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
+ unsigned size, bool write,
+ ulong *linear)
+ {
+- return __linearize(ctxt, addr, size, write, false, linear);
++ unsigned max_size;
++ return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+ }
+
+
+@@ -718,17 +759,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
+ {
+ int rc;
+- unsigned size;
++ unsigned size, max_size;
+ unsigned long linear;
+ int cur_size = ctxt->fetch.end - ctxt->fetch.data;
+ struct segmented_address addr = { .seg = VCPU_SREG_CS,
+ .ea = ctxt->eip + cur_size };
+
+- size = 15UL ^ cur_size;
+- rc = __linearize(ctxt, addr, size, false, true, &linear);
++ /*
++ * We do not know exactly how many bytes will be needed, and
++ * __linearize is expensive, so fetch as much as possible. We
++ * just have to avoid going beyond the 15 byte limit, the end
++ * of the segment, or the end of the page.
++ *
++ * __linearize is called with size 0 so that it does not do any
++ * boundary check itself. Instead, we use max_size to check
++ * against op_size.
++ */
++ rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+ if (unlikely(rc != X86EMUL_CONTINUE))
+ return rc;
+
++ size = min_t(unsigned, 15UL ^ cur_size, max_size);
+ size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
+
+ /*
+@@ -738,7 +789,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
+ * still, we must have hit the 15-byte boundary.
+ */
+ if (unlikely(size < op_size))
+- return X86EMUL_UNHANDLEABLE;
++ return emulate_gp(ctxt, 0);
++
+ rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
+ size, &ctxt->exception);
+ if (unlikely(rc != X86EMUL_CONTINUE))
+@@ -750,8 +802,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
+ static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
+ unsigned size)
+ {
+- if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
+- return __do_insn_fetch_bytes(ctxt, size);
++ unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
++
++ if (unlikely(done_size < size))
++ return __do_insn_fetch_bytes(ctxt, size - done_size);
+ else
+ return X86EMUL_CONTINUE;
+ }
+@@ -1415,7 +1469,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+
+ /* Does not support long mode */
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+- u16 selector, int seg, u8 cpl, bool in_task_switch)
++ u16 selector, int seg, u8 cpl,
++ bool in_task_switch,
++ struct desc_struct *desc)
+ {
+ struct desc_struct seg_desc, old_desc;
+ u8 dpl, rpl;
+@@ -1547,6 +1603,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ }
+ load:
+ ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
++ if (desc)
++ *desc = seg_desc;
+ return X86EMUL_CONTINUE;
+ exception:
+ emulate_exception(ctxt, err_vec, err_code, true);
+@@ -1557,7 +1615,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ u16 selector, int seg)
+ {
+ u8 cpl = ctxt->ops->cpl(ctxt);
+- return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
++ return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
+ }
+
+ static void write_register_operand(struct operand *op)
+@@ -1951,17 +2009,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+- unsigned short sel;
++ unsigned short sel, old_sel;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++ u8 cpl = ctxt->ops->cpl(ctxt);
++
++ /* Assignment of RIP may only fail in 64-bit mode */
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
++ VCPU_SREG_CS);
+
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+
+- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
++ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
++ &new_desc);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- ctxt->_eip = 0;
+- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+- return X86EMUL_CONTINUE;
++ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++ if (rc != X86EMUL_CONTINUE) {
++ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++ /* assigning eip failed; restore the old cs */
++ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
++ return rc;
++ }
++ return rc;
+ }
+
+ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+@@ -1972,13 +2044,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+ case 2: /* call near abs */ {
+ long int old_eip;
+ old_eip = ctxt->_eip;
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
++ if (rc != X86EMUL_CONTINUE)
++ break;
+ ctxt->src.val = old_eip;
+ rc = em_push(ctxt);
+ break;
+ }
+ case 4: /* jmp abs */
+- ctxt->_eip = ctxt->src.val;
++ rc = assign_eip_near(ctxt, ctxt->src.val);
+ break;
+ case 5: /* jmp far */
+ rc = em_jmp_far(ctxt);
+@@ -2013,30 +2087,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
+
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- return em_pop(ctxt);
++ int rc;
++ unsigned long eip;
++
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
++ return assign_eip_near(ctxt, eip);
+ }
+
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
+- unsigned long cs;
++ unsigned long eip, cs;
++ u16 old_cs;
+ int cpl = ctxt->ops->cpl(ctxt);
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
+
+- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
++ VCPU_SREG_CS);
++
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- if (ctxt->op_bytes == 4)
+- ctxt->_eip = (u32)ctxt->_eip;
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ /* Outer-privilege level return is not implemented */
+ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ return X86EMUL_UNHANDLEABLE;
+- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
++ &new_desc);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_far(ctxt, eip, new_desc.l);
++ if (rc != X86EMUL_CONTINUE) {
++ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++ }
+ return rc;
+ }
+
+@@ -2297,7 +2388,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ {
+ const struct x86_emulate_ops *ops = ctxt->ops;
+ struct desc_struct cs, ss;
+- u64 msr_data;
++ u64 msr_data, rcx, rdx;
+ int usermode;
+ u16 cs_sel = 0, ss_sel = 0;
+
+@@ -2313,6 +2404,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ else
+ usermode = X86EMUL_MODE_PROT32;
+
++ rcx = reg_read(ctxt, VCPU_REGS_RCX);
++ rdx = reg_read(ctxt, VCPU_REGS_RDX);
++
+ cs.dpl = 3;
+ ss.dpl = 3;
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2330,6 +2424,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ss_sel = cs_sel + 8;
+ cs.d = 0;
+ cs.l = 1;
++ if (is_noncanonical_address(rcx) ||
++ is_noncanonical_address(rdx))
++ return emulate_gp(ctxt, 0);
+ break;
+ }
+ cs_sel |= SELECTOR_RPL_MASK;
+@@ -2338,8 +2435,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
+- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
++ ctxt->_eip = rdx;
++ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+
+ return X86EMUL_CONTINUE;
+ }
+@@ -2457,19 +2554,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ * Now load segment descriptors. If fault happens at this stage
+ * it is handled in a context of new task
+ */
+- ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2594,25 +2696,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ * Now load segment descriptors. If fault happenes at this stage
+ * it is handled in a context of new task
+ */
+- ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
++ cpl, true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+- ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
++ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
++ true, NULL);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+@@ -2880,10 +2989,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
+
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc;
+ long rel = ctxt->src.val;
+
+ ctxt->src.val = (unsigned long)ctxt->_eip;
+- jmp_rel(ctxt, rel);
++ rc = jmp_rel(ctxt, rel);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
+ return em_push(ctxt);
+ }
+
+@@ -2892,34 +3004,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ u16 sel, old_cs;
+ ulong old_eip;
+ int rc;
++ struct desc_struct old_desc, new_desc;
++ const struct x86_emulate_ops *ops = ctxt->ops;
++ int cpl = ctxt->ops->cpl(ctxt);
+
+- old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ old_eip = ctxt->_eip;
++ ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
+
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+- if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
++ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
++ &new_desc);
++ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_CONTINUE;
+
+- ctxt->_eip = 0;
+- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
++ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++ if (rc != X86EMUL_CONTINUE)
++ goto fail;
+
+ ctxt->src.val = old_cs;
+ rc = em_push(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+- return rc;
++ goto fail;
+
+ ctxt->src.val = old_eip;
+- return em_push(ctxt);
++ rc = em_push(ctxt);
++ /* If we failed, we tainted the memory, but the very least we should
++ restore cs */
++ if (rc != X86EMUL_CONTINUE)
++ goto fail;
++ return rc;
++fail:
++ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++ return rc;
++
+ }
+
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ int rc;
++ unsigned long eip;
+
+- ctxt->dst.type = OP_REG;
+- ctxt->dst.addr.reg = &ctxt->_eip;
+- ctxt->dst.bytes = ctxt->op_bytes;
+- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ rc = assign_eip_near(ctxt, eip);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ rsp_increment(ctxt, ctxt->src.val);
+@@ -3250,20 +3378,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
+
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+ if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
+ (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++ int rc = X86EMUL_CONTINUE;
++
+ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+
+- return X86EMUL_CONTINUE;
++ return rc;
+ }
+
+ static int em_in(struct x86_emulate_ctxt *ctxt)
+@@ -3351,6 +3483,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
+ return X86EMUL_CONTINUE;
+ }
+
++static int em_clflush(struct x86_emulate_ctxt *ctxt)
++{
++ /* emulating clflush regardless of cpuid */
++ return X86EMUL_CONTINUE;
++}
++
+ static bool valid_cr(int nr)
+ {
+ switch (nr) {
+@@ -3683,6 +3821,16 @@ static const struct opcode group11[] = {
+ X7(D(Undefined)),
+ };
+
++static const struct gprefix pfx_0f_ae_7 = {
++ I(SrcMem | ByteOp, em_clflush), N, N, N,
++};
++
++static const struct group_dual group15 = { {
++ N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
++}, {
++ N, N, N, N, N, N, N, N,
++} };
++
+ static const struct gprefix pfx_0f_6f_0f_7f = {
+ I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
+ };
+@@ -3887,10 +4035,11 @@ static const struct opcode twobyte_table[256] = {
+ N, I(ImplicitOps | EmulateOnUD, em_syscall),
+ II(ImplicitOps | Priv, em_clts, clts), N,
+ DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
+- N, D(ImplicitOps | ModRM), N, N,
++ N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
+ /* 0x10 - 0x1F */
+ N, N, N, N, N, N, N, N,
+- D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
++ D(ImplicitOps | ModRM | SrcMem | NoAccess),
++ N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
+ /* 0x20 - 0x2F */
+ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
+ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
+@@ -3942,7 +4091,7 @@ static const struct opcode twobyte_table[256] = {
+ F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
+ F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
+ F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
+- D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
++ GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
+ /* 0xB0 - 0xB7 */
+ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
+ I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
+@@ -4458,10 +4607,10 @@ done_prefixes:
+ /* Decode and fetch the destination operand: register or memory. */
+ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
+
+-done:
+ if (ctxt->rip_relative)
+ ctxt->memopp->addr.mem.ea += ctxt->_eip;
+
++done:
+ return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
+ }
+
+@@ -4711,7 +4860,7 @@ special_insn:
+ break;
+ case 0x70 ... 0x7f: /* jcc (short) */
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x8d: /* lea r16/r32, m */
+ ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -4741,7 +4890,7 @@ special_insn:
+ break;
+ case 0xe9: /* jmp rel */
+ case 0xeb: /* jmp rel short */
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xf4: /* hlt */
+@@ -4864,13 +5013,11 @@ twobyte_insn:
+ break;
+ case 0x80 ... 0x8f: /* jnz rel, etc*/
+ if (test_cc(ctxt->b, ctxt->eflags))
+- jmp_rel(ctxt, ctxt->src.val);
++ rc = jmp_rel(ctxt, ctxt->src.val);
+ break;
+ case 0x90 ... 0x9f: /* setcc r/m8 */
+ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
+ break;
+- case 0xae: /* clflush */
+- break;
+ case 0xb6 ... 0xb7: /* movzx */
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 518d864..298781d 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+ return;
+
+ timer = &pit->pit_state.timer;
++ mutex_lock(&pit->pit_state.lock);
+ if (hrtimer_cancel(timer))
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++ mutex_unlock(&pit->pit_state.lock);
+ }
+
+ static void destroy_pit_timer(struct kvm_pit *pit)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index ddf7427..78dadc3 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3234,7 +3234,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
+ msr.host_initiated = false;
+
+ svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+- if (svm_set_msr(&svm->vcpu, &msr)) {
++ if (kvm_set_msr(&svm->vcpu, &msr)) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(&svm->vcpu, 0);
+ } else {
+@@ -3534,9 +3534,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
+
+ if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+ || !svm_exit_handlers[exit_code]) {
+- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+- kvm_run->hw.hardware_exit_reason = exit_code;
+- return 0;
++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
+ }
+
+ return svm_exit_handlers[exit_code](svm);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 6a118fa..41a5426 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2632,12 +2632,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ default:
+ msr = find_msr_entry(vmx, msr_index);
+ if (msr) {
++ u64 old_msr_data = msr->data;
+ msr->data = data;
+ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ preempt_disable();
+- kvm_set_shared_msr(msr->index, msr->data,
+- msr->mask);
++ ret = kvm_set_shared_msr(msr->index, msr->data,
++ msr->mask);
+ preempt_enable();
++ if (ret)
++ msr->data = old_msr_data;
+ }
+ break;
+ }
+@@ -5263,7 +5266,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
+ msr.data = data;
+ msr.index = ecx;
+ msr.host_initiated = false;
+- if (vmx_set_msr(vcpu, &msr) != 0) {
++ if (kvm_set_msr(vcpu, &msr) != 0) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+@@ -6636,6 +6639,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ return 1;
+ }
+
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++}
++
+ /*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6681,6 +6690,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
+ [EXIT_REASON_INVEPT] = handle_invept,
++ [EXIT_REASON_INVVPID] = handle_invvpid,
+ };
+
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6914,7 +6924,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+- case EXIT_REASON_INVEPT:
++ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -7055,10 +7065,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ && kvm_vmx_exit_handlers[exit_reason])
+ return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ else {
+- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+- vcpu->run->hw.hardware_exit_reason = exit_reason;
++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
+ }
+- return 0;
+ }
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8f1e22d..9d292e8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
+ shared_msr_update(i, shared_msrs_global.msrs[i]);
+ }
+
+-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+ {
+ unsigned int cpu = smp_processor_id();
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++ int err;
+
+ if (((value ^ smsr->values[slot].curr) & mask) == 0)
+- return;
++ return 0;
+ smsr->values[slot].curr = value;
+- wrmsrl(shared_msrs_global.msrs[slot], value);
++ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
++ if (err)
++ return 1;
++
+ if (!smsr->registered) {
+ smsr->urn.on_user_return = kvm_on_user_return;
+ user_return_notifier_register(&smsr->urn);
+ smsr->registered = true;
+ }
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+
+@@ -984,7 +989,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
+-
+ /*
+ * Writes msr value into into the appropriate "register".
+ * Returns 0 on success, non-0 otherwise.
+@@ -992,8 +996,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ {
++ switch (msr->index) {
++ case MSR_FS_BASE:
++ case MSR_GS_BASE:
++ case MSR_KERNEL_GS_BASE:
++ case MSR_CSTAR:
++ case MSR_LSTAR:
++ if (is_noncanonical_address(msr->data))
++ return 1;
++ break;
++ case MSR_IA32_SYSENTER_EIP:
++ case MSR_IA32_SYSENTER_ESP:
++ /*
++ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
++ * non-canonical address is written on Intel but not on
++ * AMD (which ignores the top 32-bits, because it does
++ * not implement 64-bit SYSENTER).
++ *
++ * 64-bit code should hence be able to write a non-canonical
++ * value on AMD. Making the address canonical ensures that
++ * vmentry does not fail on Intel after writing a non-canonical
++ * value, and that something deterministic happens if the guest
++ * invokes 64-bit SYSENTER.
++ */
++ msr->data = get_canonical(msr->data);
++ }
+ return kvm_x86_ops->set_msr(vcpu, msr);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+
+ /*
+ * Adapt set_msr() to msr_io()'s calling convention
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index ae242a7..36de293 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
+ psize = page_level_size(level);
+ pmask = page_level_mask(level);
+ offset = virt_addr & ~pmask;
+- phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
++ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ return (phys_addr | offset);
+ }
+ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 5c8cb80..c881ba8 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -211,12 +211,17 @@ struct jit_context {
+ bool seen_ld_abs;
+ };
+
++/* maximum number of bytes emitted while JITing one eBPF insn */
++#define BPF_MAX_INSN_SIZE 128
++#define BPF_INSN_SAFETY 64
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ int oldproglen, struct jit_context *ctx)
+ {
+ struct bpf_insn *insn = bpf_prog->insnsi;
+ int insn_cnt = bpf_prog->len;
+- u8 temp[64];
++ bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
++ u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
+ int i;
+ int proglen = 0;
+ u8 *prog = temp;
+@@ -254,7 +259,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ EMIT2(0x31, 0xc0); /* xor eax, eax */
+ EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
+
+- if (ctx->seen_ld_abs) {
++ if (seen_ld_abs) {
+ /* r9d : skb->len - skb->data_len (headlen)
+ * r10 : skb->data
+ */
+@@ -655,7 +660,7 @@ xadd: if (is_imm8(insn->off))
+ case BPF_JMP | BPF_CALL:
+ func = (u8 *) __bpf_call_base + imm32;
+ jmp_offset = func - (image + addrs[i]);
+- if (ctx->seen_ld_abs) {
++ if (seen_ld_abs) {
+ EMIT2(0x41, 0x52); /* push %r10 */
+ EMIT2(0x41, 0x51); /* push %r9 */
+ /* need to adjust jmp offset, since
+@@ -669,7 +674,7 @@ xadd: if (is_imm8(insn->off))
+ return -EINVAL;
+ }
+ EMIT1_off32(0xE8, jmp_offset);
+- if (ctx->seen_ld_abs) {
++ if (seen_ld_abs) {
+ EMIT2(0x41, 0x59); /* pop %r9 */
+ EMIT2(0x41, 0x5A); /* pop %r10 */
+ }
+@@ -774,7 +779,8 @@ emit_jmp:
+ goto common_load;
+ case BPF_LD | BPF_ABS | BPF_W:
+ func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
+-common_load: ctx->seen_ld_abs = true;
++common_load:
++ ctx->seen_ld_abs = seen_ld_abs = true;
+ jmp_offset = func - (image + addrs[i]);
+ if (!func || !is_simm32(jmp_offset)) {
+ pr_err("unsupported bpf func %d addr %p image %p\n",
+@@ -848,6 +854,11 @@ common_load: ctx->seen_ld_abs = true;
+ }
+
+ ilen = prog - temp;
++ if (ilen > BPF_MAX_INSN_SIZE) {
++ pr_err("bpf_jit_compile fatal insn size error\n");
++ return -EFAULT;
++ }
++
+ if (image) {
+ if (unlikely(proglen + ilen > oldproglen)) {
+ pr_err("bpf_jit_compile fatal error\n");
+@@ -904,9 +915,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+ goto out;
+ }
+ if (image) {
+- if (proglen != oldproglen)
++ if (proglen != oldproglen) {
+ pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+ proglen, oldproglen);
++ goto out;
++ }
+ break;
+ }
+ if (proglen == oldproglen) {
+diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
+index 3c53a90..c14ad34 100644
+--- a/arch/x86/platform/intel-mid/sfi.c
++++ b/arch/x86/platform/intel-mid/sfi.c
+@@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
++ mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
+ }
+
+ return 0;
+@@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
+ mp_irq.dstapic = MP_APIC_ALL;
+ mp_irq.dstirq = pentry->irq;
+ mp_save_irq(&mp_irq);
++ mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
+ }
+ return 0;
+ }
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index c1b9242..74a4168 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -463,8 +463,8 @@ static void bt_update_count(struct blk_mq_bitmap_tags *bt,
+ }
+
+ bt->wake_cnt = BT_WAIT_BATCH;
+- if (bt->wake_cnt > depth / 4)
+- bt->wake_cnt = max(1U, depth / 4);
++ if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
++ bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
+
+ bt->depth = depth;
+ }
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index f1a1795..aa02247 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -574,7 +574,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ bottom = max(b->physical_block_size, b->io_min) + alignment;
+
+ /* Verify that top and bottom intervals line up */
+- if (max(top, bottom) & (min(top, bottom) - 1)) {
++ if (max(top, bottom) % min(top, bottom)) {
+ t->misaligned = 1;
+ ret = -1;
+ }
+@@ -619,7 +619,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+
+ /* Find lowest common alignment_offset */
+ t->alignment_offset = lcm(t->alignment_offset, alignment)
+- & (max(t->physical_block_size, t->io_min) - 1);
++ % max(t->physical_block_size, t->io_min);
+
+ /* Verify that new alignment_offset is on a logical block boundary */
+ if (t->alignment_offset & (t->logical_block_size - 1)) {
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 9b8eaec..a6d6270 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -509,7 +509,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+
+ if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+ err = DRIVER_ERROR << 24;
+- goto out;
++ goto error;
+ }
+
+ memset(sense, 0, sizeof(sense));
+@@ -518,7 +518,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+
+ blk_execute_rq(q, disk, rq, 0);
+
+-out:
+ err = rq->errors & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (rq->sense_len && rq->sense) {
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a19c027..83187f4 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -49,7 +49,7 @@ struct skcipher_ctx {
+ struct ablkcipher_request req;
+ };
+
+-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
++#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
+ sizeof(struct scatterlist) - 1)
+
+ static inline int skcipher_sndbuf(struct sock *sk)
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 67075f8..5e9cbd6 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -710,7 +710,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
+ return -ENODEV;
+ }
+
+- return acpi_device_wakeup(adev, enable, ACPI_STATE_S0);
++ return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
+ }
+ EXPORT_SYMBOL(acpi_pm_device_run_wake);
+ #endif /* CONFIG_PM_RUNTIME */
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index cb6066c..c874859 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -126,6 +126,7 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
++static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+
+ /* --------------------------------------------------------------------------
+ Transaction Management
+@@ -210,13 +211,8 @@ static bool advance_transaction(struct acpi_ec *ec)
+ }
+ return wakeup;
+ } else {
+- /*
+- * There is firmware refusing to respond QR_EC when SCI_EVT
+- * is not set, for which case, we complete the QR_EC
+- * without issuing it to the firmware.
+- * https://bugzilla.kernel.org/show_bug.cgi?id=86211
+- */
+- if (!(status & ACPI_EC_FLAG_SCI) &&
++ if (EC_FLAGS_QUERY_HANDSHAKE &&
++ !(status & ACPI_EC_FLAG_SCI) &&
+ (t->command == ACPI_EC_COMMAND_QUERY)) {
+ t->flags |= ACPI_EC_COMMAND_POLL;
+ t->rdata[t->ri++] = 0x00;
+@@ -981,6 +977,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ }
+
+ /*
++ * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for
++ * which case, we complete the QR_EC without issuing it to the firmware.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=86211
++ */
++static int ec_flag_query_handshake(const struct dmi_system_id *id)
++{
++ pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
++ EC_FLAGS_QUERY_HANDSHAKE = 1;
++ return 0;
++}
++
++/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+@@ -1054,6 +1062,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
++ {
++ ec_flag_query_handshake, "Acer hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
+ {},
+ };
+
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 1121153..db90aa3 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+- /* software reset. causes dev0 to be selected */
+- iowrite8(ap->ctl, ioaddr->ctl_addr);
+- udelay(20); /* FIXME: flush */
+- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+- udelay(20); /* FIXME: flush */
+- iowrite8(ap->ctl, ioaddr->ctl_addr);
+- ap->last_ctl = ap->ctl;
++ if (ap->ioaddr.ctl_addr) {
++ /* software reset. causes dev0 to be selected */
++ iowrite8(ap->ctl, ioaddr->ctl_addr);
++ udelay(20); /* FIXME: flush */
++ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
++ udelay(20); /* FIXME: flush */
++ iowrite8(ap->ctl, ioaddr->ctl_addr);
++ ap->last_ctl = ap->ctl;
++ }
+
+ /* wait the port to become ready */
+ return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+- /* ignore ata_sff_softreset if ctl isn't accessible */
+- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
+- softreset = NULL;
+-
+ /* ignore built-in hardresets if SCR access is not available */
+ if ((hardreset == sata_std_hardreset ||
+ hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
+diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
+index fc5f31d..57de021 100644
+--- a/drivers/ata/pata_serverworks.c
++++ b/drivers/ata/pata_serverworks.c
+@@ -251,12 +251,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
+ pci_write_config_byte(pdev, 0x54, ultra_cfg);
+ }
+
+-static struct scsi_host_template serverworks_sht = {
++static struct scsi_host_template serverworks_osb4_sht = {
++ ATA_BMDMA_SHT(DRV_NAME),
++ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
++};
++
++static struct scsi_host_template serverworks_csb_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ };
+
+ static struct ata_port_operations serverworks_osb4_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
++ .qc_prep = ata_bmdma_dumb_qc_prep,
+ .cable_detect = serverworks_cable_detect,
+ .mode_filter = serverworks_osb4_filter,
+ .set_piomode = serverworks_set_piomode,
+@@ -265,6 +271,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
+
+ static struct ata_port_operations serverworks_csb_port_ops = {
+ .inherits = &serverworks_osb4_port_ops,
++ .qc_prep = ata_bmdma_qc_prep,
+ .mode_filter = serverworks_csb_filter,
+ };
+
+@@ -404,6 +411,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ }
+ };
+ const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
++ struct scsi_host_template *sht = &serverworks_csb_sht;
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+@@ -417,6 +425,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ /* Select non UDMA capable OSB4 if we can't do fixups */
+ if (rc < 0)
+ ppi[0] = &info[1];
++ sht = &serverworks_osb4_sht;
+ }
+ /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+@@ -433,7 +442,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
+ ppi[1] = &ata_dummy_port_info;
+ }
+
+- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
++ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 20da3ad..0e9468c 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ return &dir->kobj;
+ }
+
++static DEFINE_MUTEX(gdp_mutex);
+
+ static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+ {
+ if (dev->class) {
+- static DEFINE_MUTEX(gdp_mutex);
+ struct kobject *kobj = NULL;
+ struct kobject *parent_kobj;
+ struct kobject *k;
+@@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ glue_dir->kset != &dev->class->p->glue_dirs)
+ return;
+
++ mutex_lock(&gdp_mutex);
+ kobject_put(glue_dir);
++ mutex_unlock(&gdp_mutex);
+ }
+
+ static void cleanup_device_parent(struct device *dev)
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index b67d9ae..ebc2f9d 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1266,6 +1266,8 @@ static int dpm_suspend_late(pm_message_t state)
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
++ if (!error)
++ error = async_error;
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
+index 89c497c..04a14e0 100644
+--- a/drivers/block/drbd/drbd_interval.c
++++ b/drivers/block/drbd/drbd_interval.c
+@@ -79,6 +79,7 @@ bool
+ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ {
+ struct rb_node **new = &root->rb_node, *parent = NULL;
++ sector_t this_end = this->sector + (this->size >> 9);
+
+ BUG_ON(!IS_ALIGNED(this->size, 512));
+
+@@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ rb_entry(*new, struct drbd_interval, rb);
+
+ parent = *new;
++ if (here->end < this_end)
++ here->end = this_end;
+ if (this->sector < here->sector)
+ new = &(*new)->rb_left;
+ else if (this->sector > here->sector)
+@@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
+ return false;
+ }
+
++ this->end = this_end;
+ rb_link_node(&this->rb, parent, new);
+ rb_insert_augmented(&this->rb, root, &augment_callbacks);
+ return true;
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 4b97baf..33f0f97 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3382,7 +3382,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
+ page_count = (u32) calc_pages_for(offset, length);
+ pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ if (IS_ERR(pages))
+- ret = PTR_ERR(pages);
++ return PTR_ERR(pages);
+
+ ret = -ENOMEM;
+ obj_request = rbd_obj_request_create(object_name, offset, length,
+@@ -5087,7 +5087,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
+ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
+ set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+
+- rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
++ rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
++ rbd_dev->disk->disk_name);
+ if (!rbd_dev->rq_wq) {
+ ret = -ENOMEM;
+ goto err_out_mapping;
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 64c60ed..63fc7f0 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -763,6 +763,7 @@ again:
+ BUG_ON(new_map_idx >= segs_to_map);
+ if (unlikely(map[new_map_idx].status != 0)) {
+ pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
++ put_free_pages(blkif, &pages[seg_idx]->page, 1);
+ pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+ ret |= 1;
+ goto next;
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 3a8b810..54f4089 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -270,6 +270,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
+ blkif->blk_rings.common.sring = NULL;
+ }
+
++ /* Remove all persistent grants and the cache of ballooned pages. */
++ xen_blkbk_free_caches(blkif);
++
+ return 0;
+ }
+
+@@ -281,9 +284,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
+ xen_blkif_disconnect(blkif);
+ xen_vbd_free(&blkif->vbd);
+
+- /* Remove all persistent grants and the cache of ballooned pages. */
+- xen_blkbk_free_caches(blkif);
+-
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index c18d41d..8c86a95 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1106,7 +1106,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ __mix_pool_bytes(r, hash.w, sizeof(hash.w));
+ spin_unlock_irqrestore(&r->lock, flags);
+
+- memset(workspace, 0, sizeof(workspace));
++ memzero_explicit(workspace, sizeof(workspace));
+
+ /*
+ * In case the hash function has some recognizable output
+@@ -1118,7 +1118,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ hash.w[2] ^= rol32(hash.w[2], 16);
+
+ memcpy(out, &hash, EXTRACT_SIZE);
+- memset(&hash, 0, sizeof(hash));
++ memzero_explicit(&hash, sizeof(hash));
+ }
+
+ /*
+@@ -1175,7 +1175,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ }
+
+ /* Wipe data just returned from memory */
+- memset(tmp, 0, sizeof(tmp));
++ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+ }
+@@ -1218,7 +1218,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ }
+
+ /* Wipe data just returned from memory */
+- memset(tmp, 0, sizeof(tmp));
++ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+ }
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 61190f6..c05821e 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -512,7 +512,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
+ show_one(scaling_min_freq, min);
+ show_one(scaling_max_freq, max);
+-show_one(scaling_cur_freq, cur);
++
++static ssize_t show_scaling_cur_freq(
++ struct cpufreq_policy *policy, char *buf)
++{
++ ssize_t ret;
++
++ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
++ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
++ else
++ ret = sprintf(buf, "%u\n", policy->cur);
++ return ret;
++}
+
+ static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
+@@ -906,11 +917,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
+ if (ret)
+ goto err_out_kobj_put;
+ }
+- if (has_target()) {
+- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+- if (ret)
+- goto err_out_kobj_put;
+- }
++
++ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
++ if (ret)
++ goto err_out_kobj_put;
++
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 0668b38..27bb6d3 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
+ return div_s64((int64_t)x << FRAC_BITS, y);
+ }
+
++static inline int ceiling_fp(int32_t x)
++{
++ int mask, ret;
++
++ ret = fp_toint(x);
++ mask = (1 << FRAC_BITS) - 1;
++ if (x & mask)
++ ret += 1;
++ return ret;
++}
++
+ struct sample {
+ int32_t core_pct_busy;
+ u64 aperf;
+@@ -64,6 +75,7 @@ struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
++ int scaling;
+ int turbo_pstate;
+ };
+
+@@ -113,6 +125,7 @@ struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
++ int (*get_scaling)(void);
+ void (*set)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
+ };
+@@ -138,6 +151,7 @@ struct perf_limits {
+
+ static struct perf_limits limits = {
+ .no_turbo = 0,
++ .turbo_disabled = 0,
+ .max_perf_pct = 100,
+ .max_perf = int_tofp(1),
+ .min_perf_pct = 0,
+@@ -218,6 +232,18 @@ static inline void intel_pstate_reset_all_pid(void)
+ }
+ }
+
++static inline void update_turbo_state(void)
++{
++ u64 misc_en;
++ struct cpudata *cpu;
++
++ cpu = all_cpu_data[0];
++ rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
++ limits.turbo_disabled =
++ (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
++ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
++}
++
+ /************************** debugfs begin ************************/
+ static int pid_param_set(void *data, u64 val)
+ {
+@@ -274,6 +300,20 @@ static void __init intel_pstate_debug_expose_params(void)
+ return sprintf(buf, "%u\n", limits.object); \
+ }
+
++static ssize_t show_no_turbo(struct kobject *kobj,
++ struct attribute *attr, char *buf)
++{
++ ssize_t ret;
++
++ update_turbo_state();
++ if (limits.turbo_disabled)
++ ret = sprintf(buf, "%u\n", limits.turbo_disabled);
++ else
++ ret = sprintf(buf, "%u\n", limits.no_turbo);
++
++ return ret;
++}
++
+ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+ {
+@@ -283,11 +323,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+- limits.no_turbo = clamp_t(int, input, 0 , 1);
++
++ update_turbo_state();
+ if (limits.turbo_disabled) {
+ pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+- limits.no_turbo = limits.turbo_disabled;
++ return -EPERM;
+ }
++ limits.no_turbo = clamp_t(int, input, 0, 1);
++
+ return count;
+ }
+
+@@ -323,7 +366,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ return count;
+ }
+
+-show_one(no_turbo, no_turbo);
+ show_one(max_perf_pct, max_perf_pct);
+ show_one(min_perf_pct, min_perf_pct);
+
+@@ -394,7 +436,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ cpudata->vid.ratio);
+
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+- vid = fp_toint(vid_fp);
++ vid = ceiling_fp(vid_fp);
+
+ if (pstate > cpudata->pstate.max_pstate)
+ vid = cpudata->vid.turbo;
+@@ -404,6 +446,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+ }
+
++#define BYT_BCLK_FREQS 5
++static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
++
++static int byt_get_scaling(void)
++{
++ u64 value;
++ int i;
++
++ rdmsrl(MSR_FSB_FREQ, value);
++ i = value & 0x3;
++
++ BUG_ON(i > BYT_BCLK_FREQS);
++
++ return byt_freq_table[i] * 100;
++}
++
+ static void byt_get_vid(struct cpudata *cpudata)
+ {
+ u64 value;
+@@ -449,6 +507,11 @@ static int core_get_turbo_pstate(void)
+ return ret;
+ }
+
++static inline int core_get_scaling(void)
++{
++ return 100000;
++}
++
+ static void core_set_pstate(struct cpudata *cpudata, int pstate)
+ {
+ u64 val;
+@@ -473,6 +536,7 @@ static struct cpu_defaults core_params = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
++ .get_scaling = core_get_scaling,
+ .set = core_set_pstate,
+ },
+ };
+@@ -491,6 +555,7 @@ static struct cpu_defaults byt_params = {
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_turbo_pstate,
+ .set = byt_set_pstate,
++ .get_scaling = byt_get_scaling,
+ .get_vid = byt_get_vid,
+ },
+ };
+@@ -501,7 +566,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+ int max_perf_adj;
+ int min_perf;
+
+- if (limits.no_turbo)
++ if (limits.no_turbo || limits.turbo_disabled)
+ max_perf = cpu->pstate.max_pstate;
+
+ max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+@@ -516,6 +581,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+ {
+ int max_perf, min_perf;
+
++ update_turbo_state();
++
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+@@ -523,7 +590,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+- trace_cpu_frequency(pstate * 100000, cpu->cpu);
++ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+
+ cpu->pstate.current_pstate = pstate;
+
+@@ -535,6 +602,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
++ cpu->pstate.scaling = pstate_funcs.get_scaling();
+
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+@@ -550,7 +618,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+ core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
+
+ sample->freq = fp_toint(
+- mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
++ mul_fp(int_tofp(
++ cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
++ core_pct));
+
+ sample->core_pct_busy = (int32_t)core_pct;
+ }
+@@ -671,7 +741,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
+ {
+ struct cpudata *cpu;
+
+- all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
++ if (!all_cpu_data[cpunum])
++ all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
++ GFP_KERNEL);
+ if (!all_cpu_data[cpunum])
+ return -ENOMEM;
+
+@@ -714,9 +786,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ limits.min_perf_pct = 100;
+ limits.min_perf = int_tofp(1);
++ limits.max_policy_pct = 100;
+ limits.max_perf_pct = 100;
+ limits.max_perf = int_tofp(1);
+- limits.no_turbo = limits.turbo_disabled;
++ limits.no_turbo = 0;
+ return 0;
+ }
+ limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+@@ -751,15 +824,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+
+ del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+- kfree(all_cpu_data[cpu_num]);
+- all_cpu_data[cpu_num] = NULL;
+ }
+
+ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+ {
+ struct cpudata *cpu;
+ int rc;
+- u64 misc_en;
+
+ rc = intel_pstate_init_cpu(policy->cpu);
+ if (rc)
+@@ -767,23 +837,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+
+ cpu = all_cpu_data[policy->cpu];
+
+- rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+- if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+- cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+- limits.turbo_disabled = 1;
+- limits.no_turbo = 1;
+- }
+ if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+- policy->min = cpu->pstate.min_pstate * 100000;
+- policy->max = cpu->pstate.turbo_pstate * 100000;
++ policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
++ policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
+- policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
++ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
++ policy->cpuinfo.max_freq =
++ cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+@@ -841,6 +906,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
++ pstate_funcs.get_scaling = funcs->get_scaling;
+ pstate_funcs.set = funcs->set;
+ pstate_funcs.get_vid = funcs->get_vid;
+ }
+diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
+index df6575f..682288c 100644
+--- a/drivers/edac/cpc925_edac.c
++++ b/drivers/edac/cpc925_edac.c
+@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
+
+ if (apiexcp & UECC_EXCP_DETECTED) {
+ cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
+- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
++ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ pfn, offset, 0,
+ csrow, -1, -1,
+ mci->ctl_name, "");
+diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
+index 3cda79b..ece3aef 100644
+--- a/drivers/edac/e7xxx_edac.c
++++ b/drivers/edac/e7xxx_edac.c
+@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+ static void process_ce_no_info(struct mem_ctl_info *mci)
+ {
+ edac_dbg(3, "\n");
+- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "");
+ }
+
+diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
+index 022a702..aa98b13 100644
+--- a/drivers/edac/i3200_edac.c
++++ b/drivers/edac/i3200_edac.c
+@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
+ -1, -1,
+ "i3000 UE", "");
+ } else if (log & I3200_ECCERRLOG_CE) {
+- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+- "i3000 UE", "");
++ "i3000 CE", "");
+ }
+ }
+ }
+diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
+index 3382f63..4382343 100644
+--- a/drivers/edac/i82860_edac.c
++++ b/drivers/edac/i82860_edac.c
+@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 UE", "");
+ else
+- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ info->eap, 0, info->derrsyn,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 CE", "");
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 5389350..70bedf9 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -1080,8 +1080,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+- data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
+- data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
++ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
++ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
+index 919c73b..4977631 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
++++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
+@@ -32,6 +32,8 @@ static struct drm_driver driver;
+ static const struct pci_device_id pciidlist[] = {
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
++ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
++ 0x0001, 0, 0, 0 },
+ {0,}
+ };
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index d384139..d182058 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
+ static struct i915_mmu_notifier *
+ i915_mmu_notifier_find(struct i915_mm_struct *mm)
+ {
+- if (mm->mn == NULL) {
+- down_write(&mm->mm->mmap_sem);
+- mutex_lock(&to_i915(mm->dev)->mm_lock);
+- if (mm->mn == NULL)
+- mm->mn = i915_mmu_notifier_create(mm->mm);
+- mutex_unlock(&to_i915(mm->dev)->mm_lock);
+- up_write(&mm->mm->mmap_sem);
++ struct i915_mmu_notifier *mn = mm->mn;
++
++ mn = mm->mn;
++ if (mn)
++ return mn;
++
++ down_write(&mm->mm->mmap_sem);
++ mutex_lock(&to_i915(mm->dev)->mm_lock);
++ if ((mn = mm->mn) == NULL) {
++ mn = i915_mmu_notifier_create(mm->mm);
++ if (!IS_ERR(mn))
++ mm->mn = mn;
+ }
+- return mm->mn;
++ mutex_unlock(&to_i915(mm->dev)->mm_lock);
++ up_write(&mm->mm->mmap_sem);
++
++ return mn;
+ }
+
+ static int
+@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+ static void
+ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+ {
+- struct scatterlist *sg;
+- int i;
++ struct sg_page_iter sg_iter;
+
+ BUG_ON(obj->userptr.work != NULL);
+
+ if (obj->madv != I915_MADV_WILLNEED)
+ obj->dirty = 0;
+
+- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+- struct page *page = sg_page(sg);
++ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
++ struct page *page = sg_page_iter_page(&sg_iter);
+
+ if (obj->dirty)
+ set_page_dirty(page);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 0050ee9..5d387a8 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -3482,12 +3482,13 @@ static void gen8_irq_reset(struct drm_device *dev)
+ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+ {
+ unsigned long irqflags;
++ uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+- ~dev_priv->de_irq_mask[PIPE_B]);
++ ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+- ~dev_priv->de_irq_mask[PIPE_C]);
++ ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index d8324c6..b71a026 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4470,7 +4470,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+- I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
++ I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
+ }
+
+ /* Adjust CDclk dividers to allow high res or save power if possible */
+@@ -12507,6 +12507,9 @@ static struct intel_quirk intel_quirks[] = {
+ /* Acer C720 Chromebook (Core i3 4005U) */
+ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
++ /* Apple Macbook 2,1 (Core 2 T7400) */
++ { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
++
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index fdff1d4..9222e20 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2364,6 +2364,13 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
+ ssize_t ret;
+ int i;
+
++ /*
++	 * Sometimes we just get the same incorrect byte repeated
++	 * over the entire buffer. Doing just one throwaway read
++ * initially seems to "solve" it.
++ */
++ drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
++
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret == size)
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 8e37444..cbe8a8d 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -398,6 +398,9 @@ intel_panel_detect(struct drm_device *dev)
+ }
+ }
+
++#define DIV_ROUND_CLOSEST_ULL(ll, d) \
++({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
++
+ /**
+ * scale - scale values from one range to another
+ *
+@@ -419,9 +422,8 @@ static uint32_t scale(uint32_t source_val,
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+- target_val = (uint64_t)(source_val - source_min) *
+- (target_max - target_min);
+- do_div(target_val, source_max - source_min);
++ target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
++ (target_max - target_min), source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
+index f5d7f7c..0197d6c 100644
+--- a/drivers/gpu/drm/nouveau/Makefile
++++ b/drivers/gpu/drm/nouveau/Makefile
+@@ -129,7 +129,7 @@ nouveau-y += core/subdev/fb/gddr5.o
+ nouveau-y += core/subdev/gpio/base.o
+ nouveau-y += core/subdev/gpio/nv10.o
+ nouveau-y += core/subdev/gpio/nv50.o
+-nouveau-y += core/subdev/gpio/nv92.o
++nouveau-y += core/subdev/gpio/nv94.o
+ nouveau-y += core/subdev/gpio/nvd0.o
+ nouveau-y += core/subdev/gpio/nve0.o
+ nouveau-y += core/subdev/i2c/base.o
+diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+index 932f84f..cbab586e 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
++++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+@@ -141,7 +141,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0x92:
+ device->cname = "G92";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -169,7 +169,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0x94:
+ device->cname = "G94";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -197,7 +197,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0x96:
+ device->cname = "G96";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -225,7 +225,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0x98:
+ device->cname = "G98";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -253,7 +253,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xa0:
+ device->cname = "G200";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -281,7 +281,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xaa:
+ device->cname = "MCP77/MCP78";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -309,7 +309,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xac:
+ device->cname = "MCP79/MCP7A";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
+@@ -337,7 +337,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xa3:
+ device->cname = "GT215";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -367,7 +367,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xa5:
+ device->cname = "GT216";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -396,7 +396,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xa8:
+ device->cname = "GT218";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -425,7 +425,7 @@ nv50_identify(struct nouveau_device *device)
+ case 0xaf:
+ device->cname = "MCP89";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+index b4a2917..da153a2 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
++++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+@@ -60,7 +60,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xc0:
+ device->cname = "GF100";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -92,7 +92,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xc4:
+ device->cname = "GF104";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -124,7 +124,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xc3:
+ device->cname = "GF106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -155,7 +155,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xce:
+ device->cname = "GF114";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -187,7 +187,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xcf:
+ device->cname = "GF116";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -219,7 +219,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xc1:
+ device->cname = "GF108";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+@@ -250,7 +250,7 @@ nvc0_identify(struct nouveau_device *device)
+ case 0xc8:
+ device->cname = "GF110";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+- device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
++ device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
+diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+index b73733d..f855140 100644
+--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
++++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+@@ -40,7 +40,7 @@ nouveau_gpio(void *obj)
+
+ extern struct nouveau_oclass *nv10_gpio_oclass;
+ extern struct nouveau_oclass *nv50_gpio_oclass;
+-extern struct nouveau_oclass *nv92_gpio_oclass;
++extern struct nouveau_oclass *nv94_gpio_oclass;
+ extern struct nouveau_oclass *nvd0_gpio_oclass;
+ extern struct nouveau_oclass *nve0_gpio_oclass;
+
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+index 88606bf..bd8d348 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+ struct dcb_output *outp)
+ {
+ u16 dcb = dcb_outp(bios, idx, ver, len);
++ memset(outp, 0x00, sizeof(*outp));
+ if (dcb) {
+ if (*ver >= 0x20) {
+ u32 conn = nv_ro32(bios, dcb + 0x00);
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
+deleted file mode 100644
+index 252083d..0000000
+--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
++++ /dev/null
+@@ -1,74 +0,0 @@
+-/*
+- * Copyright 2012 Red Hat Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: Ben Skeggs
+- */
+-
+-#include "priv.h"
+-
+-void
+-nv92_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
+-{
+- u32 intr0 = nv_rd32(gpio, 0x00e054);
+- u32 intr1 = nv_rd32(gpio, 0x00e074);
+- u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
+- u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
+- *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+- *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+- nv_wr32(gpio, 0x00e054, intr0);
+- nv_wr32(gpio, 0x00e074, intr1);
+-}
+-
+-void
+-nv92_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
+-{
+- u32 inte0 = nv_rd32(gpio, 0x00e050);
+- u32 inte1 = nv_rd32(gpio, 0x00e070);
+- if (type & NVKM_GPIO_LO)
+- inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+- if (type & NVKM_GPIO_HI)
+- inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+- mask >>= 16;
+- data >>= 16;
+- if (type & NVKM_GPIO_LO)
+- inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+- if (type & NVKM_GPIO_HI)
+- inte1 = (inte1 & ~mask) | data;
+- nv_wr32(gpio, 0x00e050, inte0);
+- nv_wr32(gpio, 0x00e070, inte1);
+-}
+-
+-struct nouveau_oclass *
+-nv92_gpio_oclass = &(struct nouveau_gpio_impl) {
+- .base.handle = NV_SUBDEV(GPIO, 0x92),
+- .base.ofuncs = &(struct nouveau_ofuncs) {
+- .ctor = _nouveau_gpio_ctor,
+- .dtor = _nouveau_gpio_dtor,
+- .init = _nouveau_gpio_init,
+- .fini = _nouveau_gpio_fini,
+- },
+- .lines = 32,
+- .intr_stat = nv92_gpio_intr_stat,
+- .intr_mask = nv92_gpio_intr_mask,
+- .drive = nv50_gpio_drive,
+- .sense = nv50_gpio_sense,
+- .reset = nv50_gpio_reset,
+-}.base;
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
+new file mode 100644
+index 0000000..cae404c
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
+@@ -0,0 +1,74 @@
++/*
++ * Copyright 2012 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "priv.h"
++
++void
++nv94_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
++{
++ u32 intr0 = nv_rd32(gpio, 0x00e054);
++ u32 intr1 = nv_rd32(gpio, 0x00e074);
++ u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
++ u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
++ *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
++ *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
++ nv_wr32(gpio, 0x00e054, intr0);
++ nv_wr32(gpio, 0x00e074, intr1);
++}
++
++void
++nv94_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
++{
++ u32 inte0 = nv_rd32(gpio, 0x00e050);
++ u32 inte1 = nv_rd32(gpio, 0x00e070);
++ if (type & NVKM_GPIO_LO)
++ inte0 = (inte0 & ~(mask << 16)) | (data << 16);
++ if (type & NVKM_GPIO_HI)
++ inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
++ mask >>= 16;
++ data >>= 16;
++ if (type & NVKM_GPIO_LO)
++ inte1 = (inte1 & ~(mask << 16)) | (data << 16);
++ if (type & NVKM_GPIO_HI)
++ inte1 = (inte1 & ~mask) | data;
++ nv_wr32(gpio, 0x00e050, inte0);
++ nv_wr32(gpio, 0x00e070, inte1);
++}
++
++struct nouveau_oclass *
++nv94_gpio_oclass = &(struct nouveau_gpio_impl) {
++ .base.handle = NV_SUBDEV(GPIO, 0x94),
++ .base.ofuncs = &(struct nouveau_ofuncs) {
++ .ctor = _nouveau_gpio_ctor,
++ .dtor = _nouveau_gpio_dtor,
++ .init = _nouveau_gpio_init,
++ .fini = _nouveau_gpio_fini,
++ },
++ .lines = 32,
++ .intr_stat = nv94_gpio_intr_stat,
++ .intr_mask = nv94_gpio_intr_mask,
++ .drive = nv50_gpio_drive,
++ .sense = nv50_gpio_sense,
++ .reset = nv50_gpio_reset,
++}.base;
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+index a4682b0..480d6d2 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+@@ -77,8 +77,8 @@ nvd0_gpio_oclass = &(struct nouveau_gpio_impl) {
+ .fini = _nouveau_gpio_fini,
+ },
+ .lines = 32,
+- .intr_stat = nv92_gpio_intr_stat,
+- .intr_mask = nv92_gpio_intr_mask,
++ .intr_stat = nv94_gpio_intr_stat,
++ .intr_mask = nv94_gpio_intr_mask,
+ .drive = nvd0_gpio_drive,
+ .sense = nvd0_gpio_sense,
+ .reset = nvd0_gpio_reset,
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
+index e1724df..bff98b8 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
++++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
+@@ -56,8 +56,8 @@ void nv50_gpio_reset(struct nouveau_gpio *, u8);
+ int nv50_gpio_drive(struct nouveau_gpio *, int, int, int);
+ int nv50_gpio_sense(struct nouveau_gpio *, int);
+
+-void nv92_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
+-void nv92_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
++void nv94_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
++void nv94_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
+
+ void nvd0_gpio_reset(struct nouveau_gpio *, u8);
+ int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
+index 3440fc9..497ea01 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
++++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
+@@ -400,15 +400,20 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ struct nouveau_channel **pchan)
+ {
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
++ bool super;
+ int ret;
+
++ /* hack until fencenv50 is fixed, and agp access relaxed */
++ super = cli->base.super;
++ cli->base.super = true;
++
+ ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
+ if (ret) {
+ NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+ ret = nouveau_channel_dma(drm, device, handle, pchan);
+ if (ret) {
+ NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
+- return ret;
++ goto done;
+ }
+ }
+
+@@ -416,8 +421,9 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ if (ret) {
+ NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
+ nouveau_channel_del(pchan);
+- return ret;
+ }
+
+- return 0;
++done:
++ cli->base.super = super;
++ return ret;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 03949ea..bca5d8c 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -1653,15 +1653,17 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+- struct {
+- struct nv50_disp_mthd_v1 base;
+- struct nv50_disp_sor_hda_eld_v0 eld;
++ struct __packed {
++ struct {
++ struct nv50_disp_mthd_v1 mthd;
++ struct nv50_disp_sor_hda_eld_v0 eld;
++ } base;
+ u8 data[sizeof(nv_connector->base.eld)];
+ } args = {
+- .base.version = 1,
+- .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+- .base.hasht = nv_encoder->dcb->hasht,
+- .base.hashm = nv_encoder->dcb->hashm,
++ .base.mthd.version = 1,
++ .base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
++ .base.mthd.hasht = nv_encoder->dcb->hasht,
++ .base.mthd.hashm = nv_encoder->dcb->hashm,
+ };
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+@@ -1671,7 +1673,7 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
+
+- nvif_mthd(disp->disp, 0, &args, sizeof(args));
++ nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
+ }
+
+ static void
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index b8ced08..bac1fd4 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -523,7 +523,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ struct qxl_framebuffer *qfb;
+ struct qxl_bo *bo, *old_bo = NULL;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+- uint32_t width, height, base_offset;
+ bool recreate_primary = false;
+ int ret;
+ int surf_id;
+@@ -553,9 +552,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ if (qcrtc->index == 0)
+ recreate_primary = true;
+
+- width = mode->hdisplay;
+- height = mode->vdisplay;
+- base_offset = 0;
++ if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
++ DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
++ return -EINVAL;
++ }
+
+ ret = qxl_bo_reserve(bo, false);
+ if (ret != 0)
+@@ -569,10 +569,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ if (recreate_primary) {
+ qxl_io_destroy_primary(qdev);
+ qxl_io_log(qdev,
+- "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+- width, height, bo->surf.width,
+- bo->surf.height, bo->surf.stride, bo->surf.format);
+- qxl_io_create_primary(qdev, base_offset, bo);
++ "recreate primary: %dx%d,%d,%d\n",
++ bo->surf.width, bo->surf.height,
++ bo->surf.stride, bo->surf.format);
++ qxl_io_create_primary(qdev, 0, bo);
+ bo->is_primary = true;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index c4ffa54..e8eea36 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -610,16 +610,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
+ {
+ unsigned i;
+ int r;
+- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
++ unsigned index;
+ u32 tmp;
++ u64 gpu_addr;
+
+- if (!ptr) {
+- DRM_ERROR("invalid vram scratch pointer\n");
+- return -EINVAL;
+- }
++ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
++ index = R600_WB_DMA_RING_TEST_OFFSET;
++ else
++ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
++
++ gpu_addr = rdev->wb.gpu_addr + index;
+
+ tmp = 0xCAFEDEAD;
+- writel(tmp, ptr);
++ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
+
+ r = radeon_ring_lock(rdev, ring, 5);
+ if (r) {
+@@ -627,14 +630,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
+ return r;
+ }
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
++ radeon_ring_write(ring, lower_32_bits(gpu_addr));
++ radeon_ring_write(ring, upper_32_bits(gpu_addr));
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring, false);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+- tmp = readl(ptr);
++ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
+index 51800e3..71f4d26 100644
+--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
++++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
+@@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+- return;
++ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
++ sad_count = 0;
+ }
+
+ /* program the speaker allocation */
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index ab29f95..790d8ca 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
+- if (sad_count <= 0) {
+- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+- return;
++ if (sad_count < 0) {
++ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
++ sad_count = 0;
+ }
+
+ /* program the speaker allocation */
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 278c7a1..71ebdf8 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -118,9 +118,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
+- if (sad_count <= 0) {
+- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+- return;
++ if (sad_count < 0) {
++ DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
++ sad_count = 0;
+ }
+
+ /* program the speaker allocation */
+diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
+index 67cb472..e79b7eb 100644
+--- a/drivers/gpu/drm/radeon/kv_dpm.c
++++ b/drivers/gpu/drm/radeon/kv_dpm.c
+@@ -2725,7 +2725,11 @@ int kv_dpm_init(struct radeon_device *rdev)
+
+ pi->sram_end = SMC_RAM_END;
+
+- pi->enable_nb_dpm = true;
++ /* Enabling nb dpm on an asrock system prevents dpm from working */
++ if (rdev->pdev->subsystem_vendor == 0x1849)
++ pi->enable_nb_dpm = false;
++ else
++ pi->enable_nb_dpm = true;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = true;
+@@ -2740,10 +2744,19 @@ int kv_dpm_init(struct radeon_device *rdev)
+ pi->caps_sclk_ds = true;
+ pi->enable_auto_thermal_throttling = true;
+ pi->disable_nb_ps3_in_battery = false;
+- if (radeon_bapm == 0)
++ if (radeon_bapm == -1) {
++		/* There are stability issues reported with
++ * bapm enabled on an asrock system.
++ */
++ if (rdev->pdev->subsystem_vendor == 0x1849)
++ pi->bapm_enable = false;
++ else
++ pi->bapm_enable = true;
++ } else if (radeon_bapm == 0) {
+ pi->bapm_enable = false;
+- else
++ } else {
+ pi->bapm_enable = true;
++ }
+ pi->voltage_drop_t = 0;
+ pi->caps_sclk_throttle_low_notification = false;
+ pi->caps_fps = false; /* true? */
+diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
+index a908daa..44379bf 100644
+--- a/drivers/gpu/drm/radeon/r600_dma.c
++++ b/drivers/gpu/drm/radeon/r600_dma.c
+@@ -232,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
+ {
+ unsigned i;
+ int r;
+- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
++ unsigned index;
+ u32 tmp;
++ u64 gpu_addr;
+
+- if (!ptr) {
+- DRM_ERROR("invalid vram scratch pointer\n");
+- return -EINVAL;
+- }
++ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
++ index = R600_WB_DMA_RING_TEST_OFFSET;
++ else
++ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
++
++ gpu_addr = rdev->wb.gpu_addr + index;
+
+ tmp = 0xCAFEDEAD;
+- writel(tmp, ptr);
++ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+@@ -249,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
++ radeon_ring_write(ring, lower_32_bits(gpu_addr));
++ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring, false);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+- tmp = readl(ptr);
++ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 3247bfd..e841058 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1120,6 +1120,8 @@ struct radeon_wb {
+ #define R600_WB_EVENT_OFFSET 3072
+ #define CIK_WB_CP1_WPTR_OFFSET 3328
+ #define CIK_WB_CP2_WPTR_OFFSET 3584
++#define R600_WB_DMA_RING_TEST_OFFSET 3588
++#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
+
+ /**
+ * struct radeon_pm - power management datas
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 83f382e..e244c2d 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -418,7 +418,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
+ kfree(parser->track);
+ kfree(parser->relocs);
+ kfree(parser->relocs_ptr);
+- kfree(parser->vm_bos);
++ drm_free_large(parser->vm_bos);
+ for (i = 0; i < parser->nchunks; i++)
+ drm_free_large(parser->chunks[i].kdata);
+ kfree(parser->chunks);
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 12c8329..6684fbf 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1130,7 +1130,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
+ if (radeon_vm_block_size == -1) {
+
+ /* Total bits covered by PD + PTs */
+- unsigned bits = ilog2(radeon_vm_size) + 17;
++ unsigned bits = ilog2(radeon_vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB address space.
+ Above that split equal between PD and PTs */
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index d656079..9323435 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -335,7 +335,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
+ }
+
+ /* and then save the content of the ring */
+- *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
++ *data = drm_malloc_ab(size, sizeof(uint32_t));
+ if (!*data) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+@@ -377,7 +377,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ }
+
+ radeon_ring_unlock_commit(rdev, ring, false);
+- kfree(data);
++ drm_free_large(data);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 088ffdc..a3b3e09 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -132,8 +132,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+ struct radeon_cs_reloc *list;
+ unsigned i, idx;
+
+- list = kmalloc_array(vm->max_pde_used + 2,
+- sizeof(struct radeon_cs_reloc), GFP_KERNEL);
++ list = drm_malloc_ab(vm->max_pde_used + 2,
++ sizeof(struct radeon_cs_reloc));
+ if (!list)
+ return NULL;
+
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 70e61ff..1202e0f 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -6255,7 +6255,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
+ if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+ index == 0) {
+ /* XXX disable for A0 tahiti */
+- si_pi->ulv.supported = true;
++ si_pi->ulv.supported = false;
+ si_pi->ulv.pl = *pl;
+ si_pi->ulv.one_pcie_lane_in_ulv = false;
+ si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index 6be623b..000428e 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
+ if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+ /* oh nos! */
+ dev_err(dev->dev, "no encoders/connectors found\n");
++ drm_mode_config_cleanup(dev);
+ return -ENXIO;
+ }
+
+@@ -172,33 +173,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("tilcdc", 0);
++ if (!priv->wq) {
++ ret = -ENOMEM;
++ goto fail_free_priv;
++ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev->dev, "failed to get memory resource\n");
+ ret = -EINVAL;
+- goto fail;
++ goto fail_free_wq;
+ }
+
+ priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ if (!priv->mmio) {
+ dev_err(dev->dev, "failed to ioremap\n");
+ ret = -ENOMEM;
+- goto fail;
++ goto fail_free_wq;
+ }
+
+ priv->clk = clk_get(dev->dev, "fck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get functional clock\n");
+ ret = -ENODEV;
+- goto fail;
++ goto fail_iounmap;
+ }
+
+ priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev->dev, "failed to get display clock\n");
+ ret = -ENODEV;
+- goto fail;
++ goto fail_put_clk;
+ }
+
+ #ifdef CONFIG_CPU_FREQ
+@@ -208,7 +213,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ dev_err(dev->dev, "failed to register cpufreq notifier\n");
+- goto fail;
++ goto fail_put_disp_clk;
+ }
+ #endif
+
+@@ -253,13 +258,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ ret = modeset_init(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize mode setting\n");
+- goto fail;
++ goto fail_cpufreq_unregister;
+ }
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+- goto fail;
++ goto fail_mode_config_cleanup;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+@@ -267,7 +272,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+- goto fail;
++ goto fail_vblank_cleanup;
+ }
+
+ platform_set_drvdata(pdev, dev);
+@@ -283,13 +288,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+ priv->fbdev = drm_fbdev_cma_init(dev, bpp,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
++ if (IS_ERR(priv->fbdev)) {
++ ret = PTR_ERR(priv->fbdev);
++ goto fail_irq_uninstall;
++ }
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+-fail:
+- tilcdc_unload(dev);
++fail_irq_uninstall:
++ pm_runtime_get_sync(dev->dev);
++ drm_irq_uninstall(dev);
++ pm_runtime_put_sync(dev->dev);
++
++fail_vblank_cleanup:
++ drm_vblank_cleanup(dev);
++
++fail_mode_config_cleanup:
++ drm_mode_config_cleanup(dev);
++
++fail_cpufreq_unregister:
++ pm_runtime_disable(dev->dev);
++#ifdef CONFIG_CPU_FREQ
++ cpufreq_unregister_notifier(&priv->freq_transition,
++ CPUFREQ_TRANSITION_NOTIFIER);
++fail_put_disp_clk:
++ clk_put(priv->disp_clk);
++#endif
++
++fail_put_clk:
++ clk_put(priv->clk);
++
++fail_iounmap:
++ iounmap(priv->mmio);
++
++fail_free_wq:
++ flush_workqueue(priv->wq);
++ destroy_workqueue(priv->wq);
++
++fail_free_priv:
++ dev->dev_private = NULL;
++ kfree(priv);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 18b54ac..14b2f50 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -688,7 +688,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ goto out_err0;
+ }
+
+- if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
++ /*
++ * Limit back buffer size to VRAM size. Remove this once
++ * screen targets are implemented.
++ */
++ if (dev_priv->prim_bb_mem > dev_priv->vram_size)
+ dev_priv->prim_bb_mem = dev_priv->vram_size;
+
+ mutex_unlock(&dev_priv->hw_mutex);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index d2bc2b0..10fc4c3 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1950,6 +1950,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+ };
+ int i;
++ u32 assumed_bpp = 2;
++
++ /*
++ * If using screen objects, then assume 32-bpp because that's what the
++ * SVGA device is assuming
++ */
++ if (dev_priv->sou_priv)
++ assumed_bpp = 4;
+
+ /* Add preferred mode */
+ {
+@@ -1960,8 +1968,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
+
+- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+- mode->vdisplay)) {
++ if (vmw_kms_validate_mode_vram(dev_priv,
++ mode->hdisplay * assumed_bpp,
++ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+@@ -1983,7 +1992,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
+ bmode->vdisplay > max_height)
+ continue;
+
+- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
++ if (!vmw_kms_validate_mode_vram(dev_priv,
++ bmode->hdisplay * assumed_bpp,
+ bmode->vdisplay))
+ continue;
+
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index 84c3cb1..8bf61d2 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -946,6 +946,12 @@ static const char *keys[KEY_MAX + 1] = {
+ [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+ [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
+ [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
++ [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev",
++ [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext",
++ [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
++ [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup",
++ [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept",
++ [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel",
+ };
+
+ static const char *relatives[REL_MAX + 1] = {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 25cd674..c3a712c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -296,6 +296,11 @@
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
+ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+
++#define USB_VENDOR_ID_ELAN 0x04f3
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
++
+ #define USB_VENDOR_ID_ELECOM 0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084 0x0061
+
+@@ -733,6 +738,8 @@
+ #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+
+ #define USB_VENDOR_ID_PIXART 0x093a
++#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2 0x0137
++#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE 0x2510
+ #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
+ #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
+ #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 2619f7f..62e8286 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -689,7 +689,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ break;
+
+ case 0x5b: /* TransducerSerialNumber */
+- set_bit(MSC_SERIAL, input->mscbit);
++ usage->type = EV_MSC;
++ usage->code = MSC_SERIAL;
++ bit = input->mscbit;
++ max = MSC_MAX;
+ break;
+
+ default: goto unknown;
+@@ -856,6 +859,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
+ case 0x28c: map_key_clear(KEY_SEND); break;
+
++ case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
++ case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
++ case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
++ case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP); break;
++ case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
++ case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
++
+ default: goto ignore;
+ }
+ break;
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 79cf503..ddd547a 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid)
+ struct usbhid_device *usbhid = hid->driver_data;
+
+ spin_lock_irqsave(&usbhid->lock, flags);
+- if (hid->open > 0 &&
++ if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) &&
+ !test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
+ !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
+ !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
+@@ -292,6 +292,8 @@ static void hid_irq_in(struct urb *urb)
+ case 0: /* success */
+ usbhid_mark_busy(usbhid);
+ usbhid->retry_delay = 0;
++ if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
++ break;
+ hid_input_report(urb->context, HID_INPUT_REPORT,
+ urb->transfer_buffer,
+ urb->actual_length, 1);
+@@ -735,8 +737,10 @@ void usbhid_close(struct hid_device *hid)
+ if (!--hid->open) {
+ spin_unlock_irq(&usbhid->lock);
+ hid_cancel_delayed_stuff(usbhid);
+- usb_kill_urb(usbhid->urbin);
+- usbhid->intf->needs_remote_wakeup = 0;
++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
++ usb_kill_urb(usbhid->urbin);
++ usbhid->intf->needs_remote_wakeup = 0;
++ }
+ } else {
+ spin_unlock_irq(&usbhid->lock);
+ }
+@@ -1134,6 +1138,19 @@ static int usbhid_start(struct hid_device *hid)
+
+ set_bit(HID_STARTED, &usbhid->iofl);
+
++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
++ ret = usb_autopm_get_interface(usbhid->intf);
++ if (ret)
++ goto fail;
++ usbhid->intf->needs_remote_wakeup = 1;
++ ret = hid_start_in(hid);
++ if (ret) {
++ dev_err(&hid->dev,
++ "failed to start in urb: %d\n", ret);
++ }
++ usb_autopm_put_interface(usbhid->intf);
++ }
++
+ /* Some keyboards don't work until their LEDs have been set.
+ * Since BIOSes do set the LEDs, it must be safe for any device
+ * that supports the keyboard boot protocol.
+@@ -1166,6 +1183,9 @@ static void usbhid_stop(struct hid_device *hid)
+ if (WARN_ON(!usbhid))
+ return;
+
++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
++ usbhid->intf->needs_remote_wakeup = 0;
++
+ clear_bit(HID_STARTED, &usbhid->iofl);
+ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
+ set_bit(HID_DISCONNECTED, &usbhid->iofl);
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 15225f3..5014bb5 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -70,6 +70,9 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+@@ -79,6 +82,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
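A side note on the hunks above: the new ELAN and PixArt entries tie those vendor/product IDs to HID_QUIRK_ALWAYS_POLL, which (per the hid-core.c changes earlier in this patch) keeps the interrupt-in URB running even while the device is closed. Below is a rough standalone sketch of how a static quirk table like this is typically scanned; it is not the kernel's code, and quirk_entry, lookup_quirks and the flag value are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define QUIRK_ALWAYS_POLL 0x00000400u /* illustrative flag value, not taken from hid.h */

struct quirk_entry {
	uint16_t vendor;
	uint16_t product;
	uint32_t quirks;
};

static const struct quirk_entry quirk_table[] = {
	{ 0x04f3, 0x0089, QUIRK_ALWAYS_POLL }, /* ELAN touchscreen, as in the hunk */
	{ 0x093a, 0x2510, QUIRK_ALWAYS_POLL }, /* PixArt optical mouse, as in the hunk */
	{ 0, 0, 0 },                           /* terminator */
};

/* Return the quirk mask for a vendor/product pair, or 0 if none matches. */
static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
{
	const struct quirk_entry *e;

	for (e = quirk_table; e->vendor || e->product; e++)
		if (e->vendor == vendor && e->product == product)
			return e->quirks;
	return 0;
}

int main(void)
{
	printf("quirks for 04f3:0089 = 0x%08x\n", lookup_quirks(0x04f3, 0x0089));
	printf("quirks for 1234:5678 = 0x%08x\n", lookup_quirks(0x1234, 0x5678));
	return 0;
}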
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 917d545..e05a672 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ }
+ }
+
+- ret = wait_for_completion_io_timeout(&dev->cmd_complete,
++ ret = wait_for_completion_timeout(&dev->cmd_complete,
+ dev->adapter.timeout);
+ if (ret == 0) {
+ dev_err(dev->dev, "controller timed out\n");
+diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
+index 1665c8e..e18bc67 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
++++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
+@@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+ goto st_sensors_free_memory;
+ }
+
+- for (i = 0; i < n * num_data_channels; i++) {
++ for (i = 0; i < n * byte_for_channel; i++) {
+ if (i < n)
+ buf[i] = rx_array[i];
+ else
+diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
+index 5e780ef..8349cc0 100644
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -330,7 +330,7 @@ static int as3935_probe(struct spi_device *spi)
+ return -EINVAL;
+ }
+
+- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(st));
++ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index bda5994..8b72cf3 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
+ err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
+ &mflow->reg_id[i]);
+ if (err)
+- goto err_free;
++ goto err_create_flow;
+ i++;
+ }
+
+ if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ if (err)
+- goto err_free;
++ goto err_create_flow;
++ i++;
+ }
+
+ return &mflow->ibflow;
+
++err_create_flow:
++ while (i) {
++ (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
++ i--;
++ }
+ err_free:
+ kfree(mflow);
+ return ERR_PTR(err);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index da8ff12..4d35bc7 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2185,7 +2185,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ isert_cmd->tx_desc.num_sge = 2;
+ }
+
+- isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
++ isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+
+ pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+@@ -2884,7 +2884,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_conn, isert_cmd,
+- &isert_cmd->tx_desc.send_wr, true);
++ &isert_cmd->tx_desc.send_wr, false);
+ isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+ wr->send_wr_num += 1;
+ }
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 35a49bf..2b0ae8c 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -835,8 +835,8 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
+ f->fingers = alps_process_bitmap(priv, f);
+ }
+
+- f->left = packet[4] & 0x01;
+- f->right = packet[4] & 0x02;
++ f->left = !!(packet[4] & 0x01);
++ f->right = !!(packet[4] & 0x02);
+
+ f->st.x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
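As an illustration of the !!(...) change above, which normalizes the masked button bits to 0 or 1 before assignment: the minimal standalone sketch below shows why that matters, assuming the destination fields are one-bit bitfields (the struct here is purely illustrative, not the driver's).

#include <stdio.h>

struct buttons {
	unsigned int left:1;   /* a one-bit field keeps only bit 0 of the assigned value */
	unsigned int right:1;
};

int main(void)
{
	unsigned char packet4 = 0x02; /* right button bit set, left bit clear */
	struct buttons a, b;

	/* Without normalization: the value 0x02 truncated to one bit becomes 0. */
	a.left  = packet4 & 0x01;
	a.right = packet4 & 0x02;

	/* With !!: any non-zero mask result becomes exactly 1. */
	b.left  = !!(packet4 & 0x01);
	b.right = !!(packet4 & 0x02);

	printf("raw assign: left=%u right=%u\n", a.left, a.right);   /* prints 0 0 */
	printf("normalized: left=%u right=%u\n", b.left, b.right);   /* prints 0 1 */
	return 0;
}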
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index fd23181..b5b630c 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -618,6 +618,8 @@ static void synaptics_parse_agm(const unsigned char buf[],
+ priv->agm_pending = true;
+ }
+
++static bool is_forcepad;
++
+ static int synaptics_parse_hw_state(const unsigned char buf[],
+ struct synaptics_data *priv,
+ struct synaptics_hw_state *hw)
+@@ -647,7 +649,7 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->left = (buf[0] & 0x01) ? 1 : 0;
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+
+- if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
++ if (is_forcepad) {
+ /*
+ * ForcePads, like Clickpads, use middle button
+ * bits to report primary button clicks.
+@@ -1678,11 +1680,29 @@ static const struct dmi_system_id __initconst cr48_dmi_table[] = {
+ { }
+ };
+
++static const struct dmi_system_id forcepad_dmi_table[] __initconst = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Folio 1040 G1"),
++ },
++ },
++#endif
++ { }
++};
++
+ void __init synaptics_module_init(void)
+ {
+ impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
+ broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+ cr48_profile_sensor = dmi_check_system(cr48_dmi_table);
++
++ /*
++ * Unfortunately ForcePad capability is not exported over PS/2,
++ * so we have to resort to checking DMI.
++ */
++ is_forcepad = dmi_check_system(forcepad_dmi_table);
+ }
+
+ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
+diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
+index fb2e076..1bd01f2 100644
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -77,12 +77,9 @@
+ * for noise.
+ * 2 0x08 image sensor image sensor tracks 5 fingers, but only
+ * reports 2.
++ * 2 0x01 uniform clickpad whole clickpad moves instead of being
++ * hinged at the top.
+ * 2 0x20 report min query 0x0f gives min coord reported
+- * 2 0x80 forcepad forcepad is a variant of clickpad that
+- * does not have physical buttons but rather
+- * uses pressure above certain threshold to
+- * report primary clicks. Forcepads also have
+- * clickpad bit set.
+ */
+ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
+ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
+@@ -91,7 +88,6 @@
+ #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
+ #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
+ #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
+-#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
+
+ /* synaptics modes query bits */
+ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 40b7d6c..faeeb13 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ },
+ {
+ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
++ },
++ },
++ {
++ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
+ DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
+@@ -623,6 +629,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ },
+ },
+ {
++ /* Fujitsu A544 laptop */
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
++ },
++ },
++ {
++ /* Fujitsu AH544 laptop */
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
++ },
++ },
++ {
+ /* Fujitsu U574 laptop */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
+ .matches = {
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index ecb0109..5aff937 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -260,17 +260,13 @@ static bool check_device(struct device *dev)
+ return true;
+ }
+
+-static int init_iommu_group(struct device *dev)
++static void init_iommu_group(struct device *dev)
+ {
+ struct iommu_group *group;
+
+ group = iommu_group_get_for_dev(dev);
+-
+- if (IS_ERR(group))
+- return PTR_ERR(group);
+-
+- iommu_group_put(group);
+- return 0;
++ if (!IS_ERR(group))
++ iommu_group_put(group);
+ }
+
+ static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+@@ -340,7 +336,6 @@ static int iommu_init_device(struct device *dev)
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct iommu_dev_data *dev_data;
+ u16 alias;
+- int ret;
+
+ if (dev->archdata.iommu)
+ return 0;
+@@ -364,12 +359,6 @@ static int iommu_init_device(struct device *dev)
+ dev_data->alias_data = alias_data;
+ }
+
+- ret = init_iommu_group(dev);
+- if (ret) {
+- free_dev_data(dev_data);
+- return ret;
+- }
+-
+ if (pci_iommuv2_capable(pdev)) {
+ struct amd_iommu *iommu;
+
+@@ -455,6 +444,15 @@ int __init amd_iommu_init_devices(void)
+ goto out_free;
+ }
+
++ /*
++ * Initialize IOMMU groups only after iommu_init_device() has
++ * had a chance to populate any IVRS defined aliases.
++ */
++ for_each_pci_dev(pdev) {
++ if (check_device(&pdev->dev))
++ init_iommu_group(&pdev->dev);
++ }
++
+ return 0;
+
+ out_free:
+@@ -2415,6 +2413,7 @@ static int device_change_notifier(struct notifier_block *nb,
+ case BUS_NOTIFY_ADD_DEVICE:
+
+ iommu_init_device(dev);
++ init_iommu_group(dev);
+
+ /*
+ * dev_data is still NULL and
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 0639b92..690818d 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -30,6 +30,7 @@
+ #include <linux/notifier.h>
+ #include <linux/err.h>
+ #include <linux/pci.h>
++#include <linux/bitops.h>
+ #include <trace/events/iommu.h>
+
+ static struct kset *iommu_group_kset;
+@@ -519,6 +520,9 @@ int iommu_group_id(struct iommu_group *group)
+ }
+ EXPORT_SYMBOL_GPL(iommu_group_id);
+
++static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
++ unsigned long *devfns);
++
+ /*
+ * To consider a PCI device isolated, we require ACS to support Source
+ * Validation, Request Redirection, Completer Redirection, and Upstream
+@@ -529,6 +533,86 @@ EXPORT_SYMBOL_GPL(iommu_group_id);
+ */
+ #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+
++/*
++ * For multifunction devices which are not isolated from each other, find
++ * all the other non-isolated functions and look for existing groups. For
++ * each function, we also need to look for aliases to or from other devices
++ * that may already have a group.
++ */
++static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
++ unsigned long *devfns)
++{
++ struct pci_dev *tmp = NULL;
++ struct iommu_group *group;
++
++ if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
++ return NULL;
++
++ for_each_pci_dev(tmp) {
++ if (tmp == pdev || tmp->bus != pdev->bus ||
++ PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
++ pci_acs_enabled(tmp, REQ_ACS_FLAGS))
++ continue;
++
++ group = get_pci_alias_group(tmp, devfns);
++ if (group) {
++ pci_dev_put(tmp);
++ return group;
++ }
++ }
++
++ return NULL;
++}
++
++/*
++ * Look for aliases to or from the given device for existing groups. The
++ * dma_alias_devfn only supports aliases on the same bus, therefore the search
++ * space is quite small (especially since we're really only looking at PCIe
++ * devices, and therefore only expect multiple slots on the root complex or
++ * downstream switch ports). It's conceivable though that a pair of
++ * multifunction devices could have aliases between them that would cause a
++ * loop. To prevent this, we use a bitmap to track where we've been.
++ */
++static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
++ unsigned long *devfns)
++{
++ struct pci_dev *tmp = NULL;
++ struct iommu_group *group;
++
++ if (test_and_set_bit(pdev->devfn & 0xff, devfns))
++ return NULL;
++
++ group = iommu_group_get(&pdev->dev);
++ if (group)
++ return group;
++
++ for_each_pci_dev(tmp) {
++ if (tmp == pdev || tmp->bus != pdev->bus)
++ continue;
++
++ /* We alias them or they alias us */
++ if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
++ pdev->dma_alias_devfn == tmp->devfn) ||
++ ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
++ tmp->dma_alias_devfn == pdev->devfn)) {
++
++ group = get_pci_alias_group(tmp, devfns);
++ if (group) {
++ pci_dev_put(tmp);
++ return group;
++ }
++
++ group = get_pci_function_alias_group(tmp, devfns);
++ if (group) {
++ pci_dev_put(tmp);
++ return group;
++ }
++ }
++ }
++
++ return NULL;
++}
++
+ struct group_for_pci_data {
+ struct pci_dev *pdev;
+ struct iommu_group *group;
+@@ -557,7 +641,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
+ struct group_for_pci_data data;
+ struct pci_bus *bus;
+ struct iommu_group *group = NULL;
+- struct pci_dev *tmp;
++ u64 devfns[4] = { 0 };
+
+ /*
+ * Find the upstream DMA alias for the device. A device must not
+@@ -591,76 +675,21 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
+ }
+
+ /*
+- * Next we need to consider DMA alias quirks. If one device aliases
+- * to another, they should be grouped together. It's theoretically
+- * possible that aliases could create chains of devices where each
+- * device aliases another device. If we then factor in multifunction
+- * ACS grouping requirements, each alias could incorporate a new slot
+- * with multiple functions, each with aliases. This is all extremely
+- * unlikely as DMA alias quirks are typically only used for PCIe
+- * devices where we usually have a single slot per bus. Furthermore,
+- * the alias quirk is usually to another function within the slot
+- * (and ACS multifunction is not supported) or to a different slot
+- * that doesn't physically exist. The likely scenario is therefore
+- * that everything on the bus gets grouped together. To reduce the
+- * problem space, share the IOMMU group for all devices on the bus
+- * if a DMA alias quirk is present on the bus.
+- */
+- tmp = NULL;
+- for_each_pci_dev(tmp) {
+- if (tmp->bus != pdev->bus ||
+- !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN))
+- continue;
+-
+- pci_dev_put(tmp);
+- tmp = NULL;
+-
+- /* We have an alias quirk, search for an existing group */
+- for_each_pci_dev(tmp) {
+- struct iommu_group *group_tmp;
+-
+- if (tmp->bus != pdev->bus)
+- continue;
+-
+- group_tmp = iommu_group_get(&tmp->dev);
+- if (!group) {
+- group = group_tmp;
+- continue;
+- }
+-
+- if (group_tmp) {
+- WARN_ON(group != group_tmp);
+- iommu_group_put(group_tmp);
+- }
+- }
+-
+- return group ? group : iommu_group_alloc();
+- }
+-
+- /*
+- * Non-multifunction devices or multifunction devices supporting
+- * ACS get their own group.
++ * Look for existing groups on device aliases. If we alias another
++ * device or another device aliases us, use the same group.
+ */
+- if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
+- return iommu_group_alloc();
++ group = get_pci_alias_group(pdev, (unsigned long *)devfns);
++ if (group)
++ return group;
+
+ /*
+- * Multifunction devices not supporting ACS share a group with other
+- * similar devices in the same slot.
++ * Look for existing groups on non-isolated functions on the same
++ * slot and aliases of those functions, if any. No need to clear
++ * the search bitmap, the tested devfns are still valid.
+ */
+- tmp = NULL;
+- for_each_pci_dev(tmp) {
+- if (tmp == pdev || tmp->bus != pdev->bus ||
+- PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
+- pci_acs_enabled(tmp, REQ_ACS_FLAGS))
+- continue;
+-
+- group = iommu_group_get(&tmp->dev);
+- if (group) {
+- pci_dev_put(tmp);
+- return group;
+- }
+- }
++ group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
++ if (group)
++ return group;
+
+ /* No shared group found, allocate new */
+ return iommu_group_alloc();
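A note on the new get_pci_alias_group()/get_pci_function_alias_group() helpers above: they guard against alias loops with a 256-bit devfns bitmap (four u64 words, one bit per possible PCI devfn) and test_and_set_bit(). The small userspace sketch below illustrates that visited-set idea; test_and_set_devfn() is a hypothetical stand-in for the kernel bitops, not the kernel's API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEVFN_BITS 256 /* devfn is 8 bits wide: 32 slots x 8 functions */

/* Set bit 'n' and report whether it was already set (a loop would be detected). */
static int test_and_set_devfn(uint64_t *map, unsigned int n)
{
	uint64_t mask = UINT64_C(1) << (n % 64);
	int was_set = (map[n / 64] & mask) != 0;

	map[n / 64] |= mask;
	return was_set;
}

int main(void)
{
	uint64_t devfns[DEVFN_BITS / 64];
	unsigned int devfn = 0x10; /* e.g. slot 2, function 0 */

	memset(devfns, 0, sizeof(devfns));

	if (!test_and_set_devfn(devfns, devfn))
		printf("devfn 0x%02x: first visit, keep searching\n", devfn);
	if (test_and_set_devfn(devfns, devfn))
		printf("devfn 0x%02x: already visited, stop (loop avoided)\n", devfn);
	return 0;
}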
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 574aba0..1cb538f 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -43,6 +43,7 @@
+ #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
+ #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+ #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
++#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
+
+ #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+ #define ARMADA_375_PPI_CAUSE (0x10)
+@@ -410,19 +411,29 @@ static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
+ struct irq_desc *desc)
+ {
+ struct irq_chip *chip = irq_get_chip(irq);
+- unsigned long irqmap, irqn;
++ unsigned long irqmap, irqn, irqsrc, cpuid;
+ unsigned int cascade_irq;
+
+ chained_irq_enter(chip, desc);
+
+ irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+-
+- if (irqmap & BIT(0)) {
+- armada_370_xp_handle_msi_irq(NULL, true);
+- irqmap &= ~BIT(0);
+- }
++ cpuid = cpu_logical_map(smp_processor_id());
+
+ for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
++ irqsrc = readl_relaxed(main_int_base +
++ ARMADA_370_XP_INT_SOURCE_CTL(irqn));
++
++ /* Check if the interrupt is not masked on current CPU.
++ * Test IRQ (0-1) and FIQ (8-9) mask bits.
++ */
++ if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
++ continue;
++
++ if (irqn == 1) {
++ armada_370_xp_handle_msi_irq(NULL, true);
++ continue;
++ }
++
+ cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
+ generic_handle_irq(cascade_irq);
+ }
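For reference, the new ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) test above skips cascaded interrupts whose IRQ (bits 0-1) and FIQ (bits 8-9) enable bits are clear for the current CPU in the per-interrupt source-control register. The tiny standalone check below shows how that mask shifts per CPU; the register value is made up purely to exercise the macro.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
/* Same shape as the macro added in the hunk above. */
#define INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid))

int main(void)
{
	/* Hypothetical source-control register value: IRQ enabled for CPU1 only. */
	uint32_t irqsrc = BIT(1);
	unsigned int cpu;

	for (cpu = 0; cpu < 2; cpu++)
		printf("CPU%u: mask=0x%03x -> %s\n", cpu, INT_IRQ_FIQ_MASK(cpu),
		       (irqsrc & INT_IRQ_FIQ_MASK(cpu)) ? "handle" : "skip");
	return 0;
}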
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index ab472c5..9ea5b60 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -465,6 +465,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
+ c->n_buffers[dirty]++;
+ b->list_mode = dirty;
+ list_move(&b->lru_list, &c->lru[dirty]);
++ b->last_accessed = jiffies;
+ }
+
+ /*----------------------------------------------------------------
+@@ -1472,9 +1473,9 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ if (!--nr_to_scan)
+- break;
++ return freed;
++ dm_bufio_cond_resched();
+ }
+- dm_bufio_cond_resched();
+ }
+ return freed;
+ }
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index b428c0a..39ad966 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
+
+ r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ if (r) {
+- cn_del_callback(&ulog_cn_id);
++ kfree(prealloced_cn_msg);
+ return r;
+ }
+
+diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
+index 8276999..82c7a12 100644
+--- a/drivers/media/common/siano/sms-cards.c
++++ b/drivers/media/common/siano/sms-cards.c
+@@ -157,6 +157,12 @@ static struct sms_board sms_boards[] = {
+ .type = SMS_DENVER_2160,
+ .default_mode = DEVICE_MODE_DAB_TDMB,
+ },
++ [SMS1XXX_BOARD_PCTV_77E] = {
++ .name = "Hauppauge microStick 77e",
++ .type = SMS_NOVA_B0,
++ .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0,
++ .default_mode = DEVICE_MODE_DVBT_BDA,
++ },
+ };
+
+ struct sms_board *sms_get_board(unsigned id)
+diff --git a/drivers/media/common/siano/sms-cards.h b/drivers/media/common/siano/sms-cards.h
+index c63b544..4c4cadd 100644
+--- a/drivers/media/common/siano/sms-cards.h
++++ b/drivers/media/common/siano/sms-cards.h
+@@ -45,6 +45,7 @@
+ #define SMS1XXX_BOARD_SIANO_RIO 18
+ #define SMS1XXX_BOARD_SIANO_DENVER_1530 19
+ #define SMS1XXX_BOARD_SIANO_DENVER_2160 20
++#define SMS1XXX_BOARD_PCTV_77E 21
+
+ struct sms_board_gpio_cfg {
+ int lna_vhf_exist;
+diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
+index 335daef..9d0d034 100644
+--- a/drivers/media/dvb-frontends/ds3000.c
++++ b/drivers/media/dvb-frontends/ds3000.c
+@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
+ memcpy(&state->frontend.ops, &ds3000_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
++
++ /*
++ * Some devices, like the T480, start with voltage on. Be sure
++ * to turn voltage off during init, as this can otherwise
++ * interfere with Unicable SCR systems.
++ */
++ ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
+ return &state->frontend;
+
+ error3:
+diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
+index 72af644..cf93021 100644
+--- a/drivers/media/i2c/tda7432.c
++++ b/drivers/media/i2c/tda7432.c
+@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
+ if (t->mute->val) {
+ lf |= TDA7432_MUTE;
+ lr |= TDA7432_MUTE;
+- lf |= TDA7432_MUTE;
++ rf |= TDA7432_MUTE;
+ rr |= TDA7432_MUTE;
+ }
+ /* Mute & update balance*/
+diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
+index 6d86646..5d666af2 100644
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -158,7 +158,7 @@ config VIDEO_MEM2MEM_DEINTERLACE
+
+ config VIDEO_SAMSUNG_S5P_G2D
+ tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
+- depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
++ depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_S5PV210 || ARCH_EXYNOS)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ default n
+@@ -168,7 +168,7 @@ config VIDEO_SAMSUNG_S5P_G2D
+
+ config VIDEO_SAMSUNG_S5P_JPEG
+ tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
+- depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
++ depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_S5PV210 || ARCH_EXYNOS)
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+@@ -177,7 +177,7 @@ config VIDEO_SAMSUNG_S5P_JPEG
+
+ config VIDEO_SAMSUNG_S5P_MFC
+ tristate "Samsung S5P MFC Video Codec"
+- depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
++ depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_S5PV210 || ARCH_EXYNOS)
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
+index 5dcaa0a..ec5d7c4 100644
+--- a/drivers/media/platform/exynos4-is/Kconfig
++++ b/drivers/media/platform/exynos4-is/Kconfig
+@@ -2,7 +2,7 @@
+ config VIDEO_SAMSUNG_EXYNOS4_IS
+ bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+- depends on (PLAT_S5P || ARCH_EXYNOS)
++ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ depends on OF && COMMON_CLK
+ help
+ Say Y here to enable camera host interface devices for
+diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
+index 369a4c1..dc28ad2 100644
+--- a/drivers/media/platform/s5p-tv/Kconfig
++++ b/drivers/media/platform/s5p-tv/Kconfig
+@@ -8,7 +8,7 @@
+
+ config VIDEO_SAMSUNG_S5P_TV
+ bool "Samsung TV driver for S5P platform"
+- depends on (PLAT_S5P || ARCH_EXYNOS) && PM_RUNTIME
++ depends on (ARCH_S5PV210 || ARCH_EXYNOS) && PM_RUNTIME
+ default n
+ ---help---
+ Say Y here to enable selecting the TV output devices for
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 7115e68..71c9039 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1579,7 +1579,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
+ if (press_type == 0)
+ rc_keyup(ictx->rdev);
+ else {
+- if (ictx->rc_type == RC_BIT_RC6_MCE)
++ if (ictx->rc_type == RC_BIT_RC6_MCE ||
++ ictx->rc_type == RC_BIT_OTHER)
+ rc_keydown(ictx->rdev,
+ ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER,
+ ictx->rc_scancode, ictx->rc_toggle);
+diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
+index e8fff2a..b732ac6 100644
+--- a/drivers/media/rc/rc-ir-raw.c
++++ b/drivers/media/rc/rc-ir-raw.c
+@@ -262,7 +262,6 @@ int ir_raw_event_register(struct rc_dev *dev)
+ return -ENOMEM;
+
+ dev->raw->dev = dev;
+- dev->enabled_protocols = ~0;
+ dev->change_protocol = change_protocol;
+ rc = kfifo_alloc(&dev->raw->kfifo,
+ sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index a7991c7..8d3b74c 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1421,6 +1421,8 @@ int rc_register_device(struct rc_dev *dev)
+
+ if (dev->change_protocol) {
+ u64 rc_type = (1 << rc_map->rc_type);
++ if (dev->driver_type == RC_DRIVER_IR_RAW)
++ rc_type |= RC_BIT_LIRC;
+ rc = dev->change_protocol(dev, &rc_type);
+ if (rc < 0)
+ goto out_raw;
+diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
+index 40c42de..7a62097 100644
+--- a/drivers/media/tuners/m88ts2022.c
++++ b/drivers/media/tuners/m88ts2022.c
+@@ -314,7 +314,7 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
+ div_min = gdiv28 * 78 / 100;
+ div_max = clamp_val(div_max, 0U, 63U);
+
+- f_3db_hz = c->symbol_rate * 135UL / 200UL;
++ f_3db_hz = mult_frac(c->symbol_rate, 135, 200);
+ f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
+ f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
+
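A note on the switch to mult_frac() above: it avoids a 32-bit overflow, since DVB-S2 symbol rates can reach tens of Msym/s and multiplying by 135 first can exceed 32 bits (the original unsigned long expression only wraps on 32-bit builds). The sketch below forces 32-bit arithmetic to make the wrap visible; mult_frac32() only mimics the divide-before-multiply idea of the kernel macro and is not its actual definition.

#include <stdint.h>
#include <stdio.h>

/* Divide first, then multiply, so intermediate values stay within 32 bits. */
static uint32_t mult_frac32(uint32_t x, uint32_t numer, uint32_t denom)
{
	uint32_t quot = x / denom;
	uint32_t rem  = x % denom;

	return quot * numer + (rem * numer) / denom;
}

int main(void)
{
	uint32_t symbol_rate = 45000000; /* 45 Msym/s, a plausible satellite symbol rate */

	uint32_t prod  = symbol_rate * 135u; /* truncated to 32 bits: wraps */
	uint32_t naive = prod / 200u;
	uint32_t safe  = mult_frac32(symbol_rate, 135, 200);
	uint64_t exact = (uint64_t)symbol_rate * 135 / 200;

	printf("naive 32-bit math : %u\n", naive);
	printf("mult_frac style   : %u\n", safe);
	printf("64-bit reference  : %llu\n", (unsigned long long)exact);
	return 0;
}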
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 9da812b..9c61c3f 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -3098,16 +3098,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
+ }
+ }
+
+- if (dev->chip_id == CHIP_ID_EM2870 ||
+- dev->chip_id == CHIP_ID_EM2874 ||
+- dev->chip_id == CHIP_ID_EM28174 ||
+- dev->chip_id == CHIP_ID_EM28178) {
+- /* Digital only device - don't load any alsa module */
+- dev->audio_mode.has_audio = false;
+- dev->has_audio_class = false;
+- dev->has_alsa_audio = false;
+- }
+-
+ if (chip_name != default_chip_name)
+ printk(KERN_INFO DRIVER_NAME
+ ": chip ID is %s\n", chip_name);
+@@ -3377,7 +3367,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
+ dev->alt = -1;
+ dev->is_audio_only = has_audio && !(has_video || has_dvb);
+ dev->has_alsa_audio = has_audio;
+- dev->audio_mode.has_audio = has_audio;
+ dev->has_video = has_video;
+ dev->ifnum = ifnum;
+
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index 523d7e9..0f6caa4 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -506,8 +506,18 @@ int em28xx_audio_setup(struct em28xx *dev)
+ int vid1, vid2, feat, cfg;
+ u32 vid;
+
+- if (!dev->audio_mode.has_audio)
++ if (dev->chip_id == CHIP_ID_EM2870 ||
++ dev->chip_id == CHIP_ID_EM2874 ||
++ dev->chip_id == CHIP_ID_EM28174 ||
++ dev->chip_id == CHIP_ID_EM28178) {
++ /* Digital only device - don't load any alsa module */
++ dev->audio_mode.has_audio = false;
++ dev->has_audio_class = false;
++ dev->has_alsa_audio = false;
+ return 0;
++ }
++
++ dev->audio_mode.has_audio = true;
+
+ /* See how this device is configured */
+ cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 29abc37..5122cbe 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -435,7 +435,10 @@ static inline void finish_buffer(struct em28xx *dev,
+ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
+
+ buf->vb.v4l2_buf.sequence = dev->v4l2->field_count++;
+- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
++ if (dev->v4l2->progressive)
++ buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
++ else
++ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+@@ -994,13 +997,16 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
+ }
+
+ spin_lock_irqsave(&dev->slock, flags);
++ if (dev->usb_ctl.vid_buf != NULL) {
++ vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
++ dev->usb_ctl.vid_buf = NULL;
++ }
+ while (!list_empty(&vidq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+- dev->usb_ctl.vid_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ }
+
+@@ -1021,13 +1027,16 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
+ }
+
+ spin_lock_irqsave(&dev->slock, flags);
++ if (dev->usb_ctl.vbi_buf != NULL) {
++ vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
++ dev->usb_ctl.vbi_buf = NULL;
++ }
+ while (!list_empty(&vbiq->active)) {
+ struct em28xx_buffer *buf;
+ buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+- dev->usb_ctl.vbi_buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ }
+
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index 1836a41..89c86ee 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -655,6 +655,8 @@ static const struct usb_device_id smsusb_id_table[] = {
+ .driver_info = SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD },
+ { USB_DEVICE(0x3275, 0x0080),
+ .driver_info = SMS1XXX_BOARD_SIANO_RIO },
++ { USB_DEVICE(0x2013, 0x0257),
++ .driver_info = SMS1XXX_BOARD_PCTV_77E },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index f8135f4..f3c1269 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2229,6 +2229,15 @@ static struct usb_device_id uvc_ids[] = {
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
++ /* Dell XPS M1330 (OmniVision OV7670 webcam) */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x05a9,
++ .idProduct = 0x7670,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_QUIRK_PROBE_DEF },
+ /* Apple Built-In iSight */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
+index ccaa38f..2e9d81f 100644
+--- a/drivers/media/v4l2-core/v4l2-common.c
++++ b/drivers/media/v4l2-core/v4l2-common.c
+@@ -435,16 +435,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
++ /* Clamp to aligned min and max */
++ x = clamp(x, (min + ~mask) & mask, max & mask);
++
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+- /* Clamp to aligned value of min and max */
+- if (x < min)
+- x = (min + ~mask) & mask;
+- else if (x > max)
+- x = max & mask;
+-
+ return x;
+ }
+
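A short aside on the clamp_align() reordering above: clamping to the aligned bounds before rounding keeps the intermediate sum bounded, whereas with the old order a width or height near UINT_MAX could wrap during the rounding step and collapse to the bottom of the range. The standalone comparison below reproduces both orderings from the hunk; clamp_u() is a local stand-in for the kernel's clamp() and is not the kernel helper.

#include <limits.h>
#include <stdio.h>

static unsigned int clamp_u(unsigned int x, unsigned int lo, unsigned int hi)
{
	if (x < lo)
		return lo;
	if (x > hi)
		return hi;
	return x;
}

/* Old order: round to the nearest aligned value, then clamp. */
static unsigned int round_then_clamp(unsigned int x, unsigned int min,
				     unsigned int max, unsigned int align)
{
	unsigned int mask = ~((1u << align) - 1);

	if (align)
		x = (x + (1u << (align - 1))) & mask; /* may wrap for huge x */
	if (x < min)
		x = (min + ~mask) & mask;
	else if (x > max)
		x = max & mask;
	return x;
}

/* New order: clamp to aligned min/max, then round. */
static unsigned int clamp_then_round(unsigned int x, unsigned int min,
				     unsigned int max, unsigned int align)
{
	unsigned int mask = ~((1u << align) - 1);

	x = clamp_u(x, (min + ~mask) & mask, max & mask);
	if (align)
		x = (x + (1u << (align - 1))) & mask;
	return x;
}

int main(void)
{
	unsigned int x = UINT_MAX - 2, min = 32, max = UINT_MAX - 64;

	printf("round-then-clamp: %u\n", round_then_clamp(x, min, max, 4));
	printf("clamp-then-round: %u\n", clamp_then_round(x, min, max, 4));
	return 0;
}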
+diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
+index 3c8cc02..3ff15f1 100644
+--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
++++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
+@@ -253,9 +253,11 @@ int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
+ return 0;
+ out_free_pages:
+ while (i > 0) {
+- void *addr = page_address(dma->vaddr_pages[i]);
+- dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
++ void *addr;
++
+ i--;
++ addr = page_address(dma->vaddr_pages[i]);
++ dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
+ }
+ kfree(dma->dma_addr);
+ dma->dma_addr = NULL;
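On the reshuffled cleanup loop above: stepping the index back before touching the page means only entries 0..i-1, the ones actually allocated, are freed; the old order read vaddr_pages[i] for an entry that was never allocated and also left entry 0 unfreed. The minimal userspace analogue below shows the same unwind pattern with plain malloc/free.

#include <stdio.h>
#include <stdlib.h>

#define NR_BUFS 8

int main(void)
{
	void *bufs[NR_BUFS];
	int i, freed = 0;

	for (i = 0; i < NR_BUFS; i++) {
		if (i == 5) {
			/* Simulate the allocation of entry 5 failing. */
			goto out_free;
		}
		bufs[i] = malloc(4096);
		if (!bufs[i])
			goto out_free;
	}
	return 0; /* not reached with the simulated failure above */

out_free:
	/* Entries 0..i-1 exist; bufs[i] does not, so step back before freeing. */
	while (i > 0) {
		i--;
		free(bufs[i]);
		freed++;
	}
	printf("freed %d buffers after the failure\n", freed);
	return 1;
}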
+diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
+index d01b8c2..f2643c2 100644
+--- a/drivers/mfd/rtsx_pcr.c
++++ b/drivers/mfd/rtsx_pcr.c
+@@ -1197,7 +1197,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
+ pcr->msi_en = msi_en;
+ if (pcr->msi_en) {
+ ret = pci_enable_msi(pcidev);
+- if (ret < 0)
++ if (ret)
+ pcr->msi_en = false;
+ }
+
+diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
+index dd4bf58..121add8 100644
+--- a/drivers/mfd/ti_am335x_tscadc.c
++++ b/drivers/mfd/ti_am335x_tscadc.c
+@@ -53,11 +53,11 @@ void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsadc->reg_lock, flags);
+- tsadc->reg_se_cache = val;
++ tsadc->reg_se_cache |= val;
+ if (tsadc->adc_waiting)
+ wake_up(&tsadc->reg_se_wait);
+ else if (!tsadc->adc_in_use)
+- tscadc_writel(tsadc, REG_SE, val);
++ tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
+
+ spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ }
+@@ -96,6 +96,7 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
+ void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
+ {
+ spin_lock_irq(&tsadc->reg_lock);
++ tsadc->reg_se_cache |= val;
+ am335x_tscadc_need_adc(tsadc);
+
+ tscadc_writel(tsadc, REG_SE, val);
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index e636d9e..3fc40a7 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -992,8 +992,16 @@ static int mmc_sdio_resume(struct mmc_host *host)
+ }
+ }
+
+- if (!err && host->sdio_irqs)
+- wake_up_process(host->sdio_irq_thread);
++ if (!err && host->sdio_irqs) {
++ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
++ wake_up_process(host->sdio_irq_thread);
++ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
++ mmc_host_clk_hold(host);
++ host->ops->enable_sdio_irq(host, 1);
++ mmc_host_clk_release(host);
++ }
++ }
++
+ mmc_release_host(host);
+
+ host->pm_flags &= ~MMC_PM_KEEP_POWER;
+diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
+index 5cc13c8..696eca4 100644
+--- a/drivers/mmc/core/sdio_irq.c
++++ b/drivers/mmc/core/sdio_irq.c
+@@ -208,7 +208,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
+ host->sdio_irqs--;
+ return err;
+ }
+- } else {
++ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+@@ -229,7 +229,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ atomic_set(&host->sdio_irq_thread_abort, 1);
+ kthread_stop(host->sdio_irq_thread);
+- } else {
++ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
+index 5f89cb8..187f48a 100644
+--- a/drivers/mmc/core/slot-gpio.c
++++ b/drivers/mmc/core/slot-gpio.c
+@@ -221,8 +221,6 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ ctx->override_cd_active_level = true;
+ ctx->cd_gpio = gpio_to_desc(gpio);
+
+- mmc_gpiod_request_cd_irq(host);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(mmc_gpio_request_cd);
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index cc8d4a6..e4a0754 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1436,6 +1436,7 @@ static int mmc_spi_probe(struct spi_device *spi)
+ host->pdata->cd_debounce);
+ if (status != 0)
+ goto fail_add_host;
++ mmc_gpiod_request_cd_irq(mmc);
+ }
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index dfde4a2..b2537e2 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -412,6 +412,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
++ /*
++ * The controller offloads the last byte {CRC-7, end bit 1'b1}
++ * of response type R2. Assign a dummy CRC of 0 and the end bit to that
++ * byte (ptr[16], which goes into the LSB of resp[3] later).
++ */
++ ptr[16] = 1;
++
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 5d3766e..d9153a7 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -435,6 +435,13 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
+ }
+
+ if (rsp_type == SD_RSP_TYPE_R2) {
++ /*
++ * The controller offloads the last byte {CRC-7, end bit 1'b1}
++ * of response type R2. Assign a dummy CRC of 0 and the end bit to that
++ * byte (ptr[16], which goes into the LSB of resp[3] later).
++ */
++ ptr[16] = 1;
++
+ for (i = 0; i < 4; i++) {
+ cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
+ dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
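Both rtsx hunks above seed ptr[16] with the {CRC-7 = 0, end bit = 1} byte that the controller strips from an R2 (CID/CSD) response, so the final get_unaligned_be32() read has a defined least significant byte. The host-side sketch below walks that 17-byte layout; read_be32() is a stand-in for the kernel's get_unaligned_be32(), and the payload bytes are invented.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's get_unaligned_be32(). */
static uint32_t read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t ptr[17];
	uint32_t resp[4];
	int i;

	/* Fake payload bytes as delivered by the controller (0x10..0x1f). */
	for (i = 0; i < 16; i++)
		ptr[i] = (uint8_t)(0x10 + i);

	/* The controller drops the trailing {CRC-7, end bit} byte, so supply
	 * a dummy one: CRC-7 = 0, end bit = 1. */
	ptr[16] = 1;

	for (i = 0; i < 4; i++) {
		resp[i] = read_be32(ptr + 1 + i * 4);
		printf("resp[%d] = 0x%08x\n", i, resp[i]);
	}
	/* resp[3]'s least significant byte is the dummy 0x01 end-bit byte. */
	return 0;
}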
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 6f842fb..3434c79 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -224,12 +224,11 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+
+ static const struct sdhci_ops pxav3_sdhci_ops = {
+ .set_clock = sdhci_set_clock,
+- .set_uhs_signaling = pxav3_set_uhs_signaling,
+ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = pxav3_reset,
+- .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_uhs_signaling = pxav3_set_uhs_signaling,
+ };
+
+ static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index fa5954a..1e47903 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -606,8 +606,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(dev, "sdhci_add_host() failed\n");
+- pm_runtime_forbid(&pdev->dev);
+- pm_runtime_get_noresume(&pdev->dev);
+ goto err_req_regs;
+ }
+
+@@ -618,6 +616,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
+ return 0;
+
+ err_req_regs:
++ pm_runtime_disable(&pdev->dev);
++
+ err_no_busclks:
+ clk_disable_unprepare(sc->clk_io);
+
+diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
+index 1700453..b6db259 100644
+--- a/drivers/mmc/host/sdhci-sirf.c
++++ b/drivers/mmc/host/sdhci-sirf.c
+@@ -94,6 +94,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
+ ret);
+ goto err_request_cd;
+ }
++ mmc_gpiod_request_cd_irq(host->mmc);
+ }
+
+ return 0;
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index faf0924..59d9a72 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -1103,6 +1103,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
+ tmio_mmc_host_remove(_host);
+ return ret;
+ }
++ mmc_gpiod_request_cd_irq(mmc);
+ }
+
+ *host = _host;
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index a7543ba..3096f3d 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -2590,6 +2590,8 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
++ /* Refresh LH28F640BF Partition Config. Register */
++ fixup_LH28F640BF(mtd);
+ map_write(map, CMD(0xFF), cfi->chips[i].start);
+ chip->oldstate = chip->state = FL_READY;
+ wake_up(&chip->wq);
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index ed7e0a1b..5935f0a 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -245,6 +245,56 @@ static int m25p_remove(struct spi_device *spi)
+ }
+
+
++/*
++ * XXX This needs to be kept in sync with spi_nor_ids. We can't share
++ * it with spi-nor, because if this is built as a module then modpost
++ * won't be able to read it and add appropriate aliases.
++ */
++static const struct spi_device_id m25p_ids[] = {
++ {"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
++ {"at25df641"}, {"at26f004"}, {"at26df081a"}, {"at26df161a"},
++ {"at26df321"}, {"at45db081d"},
++ {"en25f32"}, {"en25p32"}, {"en25q32b"}, {"en25p64"},
++ {"en25q64"}, {"en25qh128"}, {"en25qh256"},
++ {"f25l32pa"},
++ {"mr25h256"}, {"mr25h10"},
++ {"gd25q32"}, {"gd25q64"},
++ {"160s33b"}, {"320s33b"}, {"640s33b"},
++ {"mx25l2005a"}, {"mx25l4005a"}, {"mx25l8005"}, {"mx25l1606e"},
++ {"mx25l3205d"}, {"mx25l3255e"}, {"mx25l6405d"}, {"mx25l12805d"},
++ {"mx25l12855e"},{"mx25l25635e"},{"mx25l25655e"},{"mx66l51235l"},
++ {"mx66l1g55g"},
++ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q256a"},
++ {"n25q512a"}, {"n25q512ax3"}, {"n25q00"},
++ {"pm25lv512"}, {"pm25lv010"}, {"pm25lq032"},
++ {"s25sl032p"}, {"s25sl064p"}, {"s25fl256s0"}, {"s25fl256s1"},
++ {"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
++ {"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
++ {"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
++ {"s25fl016k"}, {"s25fl064k"},
++ {"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
++ {"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
++ {"sst25wf040"},
++ {"m25p05"}, {"m25p10"}, {"m25p20"}, {"m25p40"},
++ {"m25p80"}, {"m25p16"}, {"m25p32"}, {"m25p64"},
++ {"m25p128"}, {"n25q032"},
++ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
++ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
++ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
++ {"m45pe10"}, {"m45pe80"}, {"m45pe16"},
++ {"m25pe20"}, {"m25pe80"}, {"m25pe16"},
++ {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
++ {"m25px64"},
++ {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
++ {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
++ {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"},
++ {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"},
++ {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
++ { },
++};
++MODULE_DEVICE_TABLE(spi, m25p_ids);
++
++
+ static struct spi_driver m25p80_driver = {
+ .driver = {
+ .name = "m25p80",
+diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
+index 33c6495..5f9a1e2 100644
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -188,8 +188,9 @@ static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
+
+ ret = ubi_read(dev->desc, leb, buffer, offset, len);
+ if (ret) {
+- ubi_err("%s ubi_read error %d",
+- dev->gd->disk_name, ret);
++ ubi_err("%s: error %d while reading from LEB %d (offset %d, "
++ "length %d)", dev->gd->disk_name, ret, leb, offset,
++ len);
+ return ret;
+ }
+ return 0;
+@@ -378,7 +379,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
+ {
+ struct ubiblock *dev;
+ struct gendisk *gd;
+- u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
++ u64 disk_capacity = vi->used_bytes >> 9;
+ int ret;
+
+ if ((sector_t)disk_capacity != disk_capacity)
+@@ -502,7 +503,7 @@ int ubiblock_remove(struct ubi_volume_info *vi)
+ static int ubiblock_resize(struct ubi_volume_info *vi)
+ {
+ struct ubiblock *dev;
+- u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
++ u64 disk_capacity = vi->used_bytes >> 9;
+
+ if ((sector_t)disk_capacity != disk_capacity) {
+ ubi_warn("%s: the volume is too big, cannot resize (%d LEBs)",
+@@ -522,8 +523,12 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
+ }
+
+ mutex_lock(&dev->dev_mutex);
+- set_capacity(dev->gd, disk_capacity);
+- ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
++
++ if (get_capacity(dev->gd) != disk_capacity) {
++ set_capacity(dev->gd, disk_capacity);
++ ubi_msg("%s resized to %lld bytes", dev->gd->disk_name,
++ vi->used_bytes);
++ }
+ mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+ return 0;
+@@ -547,6 +552,14 @@ static int ubiblock_notify(struct notifier_block *nb,
+ case UBI_VOLUME_RESIZED:
+ ubiblock_resize(&nt->vi);
+ break;
++ case UBI_VOLUME_UPDATED:
++ /*
++ * If the volume is static, a content update might mean the
++ * size (i.e. used_bytes) was also changed.
++ */
++ if (nt->vi.vol_type == UBI_STATIC_VOLUME)
++ ubiblock_resize(&nt->vi);
++ break;
+ default:
+ break;
+ }
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 7646220..20aeb27 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -425,8 +425,10 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ break;
+
+ err = ubi_start_update(ubi, vol, bytes);
+- if (bytes == 0)
++ if (bytes == 0) {
++ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
+ revoke_exclusive(desc, UBI_READWRITE);
++ }
+ break;
+ }
+
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 0431b46..c701369 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ av = tmp_av;
+ else {
+ ubi_err("orphaned volume in fastmap pool!");
++ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ return UBI_BAD_FASTMAP;
+ }
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index c6f6f69..2f8f251 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -135,6 +135,7 @@ config MACVLAN
+ config MACVTAP
+ tristate "MAC-VLAN based tap driver"
+ depends on MACVLAN
++ depends on INET
+ help
+ This adds a specialized tap character device driver that is based
+ on the MAC-VLAN network interface, called macvtap. A macvtap device
+@@ -201,6 +202,7 @@ config RIONET_RX_SIZE
+
+ config TUN
+ tristate "Universal TUN/TAP device driver support"
++ depends on INET
+ select CRC32
+ ---help---
+ TUN/TAP provides packet reception and transmission for user space
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index e5be511..fac3821 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -6557,6 +6557,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->tid_release_lock);
++ spin_lock_init(&adapter->win0_lock);
+
+ INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+ INIT_WORK(&adapter->db_full_task, process_db_full);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index dae3da6..c2c7743 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -808,8 +808,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+- MLX4_WQE_CTRL_TCP_UDP_CSUM);
++ if (!skb->encapsulation)
++ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
++ MLX4_WQE_CTRL_TCP_UDP_CSUM);
++ else
++ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
+ ring->tx_csum++;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
+index ca0f98c..8728431 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
++++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
+@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
+ cur->ib.dst_gid_msk);
+ break;
+
++ case MLX4_NET_TRANS_RULE_ID_VXLAN:
++ len += snprintf(buf + len, BUF_SIZE - len,
++ "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
++ break;
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ break;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+index 655a23b..e17a970 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
+ static void stmmac_default_data(void)
+ {
+ memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
++
+ plat_dat.bus_id = 1;
+ plat_dat.phy_addr = 0;
+ plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+@@ -47,6 +48,12 @@ static void stmmac_default_data(void)
+ dma_cfg.pbl = 32;
+ dma_cfg.burst_len = DMA_AXI_BLEN_256;
+ plat_dat.dma_cfg = &dma_cfg;
++
++ /* Set default value for multicast hash bins */
++ plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
++
++ /* Set default value for unicast filter entries */
++ plat_dat.unicast_filter_entries = 1;
+ }
+
+ /**
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 0fcb5e7..148fda3 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -556,6 +556,7 @@ do_lso:
+ do_send:
+ /* Start filling in the page buffers with the rndis hdr */
+ rndis_msg->msg_len += rndis_msg_size;
++ packet->total_data_buflen = rndis_msg->msg_len;
+ packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+ skb, &packet->page_buf[0]);
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 726edab..5f17ad0 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -201,7 +201,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+
+- skb_queue_head_init(&list);
++ __skb_queue_head_init(&list);
+
+ spin_lock_bh(&port->bc_queue.lock);
+ skb_queue_splice_tail_init(&port->bc_queue, &list);
+@@ -941,9 +941,15 @@ static void macvlan_port_destroy(struct net_device *dev)
+ {
+ struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+
+- cancel_work_sync(&port->bc_work);
+ dev->priv_flags &= ~IFF_MACVLAN_PORT;
+ netdev_rx_handler_unregister(dev);
++
++ /* After this point, no packet can schedule bc_work anymore,
++ * but we need to cancel it and purge left skbs if any.
++ */
++ cancel_work_sync(&port->bc_work);
++ __skb_queue_purge(&port->bc_queue);
++
+ kfree_rcu(port, rcu);
+ }
+
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 0c6adaa..9b5481c 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -16,6 +16,7 @@
+ #include <linux/idr.h>
+ #include <linux/fs.h>
+
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/rtnetlink.h>
+ #include <net/sock.h>
+@@ -570,6 +571,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index fa0d717..90c639b 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -594,7 +594,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ if (file == ppp->owner)
+ ppp_shutdown_interface(ppp);
+ }
+- if (atomic_long_read(&file->f_count) <= 2) {
++ if (atomic_long_read(&file->f_count) < 2) {
+ ppp_release(NULL, file);
+ err = 0;
+ } else
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index acaaf67..610d166 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -65,6 +65,7 @@
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
+ #include <linux/rcupdate.h>
++#include <net/ipv6.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -1139,6 +1140,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ break;
+ }
+
++ skb_reset_network_header(skb);
++
+ if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ pr_debug("GSO!\n");
+ switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+@@ -1150,6 +1153,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
++ if (skb->protocol == htons(ETH_P_IPV6))
++ ipv6_proxy_select_ident(skb);
+ break;
+ default:
+ tun->dev->stats.rx_frame_errors++;
+@@ -1179,7 +1184,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+ }
+
+- skb_reset_network_header(skb);
+ skb_probe_transport_header(skb, 0);
+
+ rxhash = skb_get_hash(skb);
+diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
+index be42757..e6338c1 100644
+--- a/drivers/net/usb/ax88179_178a.c
++++ b/drivers/net/usb/ax88179_178a.c
+@@ -937,6 +937,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
+ {
+ struct usbnet *dev = netdev_priv(net);
+ struct sockaddr *addr = p;
++ int ret;
+
+ if (netif_running(net))
+ return -EBUSY;
+@@ -946,8 +947,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* Set the MAC address */
+- return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
++ ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
+ ETH_ALEN, net->dev_addr);
++ if (ret < 0)
++ return ret;
++
++ return 0;
+ }
+
+ static const struct net_device_ops ax88179_netdev_ops = {
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index beb377b..b483127 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1440,9 +1440,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ if (!in6_dev)
+ goto out;
+
+- if (!pskb_may_pull(skb, skb->len))
+- goto out;
+-
+ iphdr = ipv6_hdr(skb);
+ saddr = &iphdr->saddr;
+ daddr = &iphdr->daddr;
+@@ -1717,6 +1714,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ struct pcpu_sw_netstats *tx_stats, *rx_stats;
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
++ struct net_device *dev = skb->dev;
++ int len = skb->len;
+
+ tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
+ rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+@@ -1740,16 +1739,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tx_packets++;
+- tx_stats->tx_bytes += skb->len;
++ tx_stats->tx_bytes += len;
+ u64_stats_update_end(&tx_stats->syncp);
+
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->rx_packets++;
+- rx_stats->rx_bytes += skb->len;
++ rx_stats->rx_bytes += len;
+ u64_stats_update_end(&rx_stats->syncp);
+ } else {
+- skb->dev->stats.rx_dropped++;
++ dev->stats.rx_dropped++;
+ }
+ }
+
+@@ -1927,7 +1926,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ return arp_reduce(dev, skb);
+ #if IS_ENABLED(CONFIG_IPV6)
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
++ pskb_may_pull(skb, sizeof(struct ipv6hdr)
++ + sizeof(struct nd_msg)) &&
+ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ struct nd_msg *msg;
+
+@@ -1936,6 +1936,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ return neigh_reduce(dev, skb);
+ }
++ eth = eth_hdr(skb);
+ #endif
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+index afb98f4..9133985 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+@@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+ {
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
++ u32 scd_queues;
+
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+@@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ goto done;
+ }
+
+- /*
+- * mac80211 will not push any more frames for transmit
+- * until the flush is completed
+- */
+- if (drop) {
+- IWL_DEBUG_MAC80211(priv, "send flush command\n");
+- if (iwlagn_txfifo_flush(priv, 0)) {
+- IWL_ERR(priv, "flush request fail\n");
+- goto done;
+- }
++ scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
++ scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
++ BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
++
++ if (vif)
++ scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
++
++ IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
++ if (iwlagn_txfifo_flush(priv, scd_queues)) {
++ IWL_ERR(priv, "flush request fail\n");
++ goto done;
+ }
+- IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
++ IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
+ done:
+ mutex_unlock(&priv->mutex);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
+index 656371a..86fb121 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
+@@ -548,6 +548,7 @@ enum iwl_trans_state {
+ * Set during transport allocation.
+ * @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @pm_support: set to true in start_hw if link pm is supported
++ * @ltr_enabled: set to true if the LTR is enabled
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_headroom: room needed for the transport's private use before the
+@@ -574,6 +575,7 @@ struct iwl_trans {
+ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+
+ bool pm_support;
++ bool ltr_enabled;
+
+ /* The following fields are internal only */
+ struct kmem_cache *dev_cmd_pool;
+diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
+index ce71625..103fc93 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
++++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
+@@ -301,8 +301,8 @@ static const __le64 iwl_ci_mask[][3] = {
+ };
+
+ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+- cpu_to_le32(0x28412201),
+- cpu_to_le32(0x11118451),
++ cpu_to_le32(0x2e402280),
++ cpu_to_le32(0x7711a751),
+ };
+
+ struct corunning_block_luts {
+diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+index a3be333..d55c2a8 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
++++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+@@ -289,8 +289,8 @@ static const __le64 iwl_ci_mask[][3] = {
+ };
+
+ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+- cpu_to_le32(0x28412201),
+- cpu_to_le32(0x11118451),
++ cpu_to_le32(0x2e402280),
++ cpu_to_le32(0x7711a751),
+ };
+
+ struct corunning_block_luts {
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+index c3a8c86..4d8932c 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+@@ -66,13 +66,46 @@
+
+ /* Power Management Commands, Responses, Notifications */
+
++/**
++ * enum iwl_ltr_config_flags - masks for LTR config command flags
++ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
++ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
++ * memory access
++ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
++ * reg change
++ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
++ * D0 to D3
++ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
++ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
++ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
++ */
++enum iwl_ltr_config_flags {
++ LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
++ LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
++ LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
++ LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
++ LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
++ LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
++ LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
++};
++
++/**
++ * struct iwl_ltr_config_cmd - configures the LTR
++ * @flags: See %enum iwl_ltr_config_flags
++ */
++struct iwl_ltr_config_cmd {
++ __le32 flags;
++ __le32 static_long;
++ __le32 static_short;
++} __packed;
++
+ /* Radio LP RX Energy Threshold measured in dBm */
+ #define POWER_LPRX_RSSI_THRESHOLD 75
+ #define POWER_LPRX_RSSI_THRESHOLD_MAX 94
+ #define POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
+ /**
+- * enum iwl_scan_flags - masks for power table command flags
++ * enum iwl_power_flags - masks for power table command flags
+ * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
+ * receiver and transmitter. '0' - does not allow.
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+index 9a922f3..7b73ed4 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+@@ -148,6 +148,7 @@ enum {
+ /* Power - legacy power table command */
+ POWER_TABLE_CMD = 0x77,
+ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
++ LTR_CONFIG = 0xee,
+
+ /* Thermal Throttling*/
+ REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
+index 883e702..bf720a8 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
+@@ -475,6 +475,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+
++ if (mvm->trans->ltr_enabled) {
++ struct iwl_ltr_config_cmd cmd = {
++ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
++ };
++
++ WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
++ sizeof(cmd), &cmd));
++ }
++
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ goto error;
+diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
+index 610dbcb..d31a117 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
+@@ -332,6 +332,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
+ CMD(REPLY_BEACON_FILTERING_CMD),
+ CMD(REPLY_THERMAL_MNG_BACKOFF),
+ CMD(MAC_PM_POWER_TABLE),
++ CMD(LTR_CONFIG),
+ CMD(BT_COEX_CI),
+ CMD(BT_COEX_UPDATE_SW_BOOST),
+ CMD(BT_COEX_UPDATE_CORUN_LUT),
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index 9ee410b..dbc8707 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -168,14 +168,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+
+ /*
+ * for data packets, rate info comes from the table inside the fw. This
+- * table is controlled by LINK_QUALITY commands. Exclude ctrl port
+- * frames like EAPOLs which should be treated as mgmt frames. This
+- * avoids them being sent initially in high rates which increases the
+- * chances for completion of the 4-Way handshake.
++ * table is controlled by LINK_QUALITY commands
+ */
+
+- if (ieee80211_is_data(fc) && sta &&
+- !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
++ if (ieee80211_is_data(fc) && sta) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 06e04aa..d7231a8 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -172,6 +172,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
+ {
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 lctl;
++ u16 cap;
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+@@ -182,16 +183,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
+ * power savings, even without L1.
+ */
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+- if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
+- /* L1-ASPM enabled; disable(!) L0S */
++ if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+- dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
+- } else {
+- /* L1-ASPM disabled; enable(!) L0S */
++ else
+ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+- dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
+- }
+ trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
++
++ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
++ trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
++ dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
++ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
++ trans->ltr_enabled ? "En" : "Dis");
+ }
+
+ /*
+diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
+index 7cf6081..ebd5625 100644
+--- a/drivers/net/wireless/rt2x00/rt2800.h
++++ b/drivers/net/wireless/rt2x00/rt2800.h
+@@ -52,6 +52,7 @@
+ * RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
+ * RF5360 2.4G 1T1R
++ * RF5362 2.4G 1T1R
+ * RF5370 2.4G 1T1R
+ * RF5390 2.4G 1T1R
+ */
+@@ -72,6 +73,7 @@
+ #define RF3070 0x3070
+ #define RF3290 0x3290
+ #define RF5360 0x5360
++#define RF5362 0x5362
+ #define RF5370 0x5370
+ #define RF5372 0x5372
+ #define RF5390 0x5390
+@@ -2145,7 +2147,7 @@ struct mac_iveiv_entry {
+ /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
+ #define RFCSR3_PA1_BIAS_CCK FIELD8(0x70)
+ #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
+-/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
++/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */
+ #define RFCSR3_VCOCAL_EN FIELD8(0x80)
+ /* Bits for RF3050 */
+ #define RFCSR3_BIT1 FIELD8(0x02)
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 893c9d5..9f57a2d 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -3186,6 +3186,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ break;
+ case RF3070:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -3203,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF3322) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
++ rt2x00_rf(rt2x00dev, RF5362) ||
+ rt2x00_rf(rt2x00dev, RF5370) ||
+ rt2x00_rf(rt2x00dev, RF5372) ||
+ rt2x00_rf(rt2x00dev, RF5390) ||
+@@ -4317,6 +4319,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
+ case RF3070:
+ case RF3290:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -7095,6 +7098,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ case RF3320:
+ case RF3322:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -7551,6 +7555,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ case RF3320:
+ case RF3322:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+@@ -7680,6 +7685,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ case RF3070:
+ case RF3290:
+ case RF5360:
++ case RF5362:
+ case RF5370:
+ case RF5372:
+ case RF5390:
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 573897b..8444313 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -1111,6 +1111,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071) },
+ { USB_DEVICE(0x1b75, 0x3072) },
++ { USB_DEVICE(0x1b75, 0xa200) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888) },
+ /* Pegatron */
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 293ed4b..902b1b0 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1277,52 +1277,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_read_string);
+
+ /**
+- * of_property_read_string_index - Find and read a string from a multiple
+- * strings property.
+- * @np: device node from which the property value is to be read.
+- * @propname: name of the property to be searched.
+- * @index: index of the string in the list of strings
+- * @out_string: pointer to null terminated return string, modified only if
+- * return value is 0.
+- *
+- * Search for a property in a device tree node and retrieve a null
+- * terminated string value (pointer to data, not a copy) in the list of strings
+- * contained in that property.
+- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+- * property does not have a value, and -EILSEQ if the string is not
+- * null-terminated within the length of the property data.
+- *
+- * The out_string pointer is modified only if a valid string can be decoded.
+- */
+-int of_property_read_string_index(struct device_node *np, const char *propname,
+- int index, const char **output)
+-{
+- struct property *prop = of_find_property(np, propname, NULL);
+- int i = 0;
+- size_t l = 0, total = 0;
+- const char *p;
+-
+- if (!prop)
+- return -EINVAL;
+- if (!prop->value)
+- return -ENODATA;
+- if (strnlen(prop->value, prop->length) >= prop->length)
+- return -EILSEQ;
+-
+- p = prop->value;
+-
+- for (i = 0; total < prop->length; total += l, p += l) {
+- l = strlen(p) + 1;
+- if (i++ == index) {
+- *output = p;
+- return 0;
+- }
+- }
+- return -ENODATA;
+-}
+-EXPORT_SYMBOL_GPL(of_property_read_string_index);
+-
+-/**
+ * of_property_match_string() - Find string in a list and return index
+ * @np: pointer to node containing string list property
+ * @propname: string list property name
+@@ -1348,7 +1302,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ end = p + prop->length;
+
+ for (i = 0; p < end; i++, p += l) {
+- l = strlen(p) + 1;
++ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ pr_debug("comparing %s with %s\n", string, p);
+@@ -1360,39 +1314,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
+ EXPORT_SYMBOL_GPL(of_property_match_string);
+
+ /**
+- * of_property_count_strings - Find and return the number of strings from a
+- * multiple strings property.
++ * of_property_read_string_util() - Utility helper for parsing string properties
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
++ * @out_strs: output array of string pointers.
++ * @sz: number of array elements to read.
++ * @skip: Number of strings to skip over at beginning of list.
+ *
+- * Search for a property in a device tree node and retrieve the number of null
+- * terminated string contain in it. Returns the number of strings on
+- * success, -EINVAL if the property does not exist, -ENODATA if property
+- * does not have a value, and -EILSEQ if the string is not null-terminated
+- * within the length of the property data.
++ * Don't call this function directly. It is a utility helper for the
++ * of_property_read_string*() family of functions.
+ */
+-int of_property_count_strings(struct device_node *np, const char *propname)
++int of_property_read_string_helper(struct device_node *np, const char *propname,
++ const char **out_strs, size_t sz, int skip)
+ {
+ struct property *prop = of_find_property(np, propname, NULL);
+- int i = 0;
+- size_t l = 0, total = 0;
+- const char *p;
++ int l = 0, i = 0;
++ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+- if (strnlen(prop->value, prop->length) >= prop->length)
+- return -EILSEQ;
+-
+ p = prop->value;
++ end = p + prop->length;
+
+- for (i = 0; total < prop->length; total += l, p += l, i++)
+- l = strlen(p) + 1;
+-
+- return i;
++ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
++ l = strnlen(p, end - p) + 1;
++ if (p + l > end)
++ return -EILSEQ;
++ if (out_strs && i >= skip)
++ *out_strs++ = p;
++ }
++ i -= skip;
++ return i <= 0 ? -ENODATA : i;
+ }
+-EXPORT_SYMBOL_GPL(of_property_count_strings);
++EXPORT_SYMBOL_GPL(of_property_read_string_helper);
+
+ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
+ {
+diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
+index a737cb5..c92de69 100644
+--- a/drivers/of/selftest.c
++++ b/drivers/of/selftest.c
+@@ -247,8 +247,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
+ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
+ }
+
+-static void __init of_selftest_property_match_string(void)
++static void __init of_selftest_property_string(void)
+ {
++ const char *strings[4];
+ struct device_node *np;
+ int rc;
+
+@@ -265,13 +266,66 @@ static void __init of_selftest_property_match_string(void)
+ rc = of_property_match_string(np, "phandle-list-names", "third");
+ selftest(rc == 2, "third expected:0 got:%i\n", rc);
+ rc = of_property_match_string(np, "phandle-list-names", "fourth");
+- selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
++ selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
+ rc = of_property_match_string(np, "missing-property", "blah");
+- selftest(rc == -EINVAL, "missing property; rc=%i", rc);
++ selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
+ rc = of_property_match_string(np, "empty-property", "blah");
+- selftest(rc == -ENODATA, "empty property; rc=%i", rc);
++ selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
+ rc = of_property_match_string(np, "unterminated-string", "blah");
+- selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++
++ /* of_property_count_strings() tests */
++ rc = of_property_count_strings(np, "string-property");
++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "phandle-list-names");
++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "unterminated-string");
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++ rc = of_property_count_strings(np, "unterminated-string-list");
++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++
++ /* of_property_read_string_index() tests */
++ rc = of_property_read_string_index(np, "string-property", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "string-property", 1, strings);
++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[0] = NULL;
++ rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
++ strings[1] = NULL;
++
++ /* of_property_read_string_array() tests */
++ rc = of_property_read_string_array(np, "string-property", strings, 4);
++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
++ rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
++ /* -- An incorrectly formed string should cause a failure */
++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
++ /* -- parsing the correctly formed strings should still work: */
++ strings[2] = NULL;
++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
++ selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
++ strings[1] = NULL;
++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
++ selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
+ }
+
+ #define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
+@@ -783,7 +837,7 @@ static int __init of_selftest(void)
+ of_selftest_find_node_by_name();
+ of_selftest_dynamic();
+ of_selftest_parse_phandle_with_args();
+- of_selftest_property_match_string();
++ of_selftest_property_string();
+ of_selftest_property_copy();
+ of_selftest_changeset();
+ of_selftest_parse_interrupts();
+diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
+index ce0fe08..5b1527e 100644
+--- a/drivers/of/testcase-data/tests-phandle.dtsi
++++ b/drivers/of/testcase-data/tests-phandle.dtsi
+@@ -39,7 +39,9 @@
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider3 0>;
+ empty-property;
++ string-property = "foobar";
+ unterminated-string = [40 41 42 43];
++ unterminated-string-list = "first", "second", [40 41 42 43];
+ };
+ };
+ };
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 76ef791..6d04771 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -185,7 +185,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(modalias);
+
+-static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
++static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev);
+@@ -210,7 +210,7 @@ static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
+ return result < 0 ? result : count;
+ }
+
+-static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
++static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+ struct pci_dev *pdev;
+@@ -218,7 +218,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
+ pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
+ }
+-static DEVICE_ATTR_RW(enabled);
++static DEVICE_ATTR_RW(enable);
+
+ #ifdef CONFIG_NUMA
+ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
+@@ -564,7 +564,7 @@ static struct attribute *pci_dev_attrs[] = {
+ #endif
+ &dev_attr_dma_mask_bits.attr,
+ &dev_attr_consistent_dma_mask_bits.attr,
+- &dev_attr_enabled.attr,
++ &dev_attr_enable.attr,
+ &dev_attr_broken_parity_status.attr,
+ &dev_attr_msi_bus.attr,
+ #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
+diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
+index 93d7835..acc13f8 100644
+--- a/drivers/phy/phy-omap-usb2.c
++++ b/drivers/phy/phy-omap-usb2.c
+@@ -262,14 +262,16 @@ static int omap_usb2_probe(struct platform_device *pdev)
+ otg->phy = &phy->phy;
+
+ platform_set_drvdata(pdev, phy);
++ pm_runtime_enable(phy->dev);
+
+ generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL);
+- if (IS_ERR(generic_phy))
++ if (IS_ERR(generic_phy)) {
++ pm_runtime_disable(phy->dev);
+ return PTR_ERR(generic_phy);
++ }
+
+ phy_set_drvdata(generic_phy, phy);
+
+- pm_runtime_enable(phy->dev);
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
+index e12e5b0..c23d8ded 100644
+--- a/drivers/pinctrl/pinctrl-baytrail.c
++++ b/drivers/pinctrl/pinctrl-baytrail.c
+@@ -318,7 +318,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
+ "Potential Error: Setting GPIO with direct_irq_en to output");
+
+ reg_val = readl(reg) | BYT_DIR_MASK;
+- reg_val &= ~BYT_OUTPUT_EN;
++ reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
+
+ if (value)
+ writel(reg_val | BYT_LEVEL, reg);
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 96a0b75..26c4fd1 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -579,6 +579,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] __initconst = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
+ },
+ },
++ {
++ /*
++ * Note no video_set_backlight_video_vendor, we must use the
++ * acer interface, as there is no native backlight interface.
++ */
++ .ident = "Acer KAV80",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 5a59665..ff765d8 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -1561,6 +1561,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ },
+ {
+ .callback = samsung_dmi_matched,
++ .ident = "NC210",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
++ DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
++ },
++ .driver_data = &samsung_broken_acpi_video,
++ },
++ {
++ .callback = samsung_dmi_matched,
+ .ident = "730U3E/740U3E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
+index 9e4dab4..ef1f4c9 100644
+--- a/drivers/power/charger-manager.c
++++ b/drivers/power/charger-manager.c
+@@ -1720,6 +1720,11 @@ static int charger_manager_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++ if (!desc->psy_fuel_gauge) {
++ dev_err(&pdev->dev, "No fuel gauge power supply defined\n");
++ return -EINVAL;
++ }
++
+ /* Counting index only */
+ while (desc->psy_charger_stat[i])
+ i++;
+diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
+index c67ff05..d158f71 100644
+--- a/drivers/regulator/max77693.c
++++ b/drivers/regulator/max77693.c
+@@ -227,7 +227,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
+ struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max77693_regulator_data *rdata = NULL;
+ int num_rdata, i;
+- struct regulator_config config;
++ struct regulator_config config = { };
+
+ num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
+ if (!rdata || num_rdata <= 0) {
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index a168e96..54ef393 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -806,7 +806,7 @@ config RTC_DRV_DA9063
+
+ config RTC_DRV_EFI
+ tristate "EFI RTC"
+- depends on EFI
++ depends on EFI && !X86
+ help
+ If you say yes here you will get support for the EFI
+ Real Time Clock.
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index e2beab9..4747d2c 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -757,7 +757,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+ node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+- WARN_ON(node && (node != se_nacl));
++ if (WARN_ON(node && (node != se_nacl))) {
++ /*
++ * The nacl no longer matches what we think it should be.
++ * Most likely a new dynamic acl has been added while
++ * someone dropped the hardware lock. It clearly is a
++ * bug elsewhere, but this bit can't make things worse.
++ */
++ btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
++ node, GFP_ATOMIC);
++ }
+
+ pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ se_nacl, nacl->nport_wwnn, nacl->nport_id);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index aaea4b9..7cb8c73 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1887,6 +1887,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+ req->cmd_flags |= REQ_DONTPREP;
+ }
+
++ if (blk_queue_tagged(q))
++ req->cmd_flags |= REQ_QUEUED;
++ else
++ req->cmd_flags &= ~REQ_QUEUED;
++
+ scsi_init_cmd_errh(cmd);
+ cmd->scsi_done = scsi_mq_done;
+
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 5021ddf..fde7113 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -46,7 +46,7 @@
+
+ #define SPI_TCR 0x08
+
+-#define SPI_CTAR(x) (0x0c + (x * 4))
++#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
+ #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
+ #define SPI_CTAR_CPOL(x) ((x) << 26)
+ #define SPI_CTAR_CPHA(x) ((x) << 25)
+@@ -70,7 +70,7 @@
+
+ #define SPI_PUSHR 0x34
+ #define SPI_PUSHR_CONT (1 << 31)
+-#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28)
++#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
+ #define SPI_PUSHR_EOQ (1 << 27)
+ #define SPI_PUSHR_CTCNT (1 << 26)
+ #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index f1f0a58..dbd576d 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -1074,7 +1074,7 @@ err_rxdesc:
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ err_tx_sgmap:
+ dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+- pl022->sgt_tx.nents, DMA_FROM_DEVICE);
++ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ err_rx_sgmap:
+ sg_free_table(&pl022->sgt_tx);
+ err_alloc_tx_sg:
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 46f45ca..9090dad 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1276,7 +1276,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
+ if (status != 0)
+ return status;
+ write_SSCR0(0, drv_data->ioaddr);
+- clk_disable_unprepare(ssp->clk);
++
++ if (!pm_runtime_suspended(dev))
++ clk_disable_unprepare(ssp->clk);
+
+ return 0;
+ }
+@@ -1290,7 +1292,8 @@ static int pxa2xx_spi_resume(struct device *dev)
+ pxa2xx_spi_dma_resume(drv_data);
+
+ /* Enable the SSP clock */
+- clk_prepare_enable(ssp->clk);
++ if (!pm_runtime_suspended(dev))
++ clk_prepare_enable(ssp->clk);
+
+ /* Restore LPSS private register bits */
+ lpss_ssp_setup(drv_data);
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 2182c74..7a2e9c0 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -1462,10 +1462,7 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist;
+ int ret;
+
+- /* user_chanlist could be NULL for do_cmdtest ioctls */
+- if (!user_chanlist)
+- return 0;
+-
++ cmd->chanlist = NULL;
+ chanlist = memdup_user(user_chanlist,
+ cmd->chanlist_len * sizeof(unsigned int));
+ if (IS_ERR(chanlist))
+@@ -1609,13 +1606,18 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
+
+ s = &dev->subdevices[cmd.subdev];
+
+- /* load channel/gain list */
+- ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
+- if (ret)
+- return ret;
++ /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */
++ if (user_chanlist) {
++ /* load channel/gain list */
++ ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
++ if (ret)
++ return ret;
++ }
+
+ ret = s->do_cmdtest(dev, s, &cmd);
+
++ kfree(cmd.chanlist); /* free kernel copy of user chanlist */
++
+ /* restore chanlist pointer before copying back */
+ cmd.chanlist = (unsigned int __force *)user_chanlist;
+
+diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
+index 468327f..d343611 100644
+--- a/drivers/staging/iio/adc/mxs-lradc.c
++++ b/drivers/staging/iio/adc/mxs-lradc.c
+@@ -1565,14 +1565,16 @@ static int mxs_lradc_probe(struct platform_device *pdev)
+ /* Grab all IRQ sources */
+ for (i = 0; i < of_cfg->irq_count; i++) {
+ lradc->irq[i] = platform_get_irq(pdev, i);
+- if (lradc->irq[i] < 0)
+- return lradc->irq[i];
++ if (lradc->irq[i] < 0) {
++ ret = lradc->irq[i];
++ goto err_clk;
++ }
+
+ ret = devm_request_irq(dev, lradc->irq[i],
+ mxs_lradc_handle_irq, 0,
+ of_cfg->irq_name[i], iio);
+ if (ret)
+- return ret;
++ goto err_clk;
+ }
+
+ lradc->vref_mv = of_cfg->vref_mv;
+@@ -1594,7 +1596,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
+ &mxs_lradc_trigger_handler,
+ &mxs_lradc_buffer_ops);
+ if (ret)
+- return ret;
++ goto err_clk;
+
+ ret = mxs_lradc_trigger_init(iio);
+ if (ret)
+@@ -1649,6 +1651,8 @@ err_dev:
+ mxs_lradc_trigger_remove(iio);
+ err_trig:
+ iio_triggered_buffer_cleanup(iio);
++err_clk:
++ clk_disable_unprepare(lradc->clk);
+ return ret;
+ }
+
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 2b96665..97d4b3f 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = AD5933_REG_TEMP_DATA,
++ .scan_index = -1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 14,
+@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "real_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+- BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "real",
+ .address = AD5933_REG_REAL_DATA,
+ .scan_index = 0,
+ .scan_type = {
+@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "imag_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+- BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "imag",
+ .address = AD5933_REG_IMAG_DATA,
+ .scan_index = 1,
+ .scan_type = {
+@@ -748,14 +745,14 @@ static int ad5933_probe(struct i2c_client *client,
+ indio_dev->name = id->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5933_channels;
+- indio_dev->num_channels = 1; /* only register temp0_input */
++ indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
+
+ ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+- /* skip temp0_input, register in0_(real|imag)_raw */
+- ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
++ ret = iio_buffer_register(indio_dev, ad5933_channels,
++ ARRAY_SIZE(ad5933_channels));
+ if (ret)
+ goto error_unreg_ring;
+
+diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
+index 0731820..e8c98cf 100644
+--- a/drivers/staging/iio/meter/ade7758.h
++++ b/drivers/staging/iio/meter/ade7758.h
+@@ -119,7 +119,6 @@ struct ade7758_state {
+ u8 *tx;
+ u8 *rx;
+ struct mutex buf_lock;
+- const struct iio_chan_spec *ade7758_ring_channels;
+ struct spi_transfer ring_xfer[4];
+ struct spi_message ring_msg;
+ /*
+diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
+index cba183e..94d9914 100644
+--- a/drivers/staging/iio/meter/ade7758_core.c
++++ b/drivers/staging/iio/meter/ade7758_core.c
+@@ -630,9 +630,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
+ .scan_index = 0,
+ .scan_type = {
+@@ -644,9 +641,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
+ .scan_index = 1,
+ .scan_type = {
+@@ -658,9 +652,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "apparent_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "apparent",
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
+ .scan_index = 2,
+ .scan_type = {
+@@ -672,9 +664,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "active_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "active",
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
+ .scan_index = 3,
+ .scan_type = {
+@@ -686,9 +676,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 0,
+- .extend_name = "reactive_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "reactive",
+ .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
+ .scan_index = 4,
+ .scan_type = {
+@@ -700,9 +688,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
+ .scan_index = 5,
+ .scan_type = {
+@@ -714,9 +699,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
+ .scan_index = 6,
+ .scan_type = {
+@@ -728,9 +710,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "apparent_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "apparent",
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
+ .scan_index = 7,
+ .scan_type = {
+@@ -742,9 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "active_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "active",
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
+ .scan_index = 8,
+ .scan_type = {
+@@ -756,9 +734,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 1,
+- .extend_name = "reactive_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "reactive",
+ .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
+ .scan_index = 9,
+ .scan_type = {
+@@ -770,9 +746,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
+ .scan_index = 10,
+ .scan_type = {
+@@ -784,9 +757,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
+ .scan_index = 11,
+ .scan_type = {
+@@ -798,9 +768,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "apparent_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "apparent",
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
+ .scan_index = 12,
+ .scan_type = {
+@@ -812,9 +780,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "active_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "active",
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
+ .scan_index = 13,
+ .scan_type = {
+@@ -826,9 +792,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+- .extend_name = "reactive_raw",
+- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
++ .extend_name = "reactive",
+ .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
+ .scan_index = 14,
+ .scan_type = {
+@@ -869,13 +833,14 @@ static int ade7758_probe(struct spi_device *spi)
+ goto error_free_rx;
+ }
+ st->us = spi;
+- st->ade7758_ring_channels = &ade7758_channels[0];
+ mutex_init(&st->buf_lock);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ade7758_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
++ indio_dev->channels = ade7758_channels;
++ indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
+
+ ret = ade7758_configure_ring(indio_dev);
+ if (ret)
+diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
+index c0accf8..6e90064 100644
+--- a/drivers/staging/iio/meter/ade7758_ring.c
++++ b/drivers/staging/iio/meter/ade7758_ring.c
+@@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
+ **/
+ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
+ {
+- struct ade7758_state *st = iio_priv(indio_dev);
+ unsigned channel;
+
+- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
++ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ return -EINVAL;
+
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+
+ ade7758_write_waveform_type(&indio_dev->dev,
+- st->ade7758_ring_channels[channel].address);
++ indio_dev->channels[channel].address);
+
+ return 0;
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 98da901..15a1c13 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1409,7 +1409,8 @@ int core_dev_add_initiator_node_lun_acl(
+ * Check to see if there are any existing persistent reservation APTPL
+ * pre-registrations that need to be enabled for this LUN ACL..
+ */
+- core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
++ core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
++ lacl->mapped_lun);
+ return 0;
+ }
+
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index df35786..1aadcfc 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -944,10 +944,10 @@ int core_scsi3_check_aptpl_registration(
+ struct se_device *dev,
+ struct se_portal_group *tpg,
+ struct se_lun *lun,
+- struct se_lun_acl *lun_acl)
++ struct se_node_acl *nacl,
++ u32 mapped_lun)
+ {
+- struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+- struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
++ struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ return 0;
+diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
+index 2ee2936..749fd7b 100644
+--- a/drivers/target/target_core_pr.h
++++ b/drivers/target/target_core_pr.h
+@@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
+ unsigned char *, u16, u32, int, int, u8);
+ extern int core_scsi3_check_aptpl_registration(struct se_device *,
+ struct se_portal_group *, struct se_lun *,
+- struct se_lun_acl *);
++ struct se_node_acl *, u32);
+ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+ struct se_node_acl *);
+ extern void core_scsi3_free_all_registrations(struct se_device *);
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index fddfae6..8d8ecfb 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -40,6 +40,7 @@
+ #include <target/target_core_fabric.h>
+
+ #include "target_core_internal.h"
++#include "target_core_pr.h"
+
+ extern struct se_device *g_lun0_dev;
+
+@@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
+
+ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg);
++ /*
++ * Check to see if there are any existing persistent reservation
++ * APTPL pre-registrations that need to be enabled for this dynamic
++ * LUN ACL now..
++ */
++ core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
++ lun->unpacked_lun);
+ spin_lock(&tpg->tpg_lun_lock);
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 7fa62fc..ab61014 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1877,8 +1877,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+- if (ret)
+- goto out;
++ goto out;
+ }
+
+ switch (cmd->data_direction) {
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index 0da0b54..077570a 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -683,17 +683,6 @@ static void msm_power(struct uart_port *port, unsigned int state,
+ }
+
+ #ifdef CONFIG_CONSOLE_POLL
+-static int msm_poll_init(struct uart_port *port)
+-{
+- struct msm_port *msm_port = UART_TO_MSM(port);
+-
+- /* Enable single character mode on RX FIFO */
+- if (msm_port->is_uartdm >= UARTDM_1P4)
+- msm_write(port, UARTDM_DMEN_RX_SC_ENABLE, UARTDM_DMEN);
+-
+- return 0;
+-}
+-
+ static int msm_poll_get_char_single(struct uart_port *port)
+ {
+ struct msm_port *msm_port = UART_TO_MSM(port);
+@@ -705,7 +694,7 @@ static int msm_poll_get_char_single(struct uart_port *port)
+ return msm_read(port, rf_reg) & 0xff;
+ }
+
+-static int msm_poll_get_char_dm_1p3(struct uart_port *port)
++static int msm_poll_get_char_dm(struct uart_port *port)
+ {
+ int c;
+ static u32 slop;
+@@ -729,6 +718,10 @@ static int msm_poll_get_char_dm_1p3(struct uart_port *port)
+ slop = msm_read(port, UARTDM_RF);
+ c = sp[0];
+ count--;
++ msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
++ msm_write(port, 0xFFFFFF, UARTDM_DMRX);
++ msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
++ UART_CR);
+ } else {
+ c = NO_POLL_CHAR;
+ }
+@@ -752,8 +745,8 @@ static int msm_poll_get_char(struct uart_port *port)
+ imr = msm_read(port, UART_IMR);
+ msm_write(port, 0, UART_IMR);
+
+- if (msm_port->is_uartdm == UARTDM_1P3)
+- c = msm_poll_get_char_dm_1p3(port);
++ if (msm_port->is_uartdm)
++ c = msm_poll_get_char_dm(port);
+ else
+ c = msm_poll_get_char_single(port);
+
+@@ -812,7 +805,6 @@ static struct uart_ops msm_uart_pops = {
+ .verify_port = msm_verify_port,
+ .pm = msm_power,
+ #ifdef CONFIG_CONSOLE_POLL
+- .poll_init = msm_poll_init,
+ .poll_get_char = msm_poll_get_char,
+ .poll_put_char = msm_poll_put_char,
+ #endif
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 29a7be4..0f03988 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -362,7 +362,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
+ * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
+ * Die! Die! Die!
+ */
+- if (baud == 38400)
++ if (try == 0 && baud == 38400)
+ baud = altbaud;
+
+ /*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 8fbad34..848c17a 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1686,6 +1686,7 @@ int tty_release(struct inode *inode, struct file *filp)
+ int pty_master, tty_closing, o_tty_closing, do_sleep;
+ int idx;
+ char buf[64];
++ long timeout = 0;
+
+ if (tty_paranoia_check(tty, inode, __func__))
+ return 0;
+@@ -1770,7 +1771,11 @@ int tty_release(struct inode *inode, struct file *filp)
+ __func__, tty_name(tty, buf));
+ tty_unlock_pair(tty, o_tty);
+ mutex_unlock(&tty_mutex);
+- schedule();
++ schedule_timeout_killable(timeout);
++ if (timeout < 120 * HZ)
++ timeout = 2 * timeout + 1;
++ else
++ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ /*
+diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
+index 610b720..59b25e0 100644
+--- a/drivers/tty/vt/consolemap.c
++++ b/drivers/tty/vt/consolemap.c
+@@ -539,6 +539,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+
+ /* Save original vc_unipagdir_loc in case we allocate a new one */
+ p = *vc->vc_uni_pagedir_loc;
++
++ if (!p) {
++ err = -EINVAL;
++
++ goto out_unlock;
++ }
+
+ if (p->refcount > 1) {
+ int j, k;
+@@ -623,6 +629,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+ set_inverse_transl(vc, p, i); /* Update inverse translations */
+ set_inverse_trans_unicode(vc, p);
+
++out_unlock:
+ console_unlock();
+ return err;
+ }
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 619d13e..4ecb650 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -732,7 +732,6 @@ static int ci_hdrc_remove(struct platform_device *pdev)
+ ci_role_destroy(ci);
+ ci_hdrc_enter_lpm(ci, true);
+ usb_phy_shutdown(ci->transceiver);
+- kfree(ci->hw_bank.regmap);
+
+ return 0;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e934e19..7daaef1 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -145,8 +145,15 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
+ /* devices aren't required to support these requests.
+ * the cdc acm descriptor tells whether they do...
+ */
+-#define acm_set_control(acm, control) \
+- acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0)
++static inline int acm_set_control(struct acm *acm, int control)
++{
++ if (acm->quirks & QUIRK_CONTROL_LINE_STATE)
++ return -EOPNOTSUPP;
++
++ return acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE,
++ control, NULL, 0);
++}
++
+ #define acm_set_line(acm, line) \
+ acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line))
+ #define acm_send_break(acm, ms) \
+@@ -980,11 +987,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ /* FIXME: Needs to clear unsupported bits in the termios */
+ acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+- if (!newline.dwDTERate) {
++ if (C_BAUD(tty) == B0) {
+ newline.dwDTERate = acm->line.dwDTERate;
+ newctrl &= ~ACM_CTRL_DTR;
+- } else
++ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
+ newctrl |= ACM_CTRL_DTR;
++ }
+
+ if (newctrl != acm->ctrlout)
+ acm_set_control(acm, acm->ctrlout = newctrl);
+@@ -1314,6 +1322,7 @@ made_compressed_probe:
+ tty_port_init(&acm->port);
+ acm->port.ops = &acm_port_ops;
+ init_usb_anchor(&acm->delayed);
++ acm->quirks = quirks;
+
+ buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
+ if (!buf) {
+@@ -1681,6 +1690,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
++ { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
++ .driver_info = QUIRK_CONTROL_LINE_STATE, },
++ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ },
+ /* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index fc75651..d3251eb 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -121,6 +121,7 @@ struct acm {
+ unsigned int throttle_req:1; /* throttle requested */
+ u8 bInterval;
+ struct usb_anchor delayed; /* writes queued for a device about to be woken */
++ unsigned long quirks;
+ };
+
+ #define CDC_DATA_INTERFACE_TYPE 0x0a
+@@ -132,3 +133,4 @@ struct acm {
+ #define NOT_A_MODEM BIT(3)
+ #define NO_DATA_INTERFACE BIT(4)
+ #define IGNORE_DEVICE BIT(5)
++#define QUIRK_CONTROL_LINE_STATE BIT(6)
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 487abcf..258e6fe 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2057,6 +2057,8 @@ int usb_alloc_streams(struct usb_interface *interface,
+ return -EINVAL;
+ if (dev->speed != USB_SPEED_SUPER)
+ return -EINVAL;
++ if (dev->state < USB_STATE_CONFIGURED)
++ return -ENODEV;
+
+ for (i = 0; i < num_eps; i++) {
+ /* Streams only apply to bulk endpoints. */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index dc84915..674c262 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4540,6 +4540,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
+ struct usb_qualifier_descriptor *qual;
+ int status;
+
++ if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
++ return;
++
+ qual = kmalloc (sizeof *qual, GFP_KERNEL);
+ if (qual == NULL)
+ return;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 814e712..39b4081 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -93,6 +93,16 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* Elan Touchscreen */
++ { USB_DEVICE(0x04f3, 0x0089), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
++ { USB_DEVICE(0x04f3, 0x009b), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
++ { USB_DEVICE(0x04f3, 0x016f), .driver_info =
++ USB_QUIRK_DEVICE_QUALIFIER },
++
+ /* Roland SC-8820 */
+ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index fc0de375..97a5a0c 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev)
+ {
+ struct dwc3_omap *omap = dev_get_drvdata(dev);
+
+- dwc3_omap_write_irqmisc_set(omap, 0x00);
++ dwc3_omap_disable_irqs(omap);
+
+ return 0;
+ }
+@@ -607,19 +607,8 @@ static int dwc3_omap_prepare(struct device *dev)
+ static void dwc3_omap_complete(struct device *dev)
+ {
+ struct dwc3_omap *omap = dev_get_drvdata(dev);
+- u32 reg;
+
+- reg = (USBOTGSS_IRQMISC_OEVT |
+- USBOTGSS_IRQMISC_DRVVBUS_RISE |
+- USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+- USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+- USBOTGSS_IRQMISC_IDPULLUP_RISE |
+- USBOTGSS_IRQMISC_DRVVBUS_FALL |
+- USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+- USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+- USBOTGSS_IRQMISC_IDPULLUP_FALL);
+-
+- dwc3_omap_write_irqmisc_set(omap, reg);
++ dwc3_omap_enable_irqs(omap);
+ }
+
+ static int dwc3_omap_suspend(struct device *dev)
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 21a3520..0985ff7 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -251,7 +251,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
+
+ /* stall is always issued on EP0 */
+ dep = dwc->eps[0];
+- __dwc3_gadget_ep_set_halt(dep, 1);
++ __dwc3_gadget_ep_set_halt(dep, 1, false);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
+
+@@ -461,7 +461,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ return -EINVAL;
+ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+ break;
+- ret = __dwc3_gadget_ep_set_halt(dep, set);
++ ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+ if (ret)
+ return -EINVAL;
+ break;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 490a6ca..8cbbb54 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -615,12 +615,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ if (!usb_endpoint_xfer_isoc(desc))
+ return 0;
+
+- memset(&trb_link, 0, sizeof(trb_link));
+-
+ /* Link TRB for ISOC. The HWO bit is never reset */
+ trb_st_hw = &dep->trb_pool[0];
+
+ trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
++ memset(trb_link, 0, sizeof(*trb_link));
+
+ trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+ trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
+@@ -671,7 +670,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+
+ /* make sure HW endpoint isn't stalled */
+ if (dep->flags & DWC3_EP_STALL)
+- __dwc3_gadget_ep_set_halt(dep, 0);
++ __dwc3_gadget_ep_set_halt(dep, 0, false);
+
+ reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+ reg &= ~DWC3_DALEPENA_EP(dep->number);
+@@ -1287,7 +1286,7 @@ out0:
+ return ret;
+ }
+
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
+ {
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3 *dwc = dep->dwc;
+@@ -1296,6 +1295,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ memset(&params, 0x00, sizeof(params));
+
+ if (value) {
++ if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
++ (!list_empty(&dep->req_queued) ||
++ !list_empty(&dep->request_list)))) {
++ dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
++ dep->name);
++ return -EAGAIN;
++ }
++
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_SETSTALL, &params);
+ if (ret)
+@@ -1333,7 +1340,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+ goto out;
+ }
+
+- ret = __dwc3_gadget_ep_set_halt(dep, value);
++ ret = __dwc3_gadget_ep_set_halt(dep, value, false);
+ out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+@@ -1353,7 +1360,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
+ if (dep->number == 0 || dep->number == 1)
+ return dwc3_gadget_ep0_set_halt(ep, 1);
+ else
+- return dwc3_gadget_ep_set_halt(ep, 1);
++ return __dwc3_gadget_ep_set_halt(dep, 1, false);
+ }
+
+ /* -------------------------------------------------------------------------- */
+diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
+index a0ee75b..ac625582 100644
+--- a/drivers/usb/dwc3/gadget.h
++++ b/drivers/usb/dwc3/gadget.h
+@@ -85,7 +85,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc);
+ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
+ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
+ gfp_t gfp_flags);
+-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+
+ /**
+ * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
+diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
+index ab1065a..3384486 100644
+--- a/drivers/usb/gadget/function/f_acm.c
++++ b/drivers/usb/gadget/function/f_acm.c
+@@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ if (acm->notify->driver_data) {
+ VDBG(cdev, "reset acm control interface %d\n", intf);
+ usb_ep_disable(acm->notify);
+- } else {
+- VDBG(cdev, "init acm ctrl interface %d\n", intf);
++ }
++
++ if (!acm->notify->desc)
+ if (config_ep_by_speed(cdev->gadget, f, acm->notify))
+ return -EINVAL;
+- }
++
+ usb_ep_enable(acm->notify);
+ acm->notify->driver_data = acm;
+
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 7ad7137..a3c277c 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -648,15 +648,26 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ if (io_data->read && ret > 0) {
+ int i;
+ size_t pos = 0;
++
++ /*
++ * Since req->length may be bigger than io_data->len (after
++ * being rounded up to maxpacketsize), we may end up with more
++		 * data than user space has space for.
++ */
++ ret = min_t(int, ret, io_data->len);
++
+ use_mm(io_data->mm);
+ for (i = 0; i < io_data->nr_segs; i++) {
++ size_t len = min_t(size_t, ret - pos,
++ io_data->iovec[i].iov_len);
++ if (!len)
++ break;
+ if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
+- &io_data->buf[pos],
+- io_data->iovec[i].iov_len))) {
++ &io_data->buf[pos], len))) {
+ ret = -EFAULT;
+ break;
+ }
+- pos += io_data->iovec[i].iov_len;
++ pos += len;
+ }
+ unuse_mm(io_data->mm);
+ }
+@@ -688,7 +699,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+ struct ffs_epfile *epfile = file->private_data;
+ struct ffs_ep *ep;
+ char *data = NULL;
+- ssize_t ret, data_len;
++ ssize_t ret, data_len = -EINVAL;
+ int halt;
+
+ /* Are we still active? */
+@@ -788,13 +799,30 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+ /* Fire the request */
+ struct usb_request *req;
+
++ /*
++ * Sanity Check: even though data_len can't be used
++ * uninitialized at the time I write this comment, some
++ * compilers complain about this situation.
++ * In order to keep the code clean from warnings, data_len is
++ * being initialized to -EINVAL during its declaration, which
++ * means we can't rely on compiler anymore to warn no future
++ * changes won't result in data_len being used uninitialized.
++ * For such reason, we're adding this redundant sanity check
++ * here.
++ */
++ if (unlikely(data_len == -EINVAL)) {
++ WARN(1, "%s: data_len == -EINVAL\n", __func__);
++ ret = -EINVAL;
++ goto error_lock;
++ }
++
+ if (io_data->aio) {
+ req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
+ if (unlikely(!req))
+ goto error_lock;
+
+ req->buf = data;
+- req->length = io_data->len;
++ req->length = data_len;
+
+ io_data->buf = data;
+ io_data->ep = ep->ep;
+@@ -816,7 +844,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+
+ req = ep->req;
+ req->buf = data;
+- req->length = io_data->len;
++ req->length = data_len;
+
+ req->context = &done;
+ req->complete = ffs_epfile_io_complete;
+@@ -2626,8 +2654,6 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
+ func->conf = c;
+ func->gadget = c->cdev->gadget;
+
+- ffs_data_get(func->ffs);
+-
+ /*
+ * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+ * configurations are bound in sequence with list_for_each_entry,
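
The min_t() clamps added to ffs_user_copy_worker above exist because req->length is rounded up to the endpoint's maxpacket size, so a completed read can carry more bytes than userspace asked for. A self-contained sketch of the same clamp-then-scatter idea, with memcpy standing in for copy_to_user and purely illustrative names:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>

/*
 * Scatter 'ret' bytes of 'buf' into the iovec array, but never copy more
 * than the caller originally asked for (requested_len), mirroring the
 * clamps added in ffs_user_copy_worker.
 */
static ssize_t scatter_result(const char *buf, ssize_t ret, size_t requested_len,
			      const struct iovec *iov, int nr_segs)
{
	size_t pos = 0;

	if (ret > (ssize_t)requested_len)
		ret = requested_len;	/* hardware may round up to maxpacket */

	for (int i = 0; i < nr_segs; i++) {
		size_t len = (size_t)ret - pos;

		if (len > iov[i].iov_len)
			len = iov[i].iov_len;
		if (!len)
			break;
		memcpy(iov[i].iov_base, buf + pos, len);
		pos += len;
	}
	return ret;
}

int main(void)
{
	char src[16] = "0123456789abcdef";	/* e.g. a 16-byte maxpacket read */
	char a[4], b[4];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	/* Userspace only asked for 6 bytes even though 16 arrived. */
	ssize_t copied = scatter_result(src, sizeof(src), 6, iov, 2);

	printf("copied %zd bytes\n", copied);
	return 0;
}
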
+diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
+index b0d9817..38913ea 100644
+--- a/drivers/usb/gadget/udc/udc-core.c
++++ b/drivers/usb/gadget/udc/udc-core.c
+@@ -458,6 +458,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
+ {
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
++ if (!udc->driver) {
++ dev_err(dev, "soft-connect without a gadget driver\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (sysfs_streq(buf, "connect")) {
+ usb_gadget_udc_start(udc->gadget, udc->driver);
+ usb_gadget_connect(udc->gadget);
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index 82800a7..6f1d48e 100644
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -220,7 +220,7 @@ config USB_EHCI_SH
+
+ config USB_EHCI_EXYNOS
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+- depends on PLAT_S5P || ARCH_EXYNOS
++ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+
+@@ -527,7 +527,7 @@ config USB_OHCI_SH
+
+ config USB_OHCI_EXYNOS
+ tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
+- depends on PLAT_S5P || ARCH_EXYNOS
++ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index c22a3e1..d125568 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -126,20 +126,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_AVOID_BEI;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+- /* Workaround for occasional spurious wakeups from S5 (or
+- * any other sleep) on Haswell machines with LPT and LPT-LP
+- * with the new Intel BIOS
+- */
+- /* Limit the quirk to only known vendors, as this triggers
+- * yet another BIOS bug on some other machines
+- * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+- */
+- if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+- }
+- if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
+@@ -160,6 +146,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == 0x3432)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+
++ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
++ pdev->device == 0x1042)
++ xhci->quirks |= XHCI_BROKEN_STREAMS;
++
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 3ee133f..013fd1c 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -209,7 +209,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
+ }
+ }
+
+- if (!list_empty(&controller->early_tx_list)) {
++ if (!list_empty(&controller->early_tx_list) &&
++ !hrtimer_is_queued(&controller->early_tx)) {
+ ret = HRTIMER_RESTART;
+ hrtimer_forward_now(&controller->early_tx,
+ ktime_set(0, 50 * NSEC_PER_USEC));
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 154bcf1..b18f8d5 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -896,7 +896,9 @@ static int dsps_resume(struct device *dev)
+ dsps_writel(mbase, wrp->mode, glue->context.mode);
+ dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+- setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
++ if (musb->xceiv->state == OTG_STATE_B_IDLE &&
++ musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
++ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
+
+ return 0;
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index eca1747..cfd009d 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
++ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index dc72b92..0dad8ce 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -140,6 +140,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+ * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
+ */
+ static const struct usb_device_id id_table_combined[] = {
++ { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
+@@ -661,6 +662,8 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5937b2d..6786b70 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -30,6 +30,12 @@
+
+ /*** third-party PIDs (using FTDI_VID) ***/
+
++/*
++ * Certain versions of the official Windows FTDI driver reprogrammed
++ * counterfeit FTDI devices to PID 0. Support these devices anyway.
++ */
++#define FTDI_BRICK_PID 0x0000
++
+ #define FTDI_LUMEL_PD12_PID 0x6002
+
+ /*
+@@ -143,8 +149,12 @@
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+ #define XSENS_VID 0x2639
+-#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
++#define XSENS_AWINDA_STATION_PID 0x0101
++#define XSENS_AWINDA_DONGLE_PID 0x0102
+ #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
++#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
++
++/* Xsens devices using FTDI VID */
+ #define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
+ #define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
+ #define XSENS_CONVERTER_2_PID 0xD38A
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 078f9ed..a31ff15 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ port->interrupt_out_urb->transfer_buffer_length = length;
+
+ priv->cur_pos = priv->cur_pos + length;
+- result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
++ result = usb_submit_urb(port->interrupt_out_urb,
++ GFP_ATOMIC);
+ dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
+ todo = priv->filled - priv->cur_pos;
+
+@@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
+ priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
+ result = usb_submit_urb(port->interrupt_in_urb,
+- GFP_NOIO);
++ GFP_ATOMIC);
+ dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
+ }
+ }
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index 4856fb7..4b7bfb3 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+
+ /* The connected devices do not have a bulk write endpoint,
+	 * to transmit data to the barcode device the control endpoint is used */
+- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
++ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!dr) {
+ count = -ENOMEM;
+ goto error_no_dr;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 54a8120..e87219a 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
+ #define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE920 0x1200
++#define TELIT_PRODUCT_LE910 0x1201
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -361,6 +362,7 @@ static void option_instat_callback(struct urb *urb);
+
+ /* Haier products */
+ #define HAIER_VENDOR_ID 0x201e
++#define HAIER_PRODUCT_CE81B 0x10f8
+ #define HAIER_PRODUCT_CE100 0x2009
+
+ /* Cinterion (formerly Siemens) products */
+@@ -588,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info telit_le910_blacklist = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(2),
++};
++
+ static const struct option_blacklist_info telit_le920_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(5),
+@@ -1137,6 +1144,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
++ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+@@ -1612,6 +1621,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
+ /* Pirelli */
+ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 22c7d43..b1d815e 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ */
+ if (result == USB_STOR_XFER_LONG)
+ fake_sense = 1;
++
++ /*
++ * Sometimes a device will mistakenly skip the data phase
++ * and go directly to the status phase without sending a
++ * zero-length packet. If we get a 13-byte response here,
++ * check whether it really is a CSW.
++ */
++ if (result == USB_STOR_XFER_SHORT &&
++ srb->sc_data_direction == DMA_FROM_DEVICE &&
++ transfer_length - scsi_get_resid(srb) ==
++ US_BULK_CS_WRAP_LEN) {
++ struct scatterlist *sg = NULL;
++ unsigned int offset = 0;
++
++ if (usb_stor_access_xfer_buf((unsigned char *) bcs,
++ US_BULK_CS_WRAP_LEN, srb, &sg,
++ &offset, FROM_XFER_BUF) ==
++ US_BULK_CS_WRAP_LEN &&
++ bcs->Signature ==
++ cpu_to_le32(US_BULK_CS_SIGN)) {
++ usb_stor_dbg(us, "Device skipped data phase\n");
++ scsi_set_resid(srb, transfer_length);
++ goto skipped_data_phase;
++ }
++ }
+ }
+
+ /* See flow chart on pg 15 of the Bulk Only Transport spec for
+@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
++ skipped_data_phase:
+ /* check bulk status */
+ residue = le32_to_cpu(bcs->Residue);
+ usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
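
The new usb_stor_Bulk_transport check above covers devices that skip the data phase entirely: when a short data-phase read returns exactly 13 bytes, the driver looks at them to see whether they already form a Command Status Wrapper. A hedged, standalone sketch of that signature test (the 13-byte layout and the 'USBS' signature come from the Bulk-Only Transport spec; a little-endian host is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CSW_LEN  13
#define CSW_SIGN 0x53425355u	/* 'U''S''B''S' in little-endian byte order */

/* Bulk-Only Transport Command Status Wrapper: 13 bytes on the wire. */
struct csw {
	uint32_t signature;
	uint32_t tag;
	uint32_t residue;
	uint8_t  status;
} __attribute__((packed));

/* Returns 1 if a short data-phase transfer actually carried the CSW. */
static int looks_like_csw(const uint8_t *data, size_t len)
{
	struct csw c;

	if (len != CSW_LEN)
		return 0;
	memcpy(&c, data, CSW_LEN);
	return c.signature == CSW_SIGN;	/* assumes a little-endian host */
}

int main(void)
{
	uint8_t buf[CSW_LEN] = { 0x55, 0x53, 0x42, 0x53 };	/* "USBS", rest zero */

	printf("skipped data phase: %s\n",
	       looks_like_csw(buf, sizeof(buf)) ? "yes" : "no");
	return 0;
}
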
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 8511b54..2fefaf9 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -54,6 +54,20 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
++/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
++UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
++ "Seagate",
++ "Expansion Desk",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_ATA_1X),
++
++/* Reported-by: Bogdan Mihalcea <bogdan.mihalcea@infim.ro> */
++UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
++ "Seagate",
++ "Backup Plus",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_ATA_1X),
++
+ /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
+ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
+ "Seagate",
+@@ -61,6 +75,13 @@ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
++/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
++UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
++ "Seagate",
++ "Backup+ BK",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_ATA_1X),
++
+ /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
+ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
+ "JMicron",
+@@ -75,3 +96,10 @@ UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
+ "ASM1051",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
++
++/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
++UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
++ "VIA",
++ "VL711",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_ATA_1X),
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182b..dbfe4ee 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
+ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ int bottom_only)
+ {
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+ unsigned int cw = vc->vc_font.width;
+ unsigned int ch = vc->vc_font.height;
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bs = info->var.yres - bh;
+ struct fb_fillrect region;
+
+- region.color = attr_bgcol_ec(bgshift, vc, info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
+index 41b32ae..5a3cbf6 100644
+--- a/drivers/video/console/fbcon_ccw.c
++++ b/drivers/video/console/fbcon_ccw.c
+@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ unsigned int bs = vc->vc_rows*ch;
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
+index a93670e..e7ee44d 100644
+--- a/drivers/video/console/fbcon_cw.c
++++ b/drivers/video/console/fbcon_cw.c
+@@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int bh = info->var.xres - (vc->vc_rows*ch);
+ unsigned int rs = info->var.yres - rw;
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
+index ff0872c..19e3714 100644
+--- a/drivers/video/console/fbcon_ud.c
++++ b/drivers/video/console/fbcon_ud.c
+@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ struct fb_fillrect region;
+- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
+
+- region.color = attr_bgcol_ec(bgshift,vc,info);
++ region.color = 0;
+ region.rop = ROP_COPY;
+
+ if (rw && !bottom_only) {
+diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
+index bcb5723..6d4bfee 100644
+--- a/drivers/video/fbdev/core/cfbcopyarea.c
++++ b/drivers/video/fbdev/core/cfbcopyarea.c
+@@ -55,8 +55,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
+ * If you suspect bug in this function, compare it with this simple
+ * memmove implementation.
+ */
+- fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
+- (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
+ return;
+ #endif
+
+@@ -221,8 +221,8 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
+ * If you suspect bug in this function, compare it with this simple
+ * memmove implementation.
+ */
+- fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
+- (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
+ return;
+ #endif
+
+@@ -324,7 +324,10 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
+ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
++ if (!first)
++ FB_WRITEL(d0, dst);
++ else
++ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+ d0 = d1;
+ dst--;
+ n -= dst_idx+1;
+diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
+index 3d1463c..add40d0 100644
+--- a/drivers/virtio/virtio_pci.c
++++ b/drivers/virtio/virtio_pci.c
+@@ -789,6 +789,7 @@ static int virtio_pci_restore(struct device *dev)
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
++ unsigned status = 0;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+@@ -799,14 +800,40 @@ static int virtio_pci_restore(struct device *dev)
+ return ret;
+
+ pci_set_master(pci_dev);
++ /* We always start by resetting the device, in case a previous
++ * driver messed it up. */
++ vp_reset(&vp_dev->vdev);
++
++ /* Acknowledge that we've seen the device. */
++ status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
++ vp_set_status(&vp_dev->vdev, status);
++
++ /* Maybe driver failed before freeze.
++ * Restore the failed status, for debugging. */
++ status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
++ vp_set_status(&vp_dev->vdev, status);
++
++ if (!drv)
++ return 0;
++
++ /* We have a driver! */
++ status |= VIRTIO_CONFIG_S_DRIVER;
++ vp_set_status(&vp_dev->vdev, status);
++
+ vp_finalize_features(&vp_dev->vdev);
+
+- if (drv && drv->restore)
++ if (drv->restore) {
+ ret = drv->restore(&vp_dev->vdev);
++ if (ret) {
++ status |= VIRTIO_CONFIG_S_FAILED;
++ vp_set_status(&vp_dev->vdev, status);
++ return ret;
++ }
++ }
+
+ /* Finally, tell the device we're all set */
+- if (!ret)
+- vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
++ status |= VIRTIO_CONFIG_S_DRIVER_OK;
++ vp_set_status(&vp_dev->vdev, status);
+
+ return ret;
+ }
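
The rewritten virtio_pci_restore() above walks the device through the normal status handshake instead of writing back the saved status in one go: reset, ACKNOWLEDGE, DRIVER, the driver's own restore, then DRIVER_OK, with FAILED set if restore errors out. A compact sketch of that progression, using the standard virtio status bit values (0x01, 0x02, 0x04, 0x80) and an illustrative set_status() stand-in:

#include <stdio.h>

/* Standard virtio device status bits (see the virtio spec). */
#define VIRTIO_STATUS_ACKNOWLEDGE 0x01
#define VIRTIO_STATUS_DRIVER      0x02
#define VIRTIO_STATUS_DRIVER_OK   0x04
#define VIRTIO_STATUS_FAILED      0x80

/* Stand-in for writing the status register of a virtio PCI device. */
static void set_status(unsigned *device_status, unsigned status)
{
	*device_status = status;
	printf("status register now 0x%02x\n", status);
}

/* Mirrors the ordering used by the patched virtio_pci_restore(). */
static int restore_device(unsigned *device_status, int have_driver,
			  int (*driver_restore)(void))
{
	unsigned status = 0;

	set_status(device_status, 0);			/* reset first */
	status |= VIRTIO_STATUS_ACKNOWLEDGE;
	set_status(device_status, status);

	if (!have_driver)
		return 0;

	status |= VIRTIO_STATUS_DRIVER;
	set_status(device_status, status);

	if (driver_restore && driver_restore()) {
		set_status(device_status, status | VIRTIO_STATUS_FAILED);
		return -1;
	}

	set_status(device_status, status | VIRTIO_STATUS_DRIVER_OK);
	return 0;
}

static int dummy_restore(void) { return 0; }

int main(void)
{
	unsigned reg = 0;

	return restore_device(&reg, 1, dummy_restore);
}
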
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 54c84da..7409772 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -423,7 +423,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+ ret = 0;
+ fail:
+ while (ret < 0 && !list_empty(&tmplist)) {
+- sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
++ sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 3588a80..72daaa5 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2082,6 +2082,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ struct page *page, void *fsdata)
+ {
+ struct inode *inode = mapping->host;
++ loff_t old_size = inode->i_size;
+ int i_size_changed = 0;
+
+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+@@ -2101,6 +2102,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
+ unlock_page(page);
+ page_cache_release(page);
+
++ if (old_size < pos)
++ pagecache_isize_extended(inode, old_size, pos);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+@@ -2318,6 +2321,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
+ err = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
++
++ if (unlikely(fatal_signal_pending(current))) {
++ err = -EINTR;
++ goto out;
++ }
+ }
+
+ /* page covers the boundary, find the boundary offset */
+diff --git a/fs/dcache.c b/fs/dcache.c
+index cb25a1a..34b40be8 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2675,11 +2675,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
+ if (!IS_ROOT(new)) {
+ spin_unlock(&inode->i_lock);
+ dput(new);
++ iput(inode);
+ return ERR_PTR(-EIO);
+ }
+ if (d_ancestor(new, dentry)) {
+ spin_unlock(&inode->i_lock);
+ dput(new);
++ iput(inode);
+ return ERR_PTR(-EIO);
+ }
+ write_seqlock(&rename_lock);
+@@ -2810,6 +2812,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
+ * the beginning of the name. The sequence number check at the caller will
+ * retry it again when a d_move() does happen. So any garbage in the buffer
+ * due to mismatched pointer and length will be discarded.
++ *
++ * Data dependency barrier is needed to make sure that we see that terminating
++ * NUL. Alpha strikes again, film at 11...
+ */
+ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ {
+@@ -2817,6 +2822,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+ u32 dlen = ACCESS_ONCE(name->len);
+ char *p;
+
++ smp_read_barrier_depends();
++
+ *buflen -= dlen + 1;
+ if (*buflen < 0)
+ return -ENAMETOOLONG;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 622e882..2c42e73 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -1354,13 +1354,6 @@ set_qf_format:
+ "not specified.");
+ return 0;
+ }
+- } else {
+- if (sbi->s_jquota_fmt) {
+- ext3_msg(sb, KERN_ERR, "error: journaled quota format "
+- "specified with no journaling "
+- "enabled.");
+- return 0;
+- }
+ }
+ #endif
+ return 1;
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 581ef40..e069155 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -176,7 +176,7 @@ static unsigned int num_clusters_in_group(struct super_block *sb,
+ }
+
+ /* Initializes an uninitialized block bitmap */
+-static void ext4_init_block_bitmap(struct super_block *sb,
++static int ext4_init_block_bitmap(struct super_block *sb,
+ struct buffer_head *bh,
+ ext4_group_t block_group,
+ struct ext4_group_desc *gdp)
+@@ -192,7 +192,6 @@ static void ext4_init_block_bitmap(struct super_block *sb,
+ /* If checksum is bad mark all blocks used to prevent allocation
+ * essentially implementing a per-group read-only flag. */
+ if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
+- ext4_error(sb, "Checksum bad for group %u", block_group);
+ grp = ext4_get_group_info(sb, block_group);
+ if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+ percpu_counter_sub(&sbi->s_freeclusters_counter,
+@@ -205,7 +204,7 @@ static void ext4_init_block_bitmap(struct super_block *sb,
+ count);
+ }
+ set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
+- return;
++ return -EIO;
+ }
+ memset(bh->b_data, 0, sb->s_blocksize);
+
+@@ -243,6 +242,7 @@ static void ext4_init_block_bitmap(struct super_block *sb,
+ sb->s_blocksize * 8, bh->b_data);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
+ ext4_group_desc_csum_set(sb, block_group, gdp);
++ return 0;
+ }
+
+ /* Return the number of free blocks in a block group. It is used when
+@@ -438,11 +438,15 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ }
+ ext4_lock_group(sb, block_group);
+ if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+- ext4_init_block_bitmap(sb, bh, block_group, desc);
++ int err;
++
++ err = ext4_init_block_bitmap(sb, bh, block_group, desc);
+ set_bitmap_uptodate(bh);
+ set_buffer_uptodate(bh);
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
++ if (err)
++ ext4_error(sb, "Checksum bad for grp %u", block_group);
+ return bh;
+ }
+ ext4_unlock_group(sb, block_group);
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index 3285aa5..b610779 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ __u32 provided, calculated;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
+@@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ __u32 csum;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+@@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
+@@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ __u32 csum;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index b0c225c..96ac9d3 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -2109,6 +2109,7 @@ int do_journal_get_write_access(handle_t *handle,
+ #define CONVERT_INLINE_DATA 2
+
+ extern struct inode *ext4_iget(struct super_block *, unsigned long);
++extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+ extern int ext4_write_inode(struct inode *, struct writeback_control *);
+ extern int ext4_setattr(struct dentry *, struct iattr *);
+ extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+@@ -2332,10 +2333,18 @@ extern int ext4_register_li_request(struct super_block *sb,
+ static inline int ext4_has_group_desc_csum(struct super_block *sb)
+ {
+ return EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
++ EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
++ (EXT4_SB(sb)->s_chksum_driver != NULL);
+ }
+
++static inline int ext4_has_metadata_csum(struct super_block *sb)
++{
++ WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
++ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
++ !EXT4_SB(sb)->s_chksum_driver);
++
++ return (EXT4_SB(sb)->s_chksum_driver != NULL);
++}
+ static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
+ {
+ return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 74292a7..18d8dc8 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -73,8 +73,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
+ {
+ struct ext4_extent_tail *et;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ et = find_ext4_extent_tail(eh);
+@@ -88,8 +87,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
+ {
+ struct ext4_extent_tail *et;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ et = find_ext4_extent_tail(eh);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index aca7b24..8131be8 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -137,10 +137,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
+ }
+
++ iocb->private = &overwrite;
+ if (o_direct) {
+ blk_start_plug(&plug);
+
+- iocb->private = &overwrite;
+
+ /* check whether we do a DIO overwrite or not */
+ if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 5b87fc3..ac644c3 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -887,6 +887,10 @@ got:
+ struct buffer_head *block_bitmap_bh;
+
+ block_bitmap_bh = ext4_read_block_bitmap(sb, group);
++ if (!block_bitmap_bh) {
++ err = -EIO;
++ goto out;
++ }
+ BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
+ err = ext4_journal_get_write_access(handle, block_bitmap_bh);
+ if (err) {
+@@ -1011,8 +1015,7 @@ got:
+ spin_unlock(&sbi->s_next_gen_lock);
+
+ /* Precompute checksum seed for inode metadata */
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(sb)) {
+ __u32 csum;
+ __le32 inum = cpu_to_le32(inode->i_ino);
+ __le32 gen = cpu_to_le32(inode->i_generation);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index bea662b..aa8e695 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1126,8 +1126,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
+ memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
+ inline_size - EXT4_INLINE_DOTDOT_SIZE);
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ inode->i_size = inode->i_sb->s_blocksize;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3aa26e9..7d1057b 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
+
+ if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+ cpu_to_le32(EXT4_OS_LINUX) ||
+- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ !ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ provided = le16_to_cpu(raw->i_checksum_lo);
+@@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
+
+ if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+ cpu_to_le32(EXT4_OS_LINUX) ||
+- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ !ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ csum = ext4_inode_csum(inode, raw, ei);
+@@ -224,16 +222,15 @@ void ext4_evict_inode(struct inode *inode)
+ goto no_delete;
+ }
+
+- if (!is_bad_inode(inode))
+- dquot_initialize(inode);
++ if (is_bad_inode(inode))
++ goto no_delete;
++ dquot_initialize(inode);
+
+ if (ext4_should_order_data(inode))
+ ext4_begin_ordered_truncate(inode, 0);
+ truncate_inode_pages_final(&inode->i_data);
+
+ WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
+- if (is_bad_inode(inode))
+- goto no_delete;
+
+ /*
+ * Protect us against freezing - iput() caller didn't have to have any
+@@ -2515,6 +2512,20 @@ static int ext4_nonda_switch(struct super_block *sb)
+ return 0;
+ }
+
++/* We always reserve for an inode update; the superblock could be there too */
++static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
++{
++ if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
++ return 1;
++
++ if (pos + len <= 0x7fffffffULL)
++ return 1;
++
++ /* We might need to update the superblock to set LARGE_FILE */
++ return 2;
++}
++
+ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+@@ -2565,7 +2576,8 @@ retry_grab:
+ * of file which has an already mapped buffer.
+ */
+ retry_journal:
+- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
++ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
++ ext4_da_write_credits(inode, pos, len));
+ if (IS_ERR(handle)) {
+ page_cache_release(page);
+ return PTR_ERR(handle);
+@@ -3936,8 +3948,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ ei->i_extra_isize = 0;
+
+ /* Precompute checksum seed for inode metadata */
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(sb)) {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ __u32 csum;
+ __le32 inum = cpu_to_le32(inode->i_ino);
+@@ -4127,6 +4138,13 @@ bad_inode:
+ return ERR_PTR(ret);
+ }
+
++struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
++{
++ if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
++ return ERR_PTR(-EIO);
++ return ext4_iget(sb, ino);
++}
++
+ static int ext4_inode_blocks_set(handle_t *handle,
+ struct ext4_inode *raw_inode,
+ struct ext4_inode_info *ei)
+@@ -4536,8 +4554,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ ext4_orphan_del(NULL, inode);
+ goto err_out;
+ }
+- } else
++ } else {
++ loff_t oldsize = inode->i_size;
++
+ i_size_write(inode, attr->ia_size);
++ pagecache_isize_extended(inode, oldsize, inode->i_size);
++ }
+
+ /*
+ * Blocks are going to be removed from the inode. Wait
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 0f2252e..bfda18a 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -331,8 +331,7 @@ flags_out:
+ if (!inode_owner_or_capable(inode))
+ return -EPERM;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(inode->i_sb)) {
+ ext4_warning(sb, "Setting inode version is not "
+ "supported with metadata_csum enabled.");
+ return -ENOTTY;
+@@ -532,9 +531,17 @@ group_add_out:
+ }
+
+ case EXT4_IOC_SWAP_BOOT:
++ {
++ int err;
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+- return swap_inode_boot_loader(sb, inode);
++ err = mnt_want_write_file(filp);
++ if (err)
++ return err;
++ err = swap_inode_boot_loader(sb, inode);
++ mnt_drop_write_file(filp);
++ return err;
++ }
+
+ case EXT4_IOC_RESIZE_FS: {
+ ext4_fsblk_t n_blocks_count;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 32bce84..8313ca3 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
+
+ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
+@@ -29,8 +28,7 @@ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
+
+ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 603e4eb..5b7dad6 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -124,8 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ "directory leaf block found instead of index block");
+ return ERR_PTR(-EIO);
+ }
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
++ if (!ext4_has_metadata_csum(inode->i_sb) ||
+ buffer_verified(bh))
+ return bh;
+
+@@ -340,8 +339,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
+ {
+ struct ext4_dir_entry_tail *t;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ t = get_dirent_tail(inode, dirent);
+@@ -362,8 +360,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
+ {
+ struct ext4_dir_entry_tail *t;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ t = get_dirent_tail(inode, dirent);
+@@ -438,8 +435,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
+ struct dx_tail *t;
+ int count_offset, limit, count;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return 1;
+
+ c = get_dx_countlimit(inode, dirent, &count_offset);
+@@ -468,8 +464,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
+ struct dx_tail *t;
+ int count_offset, limit, count;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ c = get_dx_countlimit(inode, dirent, &count_offset);
+@@ -557,8 +552,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
+ unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+ EXT4_DIR_REC_LEN(2) - infosize;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ entry_space -= sizeof(struct dx_tail);
+ return entry_space / sizeof(struct dx_entry);
+ }
+@@ -567,8 +561,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
+ {
+ unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ entry_space -= sizeof(struct dx_tail);
+ return entry_space / sizeof(struct dx_entry);
+ }
+@@ -1441,7 +1434,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
+ dentry);
+ return ERR_PTR(-EIO);
+ }
+- inode = ext4_iget(dir->i_sb, ino);
++ inode = ext4_iget_normal(dir->i_sb, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ EXT4_ERROR_INODE(dir,
+ "deleted inode referenced: %u",
+@@ -1474,7 +1467,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
+ return ERR_PTR(-EIO);
+ }
+
+- return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
++ return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
+ }
+
+ /*
+@@ -1548,8 +1541,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+ int csum_size = 0;
+ int err = 0, i;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ bh2 = ext4_append(handle, dir, &newblock);
+@@ -1718,8 +1710,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
+ int csum_size = 0;
+ int err;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ if (!de) {
+@@ -1786,8 +1777,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+ struct fake_dirent *fde;
+ int csum_size = 0;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ blocksize = dir->i_sb->s_blocksize;
+@@ -1904,8 +1894,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ ext4_lblk_t block, blocks;
+ int csum_size = 0;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(inode->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ sb = dir->i_sb;
+@@ -2167,8 +2156,7 @@ static int ext4_delete_entry(handle_t *handle,
+ return err;
+ }
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ BUFFER_TRACE(bh, "get_write_access");
+@@ -2387,8 +2375,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+ int csum_size = 0;
+ int err;
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(dir->i_sb))
+ csum_size = sizeof(struct ext4_dir_entry_tail);
+
+ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+@@ -2573,7 +2560,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
+ int err = 0, rc;
+ bool dirty = false;
+
+- if (!sbi->s_journal)
++ if (!sbi->s_journal || is_bad_inode(inode))
+ return 0;
+
+ WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 1e43b90..ca45883 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1081,7 +1081,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
+ break;
+
+ if (meta_bg == 0)
+- backup_block = group * bpg + blk_off;
++ backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
+ else
+ backup_block = (ext4_group_first_block_no(sb, group) +
+ ext4_bg_has_super(sb, group));
+@@ -1212,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
+ {
+ struct buffer_head *bh;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 0;
+
+ bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 0b28b36..b1f0ac7 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -141,8 +141,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
+ static int ext4_superblock_csum_verify(struct super_block *sb,
+ struct ext4_super_block *es)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return 1;
+
+ return es->s_checksum == ext4_superblock_csum(sb, es);
+@@ -152,8 +151,7 @@ void ext4_superblock_csum_set(struct super_block *sb)
+ {
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(sb))
+ return;
+
+ es->s_checksum = ext4_superblock_csum(sb, es);
+@@ -1002,7 +1000,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
+ * Currently we don't know the generation for parent directory, so
+ * a generation of 0 means "accept any"
+ */
+- inode = ext4_iget(sb, ino);
++ inode = ext4_iget_normal(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
+@@ -1712,13 +1710,6 @@ static int parse_options(char *options, struct super_block *sb,
+ "not specified");
+ return 0;
+ }
+- } else {
+- if (sbi->s_jquota_fmt) {
+- ext4_msg(sb, KERN_ERR, "journaled quota format "
+- "specified with no journaling "
+- "enabled");
+- return 0;
+- }
+ }
+ #endif
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+@@ -2016,8 +2007,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ __u16 crc = 0;
+ __le32 le_group = cpu_to_le32(block_group);
+
+- if ((sbi->s_es->s_feature_ro_compat &
+- cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
++ if (ext4_has_metadata_csum(sbi->s_sb)) {
+ /* Use new metadata_csum algorithm */
+ __le16 save_csum;
+ __u32 csum32;
+@@ -2035,6 +2025,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+ }
+
+ /* old crc16 code */
++ if (!(sbi->s_es->s_feature_ro_compat &
++ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
++ return 0;
++
+ offset = offsetof(struct ext4_group_desc, bg_checksum);
+
+ crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+@@ -3179,8 +3173,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ int compat, incompat;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
++ if (ext4_has_metadata_csum(sb)) {
+ /* journal checksum v3 */
+ compat = 0;
+ incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+@@ -3487,8 +3480,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ }
+
+ /* Precompute checksum seed for all metadata */
+- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (ext4_has_metadata_csum(sb))
+ sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
+ sizeof(es->s_uuid));
+
+@@ -3506,6 +3498,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ #ifdef CONFIG_EXT4_FS_POSIX_ACL
+ set_opt(sb, POSIX_ACL);
+ #endif
++ /* don't forget to enable journal_csum when metadata_csum is enabled. */
++ if (ext4_has_metadata_csum(sb))
++ set_opt(sb, JOURNAL_CHECKSUM);
++
+ if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
+ set_opt(sb, JOURNAL_DATA);
+ else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index e738733..2d1e5803 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -142,8 +142,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
+ sector_t block_nr,
+ struct ext4_xattr_header *hdr)
+ {
+- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
++ if (ext4_has_metadata_csum(inode->i_sb) &&
+ (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
+ return 0;
+ return 1;
+@@ -153,8 +152,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode,
+ sector_t block_nr,
+ struct ext4_xattr_header *hdr)
+ {
+- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
++ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+@@ -190,14 +188,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ }
+
+ static int
+-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
++ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
++ void *value_start)
+ {
+- while (!IS_LAST_ENTRY(entry)) {
+- struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
++ struct ext4_xattr_entry *e = entry;
++
++ while (!IS_LAST_ENTRY(e)) {
++ struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
+ if ((void *)next >= end)
+ return -EIO;
+- entry = next;
++ e = next;
+ }
++
++ while (!IS_LAST_ENTRY(entry)) {
++ if (entry->e_value_size != 0 &&
++ (value_start + le16_to_cpu(entry->e_value_offs) <
++ (void *)e + sizeof(__u32) ||
++ value_start + le16_to_cpu(entry->e_value_offs) +
++ le32_to_cpu(entry->e_value_size) > end))
++ return -EIO;
++ entry = EXT4_XATTR_NEXT(entry);
++ }
++
+ return 0;
+ }
+
+@@ -214,7 +226,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
+ return -EIO;
+ if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+ return -EIO;
+- error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
++ error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
++ bh->b_data);
+ if (!error)
+ set_buffer_verified(bh);
+ return error;
+@@ -331,7 +344,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+- error = ext4_xattr_check_names(entry, end);
++ error = ext4_xattr_check_names(entry, end, entry);
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_find_entry(&entry, name_index, name,
+@@ -463,7 +476,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ raw_inode = ext4_raw_inode(&iloc);
+ header = IHDR(inode, raw_inode);
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+- error = ext4_xattr_check_names(IFIRST(header), end);
++ error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+ if (error)
+ goto cleanup;
+ error = ext4_xattr_list_entries(dentry, IFIRST(header),
+@@ -986,7 +999,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ is->s.here = is->s.first;
+ is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
+- error = ext4_xattr_check_names(IFIRST(header), is->s.end);
++ error = ext4_xattr_check_names(IFIRST(header), is->s.end,
++ IFIRST(header));
+ if (error)
+ return error;
+ /* Find the named attribute. */
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 9b329b5..bcbef08 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal,
+ !jbd2_descr_block_csum_verify(journal,
+ bh->b_data)) {
+ err = -EIO;
++ brelse(bh);
+ goto failed;
+ }
+
+diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
+index 413ef89..046fee8 100644
+--- a/fs/jffs2/jffs2_fs_sb.h
++++ b/fs/jffs2/jffs2_fs_sb.h
+@@ -134,8 +134,6 @@ struct jffs2_sb_info {
+ struct rw_semaphore wbuf_sem; /* Protects the write buffer */
+
+ struct delayed_work wbuf_dwork; /* write-buffer write-out work */
+- int wbuf_queued; /* non-zero delayed work is queued */
+- spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */
+
+ unsigned char *oobbuf;
+ int oobavail; /* How many bytes are available for JFFS2 in OOB */
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index a6597d6..09ed551 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
+ struct jffs2_sb_info *c = work_to_sb(work);
+ struct super_block *sb = OFNI_BS_2SFFJ(c);
+
+- spin_lock(&c->wbuf_dwork_lock);
+- c->wbuf_queued = 0;
+- spin_unlock(&c->wbuf_dwork_lock);
+-
+ if (!(sb->s_flags & MS_RDONLY)) {
+ jffs2_dbg(1, "%s()\n", __func__);
+ jffs2_flush_wbuf_gc(c, 0);
+@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+- spin_lock(&c->wbuf_dwork_lock);
+- if (!c->wbuf_queued) {
++ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
++ if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
+ jffs2_dbg(1, "%s()\n", __func__);
+- delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+- queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
+- c->wbuf_queued = 1;
+- }
+- spin_unlock(&c->wbuf_dwork_lock);
+ }
+
+ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+
+ /* Initialise write buffer */
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ c->wbuf_pagesize = c->mtd->writesize;
+ c->wbuf_ofs = 0xFFFFFFFF;
+@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+ c->wbuf_pagesize = c->mtd->erasesize;
+
+@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
+ c->wbuf_pagesize = c->mtd->writesize;
+@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
+ return 0;
+
+ init_rwsem(&c->wbuf_sem);
+- spin_lock_init(&c->wbuf_dwork_lock);
+ INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
+
+ c->wbuf_pagesize = c->mtd->writesize;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index daa8e75..9106f42 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+
+ msg.rpc_proc = &clnt->cl_procinfo[proc];
+ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++ if (status == -ECONNREFUSED) {
++ dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
++ status);
++ rpc_force_rebind(clnt);
++ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
++ }
+ if (status < 0)
+ dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ status);
+diff --git a/fs/namei.c b/fs/namei.c
+index 3ddb044..bb02687 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3154,7 +3154,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
+ if (error)
+ goto out2;
+ audit_inode(pathname, nd->path.dentry, 0);
+- error = may_open(&nd->path, op->acc_mode, op->open_flag);
++ /* Don't check for other permissions, the inode was just created */
++ error = may_open(&nd->path, MAY_OPEN, op->open_flag);
+ if (error)
+ goto out2;
+ file->f_path.mnt = nd->path.mnt;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7f67b46..550dbff 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2822,6 +2822,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ /* make sure we can reach put_old from new_root */
+ if (!is_path_reachable(old_mnt, old.dentry, &new))
+ goto out4;
++ /* make certain new is below the root */
++ if (!is_path_reachable(new_mnt, new.dentry, &root))
++ goto out4;
+ root_mp->m_count++; /* pin it so it won't go away */
+ lock_mount_hash();
+ detach_mnt(new_mnt, &parent_path);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 5e0dc52..1d3cb47 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1229,7 +1229,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
+ */
+ if (argp->opcnt == resp->opcnt)
+ return false;
+-
++ if (next->opnum == OP_ILLEGAL)
++ return false;
+ nextd = OPDESC(next);
+ /*
+ * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
+@@ -1546,7 +1547,8 @@ static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op
+ static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
+ struct nfsd4_op *op)
+ {
+- return NFS4_MAX_SESSIONID_LEN + 20;
++ return (op_encode_hdr_size
++ + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
+ }
+
+ static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+@@ -1850,6 +1852,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
+ .op_func = (nfsd4op_func)nfsd4_sequence,
+ .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+ .op_name = "OP_SEQUENCE",
++ .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
+ },
+ [OP_DESTROY_CLIENTID] = {
+ .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
+diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
+index ea34952..485d22d 100644
+--- a/fs/ocfs2/cluster/tcp.c
++++ b/fs/ocfs2/cluster/tcp.c
+@@ -925,7 +925,7 @@ static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
+ size_t veclen, size_t total)
+ {
+ int ret;
+- struct msghdr msg;
++ struct msghdr msg = {.msg_flags = 0,};
+
+ if (sock == NULL) {
+ ret = -EINVAL;
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 192297b..fafb7a0 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -320,10 +320,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
+ compressed ? ".enc.z" : "");
+ break;
+ case PSTORE_TYPE_CONSOLE:
+- sprintf(name, "console-%s", psname);
++ sprintf(name, "console-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_FTRACE:
+- sprintf(name, "ftrace-%s", psname);
++ sprintf(name, "ftrace-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_MCE:
+ sprintf(name, "mce-%s-%lld", psname, id);
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index f2d0eee..23c548d 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -634,7 +634,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
+ dqstats_inc(DQST_LOOKUPS);
+ err = sb->dq_op->write_dquot(dquot);
+ if (!ret && err)
+- err = ret;
++ ret = err;
+ dqput(dquot);
+ spin_lock(&dq_list_lock);
+ }
+diff --git a/fs/super.c b/fs/super.c
+index b9a214d..6f8c954 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -80,6 +80,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
+ inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
+ dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
+ total_objects = dentries + inodes + fs_objects + 1;
++ if (!total_objects)
++ total_objects = 1;
+
+ /* proportion the scan between the caches */
+ dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
+diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
+index aa13ad0..26b69b2 100644
+--- a/fs/ubifs/commit.c
++++ b/fs/ubifs/commit.c
+@@ -166,10 +166,6 @@ static int do_commit(struct ubifs_info *c)
+ err = ubifs_orphan_end_commit(c);
+ if (err)
+ goto out;
+- old_ltail_lnum = c->ltail_lnum;
+- err = ubifs_log_end_commit(c, new_ltail_lnum);
+- if (err)
+- goto out;
+ err = dbg_check_old_index(c, &zroot);
+ if (err)
+ goto out;
+@@ -202,7 +198,9 @@ static int do_commit(struct ubifs_info *c)
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+ else
+ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
+- err = ubifs_write_master(c);
++
++ old_ltail_lnum = c->ltail_lnum;
++ err = ubifs_log_end_commit(c, new_ltail_lnum);
+ if (err)
+ goto out;
+
+diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
+index a47ddfc..c14628f 100644
+--- a/fs/ubifs/log.c
++++ b/fs/ubifs/log.c
+@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
+ h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
+ t = (long long)c->ltail_lnum * c->leb_size;
+
+- if (h >= t)
++ if (h > t)
+ return c->log_bytes - h + t;
+- else
++ else if (h != t)
+ return t - h;
++ else if (c->lhead_lnum != c->ltail_lnum)
++ return 0;
++ else
++ return c->log_bytes;
+ }
+
+ /**
+@@ -447,9 +451,9 @@ out:
+ * @ltail_lnum: new log tail LEB number
+ *
+ * This function is called on when the commit operation was finished. It
+- * moves log tail to new position and unmaps LEBs which contain obsolete data.
+- * Returns zero in case of success and a negative error code in case of
+- * failure.
++ * moves log tail to new position and updates the master node so that it stores
++ * the new log tail LEB number. Returns zero in case of success and a negative
++ * error code in case of failure.
+ */
+ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ {
+@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
+ spin_unlock(&c->buds_lock);
+
+ err = dbg_check_bud_bytes(c);
++ if (err)
++ goto out;
+
++ err = ubifs_write_master(c);
++
++out:
+ mutex_unlock(&c->log_mutex);
+ return err;
+ }
+diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
+index f1deb96..894924a 100644
+--- a/fs/xfs/xfs_itable.c
++++ b/fs/xfs/xfs_itable.c
+@@ -236,8 +236,10 @@ xfs_bulkstat_grab_ichunk(
+ XFS_WANT_CORRUPTED_RETURN(stat == 1);
+
+ /* Check if the record contains the inode in request */
+- if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
+- return -EINVAL;
++ if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
++ *icount = 0;
++ return 0;
++ }
+
+ idx = agino - irec->ir_startino + 1;
+ if (idx < XFS_INODES_PER_CHUNK &&
+@@ -262,75 +264,76 @@ xfs_bulkstat_grab_ichunk(
+
+ #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
+
++struct xfs_bulkstat_agichunk {
++ char __user **ac_ubuffer;/* pointer into user's buffer */
++ int ac_ubleft; /* bytes left in user's buffer */
++ int ac_ubelem; /* spaces used in user's buffer */
++};
++
+ /*
+ * Process inodes in chunk with a pointer to a formatter function
+ * that will iget the inode and fill in the appropriate structure.
+ */
+-int
++static int
+ xfs_bulkstat_ag_ichunk(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ struct xfs_inobt_rec_incore *irbp,
+ bulkstat_one_pf formatter,
+ size_t statstruct_size,
+- struct xfs_bulkstat_agichunk *acp)
++ struct xfs_bulkstat_agichunk *acp,
++ xfs_agino_t *last_agino)
+ {
+- xfs_ino_t lastino = acp->ac_lastino;
+ char __user **ubufp = acp->ac_ubuffer;
+- int ubleft = acp->ac_ubleft;
+- int ubelem = acp->ac_ubelem;
+- int chunkidx, clustidx;
++ int chunkidx;
+ int error = 0;
+- xfs_agino_t agino;
++ xfs_agino_t agino = irbp->ir_startino;
+
+- for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
+- XFS_BULKSTAT_UBLEFT(ubleft) &&
+- irbp->ir_freecount < XFS_INODES_PER_CHUNK;
+- chunkidx++, clustidx++, agino++) {
+- int fmterror; /* bulkstat formatter result */
++ for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
++ chunkidx++, agino++) {
++ int fmterror;
+ int ubused;
+- xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino);
+
+- ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
++ /* inode won't fit in buffer, we are done */
++ if (acp->ac_ubleft < statstruct_size)
++ break;
+
+ /* Skip if this inode is free */
+- if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
+- lastino = ino;
++ if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
+ continue;
+- }
+-
+- /*
+- * Count used inodes as free so we can tell when the
+- * chunk is used up.
+- */
+- irbp->ir_freecount++;
+
+ /* Get the inode and fill in a single buffer */
+ ubused = statstruct_size;
+- error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
+- if (fmterror == BULKSTAT_RV_NOTHING) {
+- if (error && error != -ENOENT && error != -EINVAL) {
+- ubleft = 0;
+- break;
+- }
+- lastino = ino;
+- continue;
+- }
+- if (fmterror == BULKSTAT_RV_GIVEUP) {
+- ubleft = 0;
++ error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
++ *ubufp, acp->ac_ubleft, &ubused, &fmterror);
++
++ if (fmterror == BULKSTAT_RV_GIVEUP ||
++ (error && error != -ENOENT && error != -EINVAL)) {
++ acp->ac_ubleft = 0;
+ ASSERT(error);
+ break;
+ }
+- if (*ubufp)
+- *ubufp += ubused;
+- ubleft -= ubused;
+- ubelem++;
+- lastino = ino;
++
++ /* be careful not to leak error if at end of chunk */
++ if (fmterror == BULKSTAT_RV_NOTHING || error) {
++ error = 0;
++ continue;
++ }
++
++ *ubufp += ubused;
++ acp->ac_ubleft -= ubused;
++ acp->ac_ubelem++;
+ }
+
+- acp->ac_lastino = lastino;
+- acp->ac_ubleft = ubleft;
+- acp->ac_ubelem = ubelem;
++ /*
++ * Post-update *last_agino. At this point, agino will always point one
++ * inode past the last inode we processed successfully. Hence we
++ * subtract that inode when setting the *last_agino cursor so that we
++ * return the correct cookie to userspace. On the next bulkstat call,
++ * the inode under the lastino cookie will be skipped as we have already
++ * processed it here.
++ */
++ *last_agino = agino - 1;
+
+ return error;
+ }
+@@ -353,45 +356,33 @@ xfs_bulkstat(
+ xfs_agino_t agino; /* inode # in allocation group */
+ xfs_agnumber_t agno; /* allocation group number */
+ xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
+- int end_of_ag; /* set if we've seen the ag end */
+- int error; /* error code */
+- int fmterror;/* bulkstat formatter result */
+- int i; /* loop index */
+- int icount; /* count of inodes good in irbuf */
+ size_t irbsize; /* size of irec buffer in bytes */
+- xfs_ino_t ino; /* inode number (filesystem) */
+- xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
+ xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
+- xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
+- xfs_ino_t lastino; /* last inode number returned */
+ int nirbuf; /* size of irbuf */
+- int rval; /* return value error code */
+- int tmp; /* result value from btree calls */
+ int ubcount; /* size of user's buffer */
+- int ubleft; /* bytes left in user's buffer */
+- char __user *ubufp; /* pointer into user's buffer */
+- int ubelem; /* spaces used in user's buffer */
++ struct xfs_bulkstat_agichunk ac;
++ int error = 0;
+
+ /*
+ * Get the last inode value, see if there's nothing to do.
+ */
+- ino = (xfs_ino_t)*lastinop;
+- lastino = ino;
+- agno = XFS_INO_TO_AGNO(mp, ino);
+- agino = XFS_INO_TO_AGINO(mp, ino);
++ agno = XFS_INO_TO_AGNO(mp, *lastinop);
++ agino = XFS_INO_TO_AGINO(mp, *lastinop);
+ if (agno >= mp->m_sb.sb_agcount ||
+- ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
++ *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
+ *done = 1;
+ *ubcountp = 0;
+ return 0;
+ }
+
+ ubcount = *ubcountp; /* statstruct's */
+- ubleft = ubcount * statstruct_size; /* bytes */
+- *ubcountp = ubelem = 0;
++ ac.ac_ubuffer = &ubuffer;
++ ac.ac_ubleft = ubcount * statstruct_size; /* bytes */;
++ ac.ac_ubelem = 0;
++
++ *ubcountp = 0;
+ *done = 0;
+- fmterror = 0;
+- ubufp = ubuffer;
++
+ irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+ if (!irbuf)
+ return -ENOMEM;
+@@ -402,9 +393,13 @@ xfs_bulkstat(
+ * Loop over the allocation groups, starting from the last
+ * inode returned; 0 means start of the allocation group.
+ */
+- rval = 0;
+- while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
+- cond_resched();
++ while (agno < mp->m_sb.sb_agcount) {
++ struct xfs_inobt_rec_incore *irbp = irbuf;
++ struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
++ bool end_of_ag = false;
++ int icount = 0;
++ int stat;
++
+ error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+ if (error)
+ break;
+@@ -414,10 +409,6 @@ xfs_bulkstat(
+ */
+ cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
+ XFS_BTNUM_INO);
+- irbp = irbuf;
+- irbufend = irbuf + nirbuf;
+- end_of_ag = 0;
+- icount = 0;
+ if (agino > 0) {
+ /*
+ * In the middle of an allocation group, we need to get
+@@ -427,22 +418,23 @@ xfs_bulkstat(
+
+ error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
+ if (error)
+- break;
++ goto del_cursor;
+ if (icount) {
+ irbp->ir_startino = r.ir_startino;
+ irbp->ir_freecount = r.ir_freecount;
+ irbp->ir_free = r.ir_free;
+ irbp++;
+- agino = r.ir_startino + XFS_INODES_PER_CHUNK;
+ }
+ /* Increment to the next record */
+- error = xfs_btree_increment(cur, 0, &tmp);
++ error = xfs_btree_increment(cur, 0, &stat);
+ } else {
+ /* Start of ag. Lookup the first inode chunk */
+- error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
++ error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
++ }
++ if (error || stat == 0) {
++ end_of_ag = true;
++ goto del_cursor;
+ }
+- if (error)
+- break;
+
+ /*
+ * Loop through inode btree records in this ag,
+@@ -451,10 +443,10 @@ xfs_bulkstat(
+ while (irbp < irbufend && icount < ubcount) {
+ struct xfs_inobt_rec_incore r;
+
+- error = xfs_inobt_get_rec(cur, &r, &i);
+- if (error || i == 0) {
+- end_of_ag = 1;
+- break;
++ error = xfs_inobt_get_rec(cur, &r, &stat);
++ if (error || stat == 0) {
++ end_of_ag = true;
++ goto del_cursor;
+ }
+
+ /*
+@@ -469,77 +461,79 @@ xfs_bulkstat(
+ irbp++;
+ icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
+ }
+- /*
+- * Set agino to after this chunk and bump the cursor.
+- */
+- agino = r.ir_startino + XFS_INODES_PER_CHUNK;
+- error = xfs_btree_increment(cur, 0, &tmp);
++ error = xfs_btree_increment(cur, 0, &stat);
++ if (error || stat == 0) {
++ end_of_ag = true;
++ goto del_cursor;
++ }
+ cond_resched();
+ }
++
+ /*
+- * Drop the btree buffers and the agi buffer.
+- * We can't hold any of the locks these represent
+- * when calling iget.
++ * Drop the btree buffers and the agi buffer as we can't hold any
++ * of the locks these represent when calling iget. If there is a
++ * pending error, then we are done.
+ */
++del_cursor:
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ xfs_buf_relse(agbp);
++ if (error)
++ break;
+ /*
+- * Now format all the good inodes into the user's buffer.
++ * Now format all the good inodes into the user's buffer. The
++ * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
++ * for the next loop iteration.
+ */
+ irbufend = irbp;
+ for (irbp = irbuf;
+- irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
+- struct xfs_bulkstat_agichunk ac;
+-
+- ac.ac_lastino = lastino;
+- ac.ac_ubuffer = &ubuffer;
+- ac.ac_ubleft = ubleft;
+- ac.ac_ubelem = ubelem;
++ irbp < irbufend && ac.ac_ubleft >= statstruct_size;
++ irbp++) {
+ error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
+- formatter, statstruct_size, &ac);
++ formatter, statstruct_size, &ac,
++ &agino);
+ if (error)
+- rval = error;
+-
+- lastino = ac.ac_lastino;
+- ubleft = ac.ac_ubleft;
+- ubelem = ac.ac_ubelem;
++ break;
+
+ cond_resched();
+ }
++
+ /*
+- * Set up for the next loop iteration.
++ * If we've run out of space or had a formatting error, we
++ * are now done
+ */
+- if (XFS_BULKSTAT_UBLEFT(ubleft)) {
+- if (end_of_ag) {
+- agno++;
+- agino = 0;
+- } else
+- agino = XFS_INO_TO_AGINO(mp, lastino);
+- } else
++ if (ac.ac_ubleft < statstruct_size || error)
+ break;
++
++ if (end_of_ag) {
++ agno++;
++ agino = 0;
++ }
+ }
+ /*
+ * Done, we're either out of filesystem or space to put the data.
+ */
+ kmem_free(irbuf);
+- *ubcountp = ubelem;
++ *ubcountp = ac.ac_ubelem;
++
+ /*
+- * Found some inodes, return them now and return the error next time.
++ * We found some inodes, so clear the error status and return them.
++ * The lastino pointer will point directly at the inode that triggered
++ * any error that occurred, so on the next call the error will be
++ * triggered again and propagated to userspace as there will be no
++ * formatted inodes in the buffer.
+ */
+- if (ubelem)
+- rval = 0;
+- if (agno >= mp->m_sb.sb_agcount) {
+- /*
+- * If we ran out of filesystem, mark lastino as off
+- * the end of the filesystem, so the next call
+- * will return immediately.
+- */
+- *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
++ if (ac.ac_ubelem)
++ error = 0;
++
++ /*
++ * If we ran out of filesystem, lastino will point off the end of
++ * the filesystem so the next call will return immediately.
++ */
++ *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
++ if (agno >= mp->m_sb.sb_agcount)
+ *done = 1;
+- } else
+- *lastinop = (xfs_ino_t)lastino;
+
+- return rval;
++ return error;
+ }
+
+ int
+diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
+index aaed080..6ea8b39 100644
+--- a/fs/xfs/xfs_itable.h
++++ b/fs/xfs/xfs_itable.h
+@@ -30,22 +30,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
+ int *ubused,
+ int *stat);
+
+-struct xfs_bulkstat_agichunk {
+- xfs_ino_t ac_lastino; /* last inode returned */
+- char __user **ac_ubuffer;/* pointer into user's buffer */
+- int ac_ubleft; /* bytes left in user's buffer */
+- int ac_ubelem; /* spaces used in user's buffer */
+-};
+-
+-int
+-xfs_bulkstat_ag_ichunk(
+- struct xfs_mount *mp,
+- xfs_agnumber_t agno,
+- struct xfs_inobt_rec_incore *irbp,
+- bulkstat_one_pf formatter,
+- size_t statstruct_size,
+- struct xfs_bulkstat_agichunk *acp);
+-
+ /*
+ * Values for stat return value.
+ */
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index e973540..2dd405c 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -74,7 +74,6 @@
+ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+- {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 518b465..f2057ff8 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1142,8 +1142,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+ /*
+ * tag stuff
+ */
+-#define blk_rq_tagged(rq) \
+- ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
++#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
+ extern int blk_queue_start_tag(struct request_queue *, struct request *);
+ extern struct request *blk_queue_find_tag(struct request_queue *, int);
+ extern void blk_queue_end_tag(struct request_queue *, struct request *);
+@@ -1285,10 +1284,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
+ static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+ {
+ unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+- unsigned int alignment = (sector << 9) & (granularity - 1);
++ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+
+- return (granularity + lim->alignment_offset - alignment)
+- & (granularity - 1);
++ return (granularity + lim->alignment_offset - alignment) % granularity;
+ }
+
+ static inline int bdev_alignment_offset(struct block_device *bdev)
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index f53c4a9..26ee25f 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -287,6 +287,7 @@ struct hid_item {
+ #define HID_QUIRK_HIDINPUT_FORCE 0x00000080
+ #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
+ #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
++#define HID_QUIRK_ALWAYS_POLL 0x00000400
+ #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
+ #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
+ #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index e0752d2..2d946c9 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
+ return false;
+ }
+
+-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
+- unsigned long *flags);
+-
+-extern atomic_t memcg_moving;
+-
+-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+- bool *locked, unsigned long *flags)
+-{
+- if (mem_cgroup_disabled())
+- return;
+- rcu_read_lock();
+- *locked = false;
+- if (atomic_read(&memcg_moving))
+- __mem_cgroup_begin_update_page_stat(page, locked, flags);
+-}
+-
+-void __mem_cgroup_end_update_page_stat(struct page *page,
+- unsigned long *flags);
+-static inline void mem_cgroup_end_update_page_stat(struct page *page,
+- bool *locked, unsigned long *flags)
+-{
+- if (mem_cgroup_disabled())
+- return;
+- if (*locked)
+- __mem_cgroup_end_update_page_stat(page, flags);
+- rcu_read_unlock();
+-}
+-
+-void mem_cgroup_update_page_stat(struct page *page,
+- enum mem_cgroup_stat_index idx,
+- int val);
+-
+-static inline void mem_cgroup_inc_page_stat(struct page *page,
++struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
++ unsigned long *flags);
++void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
++ unsigned long flags);
++void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
++ enum mem_cgroup_stat_index idx, int val);
++
++static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+ {
+- mem_cgroup_update_page_stat(page, idx, 1);
++ mem_cgroup_update_page_stat(memcg, idx, 1);
+ }
+
+-static inline void mem_cgroup_dec_page_stat(struct page *page,
++static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+ {
+- mem_cgroup_update_page_stat(page, idx, -1);
++ mem_cgroup_update_page_stat(memcg, idx, -1);
+ }
+
+ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+ {
+ }
+
+-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
++static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
+ bool *locked, unsigned long *flags)
+ {
++ return NULL;
+ }
+
+-static inline void mem_cgroup_end_update_page_stat(struct page *page,
+- bool *locked, unsigned long *flags)
++static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
++ bool locked, unsigned long flags)
+ {
+ }
+
+@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
+ return false;
+ }
+
+-static inline void mem_cgroup_inc_page_stat(struct page *page,
++static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+ {
+ }
+
+-static inline void mem_cgroup_dec_page_stat(struct page *page,
++static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+ {
+ }
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 16e6f1e..f952cc8 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1174,6 +1174,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
+
+ extern void truncate_pagecache(struct inode *inode, loff_t new);
+ extern void truncate_setsize(struct inode *inode, loff_t newsize);
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
+ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
+ int truncate_inode_page(struct address_space *mapping, struct page *page);
+ int generic_error_remove_page(struct address_space *mapping, struct page *page);
+@@ -1232,7 +1233,6 @@ int __set_page_dirty_no_writeback(struct page *page);
+ int redirty_page_for_writepage(struct writeback_control *wbc,
+ struct page *page);
+ void account_page_dirtied(struct page *page, struct address_space *mapping);
+-void account_page_writeback(struct page *page);
+ int set_page_dirty(struct page *page);
+ int set_page_dirty_lock(struct page *page);
+ int clear_page_dirty_for_io(struct page *page);
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 6c4363b..ee0fc7e 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -267,14 +267,12 @@ extern int of_property_read_u64(const struct device_node *np,
+ extern int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string);
+-extern int of_property_read_string_index(struct device_node *np,
+- const char *propname,
+- int index, const char **output);
+ extern int of_property_match_string(struct device_node *np,
+ const char *propname,
+ const char *string);
+-extern int of_property_count_strings(struct device_node *np,
+- const char *propname);
++extern int of_property_read_string_helper(struct device_node *np,
++ const char *propname,
++ const char **out_strs, size_t sz, int index);
+ extern int of_device_is_compatible(const struct device_node *device,
+ const char *);
+ extern int of_device_is_available(const struct device_node *device);
+@@ -486,15 +484,9 @@ static inline int of_property_read_string(struct device_node *np,
+ return -ENOSYS;
+ }
+
+-static inline int of_property_read_string_index(struct device_node *np,
+- const char *propname, int index,
+- const char **out_string)
+-{
+- return -ENOSYS;
+-}
+-
+-static inline int of_property_count_strings(struct device_node *np,
+- const char *propname)
++static inline int of_property_read_string_helper(struct device_node *np,
++ const char *propname,
++ const char **out_strs, size_t sz, int index)
+ {
+ return -ENOSYS;
+ }
+@@ -668,6 +660,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
+ }
+
+ /**
++ * of_property_read_string_array() - Read an array of strings from a multiple
++ * strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @out_strs: output array of string pointers.
++ * @sz: number of array elements to read.
++ *
++ * Search for a property in a device tree node and retrieve a list of
++ * null-terminated string values (pointer to data, not a copy) in that property.
++ *
++ * If @out_strs is NULL, the number of strings in the property is returned.
++ */
++static inline int of_property_read_string_array(struct device_node *np,
++ const char *propname, const char **out_strs,
++ size_t sz)
++{
++ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
++}
++
++/**
++ * of_property_count_strings() - Find and return the number of strings from a
++ * multiple strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ *
++ * Search for a property in a device tree node and retrieve the number of
++ * null-terminated strings contained in it. Returns the number of strings on
++ * success, -EINVAL if the property does not exist, -ENODATA if property
++ * does not have a value, and -EILSEQ if the string is not null-terminated
++ * within the length of the property data.
++ */
++static inline int of_property_count_strings(struct device_node *np,
++ const char *propname)
++{
++ return of_property_read_string_helper(np, propname, NULL, 0, 0);
++}
++
++/**
++ * of_property_read_string_index() - Find and read a string from a multiple
++ * strings property.
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ * @index: index of the string in the list of strings
++ * @out_string: pointer to null terminated return string, modified only if
++ * return value is 0.
++ *
++ * Search for a property in a device tree node and retrieve a null
++ * terminated string value (pointer to data, not a copy) in the list of strings
++ * contained in that property.
++ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
++ * property does not have a value, and -EILSEQ if the string is not
++ * null-terminated within the length of the property data.
++ *
++ * The out_string pointer is modified only if a valid string can be decoded.
++ */
++static inline int of_property_read_string_index(struct device_node *np,
++ const char *propname,
++ int index, const char **output)
++{
++ int rc = of_property_read_string_helper(np, propname, output, 1, index);
++ return rc < 0 ? rc : 0;
++}
++
++/**
+ * of_property_read_bool - Findfrom a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 647395a..e8d6e10 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
+ extern unsigned long oom_badness(struct task_struct *p,
+ struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ unsigned long totalpages);
++
++extern int oom_kills_count(void);
++extern void note_oom_kill(void);
+ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+ struct mem_cgroup *memcg, nodemask_t *nodemask,
+diff --git a/include/linux/string.h b/include/linux/string.h
+index d36977e..3b42b37 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+- const void *from, size_t available);
++ const void *from, size_t available);
+
+ /**
+ * strstarts - does @str start with @prefix?
+@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
+
+-extern size_t memweight(const void *ptr, size_t bytes);
++size_t memweight(const void *ptr, size_t bytes);
++void memzero_explicit(void *s, size_t count);
+
+ /**
+ * kbasename - return the last part of a pathname.
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index fcbfe87..cf391ee 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -357,6 +357,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
+ #define XPRT_CONNECTION_ABORT (7)
+ #define XPRT_CONNECTION_CLOSE (8)
+ #define XPRT_CONGESTED (9)
++#define XPRT_CONNECTION_REUSE (10)
+
+ static inline void xprt_set_connected(struct rpc_xprt *xprt)
+ {
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 32e0f5c..4a185a0 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -44,4 +44,7 @@
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+
++/* device can't handle device_qualifier descriptor requests */
++#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index a2db816..268c8f1 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -669,6 +669,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+
++void ipv6_proxy_select_ident(struct sk_buff *skb);
++
+ int ip6_dst_hoplimit(struct dst_entry *dst);
+
+ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
+diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
+index e645835..56ed843 100644
+--- a/include/scsi/scsi_tcq.h
++++ b/include/scsi/scsi_tcq.h
+@@ -67,8 +67,9 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
+ if (!sdev->tagged_supported)
+ return;
+
+- if (!shost_use_blk_mq(sdev->host) &&
+- !blk_queue_tagged(sdev->request_queue))
++ if (shost_use_blk_mq(sdev->host))
++ queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, sdev->request_queue);
++ else if (!blk_queue_tagged(sdev->request_queue))
+ blk_queue_init_tags(sdev->request_queue, depth,
+ sdev->host->bqt);
+
+@@ -81,8 +82,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
+ **/
+ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
+ {
+- if (!shost_use_blk_mq(sdev->host) &&
+- blk_queue_tagged(sdev->request_queue))
++ if (blk_queue_tagged(sdev->request_queue))
+ blk_queue_free_tags(sdev->request_queue);
+ scsi_adjust_queue_depth(sdev, 0, depth);
+ }
+diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
+index 4fc66f6..c472bedb 100644
+--- a/include/uapi/drm/vmwgfx_drm.h
++++ b/include/uapi/drm/vmwgfx_drm.h
+@@ -29,7 +29,7 @@
+ #define __VMWGFX_DRM_H__
+
+ #ifndef __KERNEL__
+-#include <drm.h>
++#include <drm/drm.h>
+ #endif
+
+ #define DRM_VMW_MAX_SURFACE_FACES 6
+diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
+index 1874ebe..a1d7e93 100644
+--- a/include/uapi/linux/input.h
++++ b/include/uapi/linux/input.h
+@@ -739,6 +739,13 @@ struct input_keymap_entry {
+ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
+ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
+
++#define KEY_KBDINPUTASSIST_PREV 0x260
++#define KEY_KBDINPUTASSIST_NEXT 0x261
++#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
++#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263
++#define KEY_KBDINPUTASSIST_ACCEPT 0x264
++#define KEY_KBDINPUTASSIST_CANCEL 0x265
++
+ #define BTN_TRIGGER_HAPPY 0x2c0
+ #define BTN_TRIGGER_HAPPY1 0x2c0
+ #define BTN_TRIGGER_HAPPY2 0x2c1
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 963bf13..658f232 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -902,13 +902,23 @@ static void put_ctx(struct perf_event_context *ctx)
+ }
+ }
+
+-static void unclone_ctx(struct perf_event_context *ctx)
++/*
++ * This must be done under the ctx->lock, such as to serialize against
++ * context_equiv(), therefore we cannot call put_ctx() since that might end up
++ * calling scheduler related locks and ctx->lock nests inside those.
++ */
++static __must_check struct perf_event_context *
++unclone_ctx(struct perf_event_context *ctx)
+ {
+- if (ctx->parent_ctx) {
+- put_ctx(ctx->parent_ctx);
++ struct perf_event_context *parent_ctx = ctx->parent_ctx;
++
++ lockdep_assert_held(&ctx->lock);
++
++ if (parent_ctx)
+ ctx->parent_ctx = NULL;
+- }
+ ctx->generation++;
++
++ return parent_ctx;
+ }
+
+ static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
+@@ -2210,6 +2220,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ static int context_equiv(struct perf_event_context *ctx1,
+ struct perf_event_context *ctx2)
+ {
++ lockdep_assert_held(&ctx1->lock);
++ lockdep_assert_held(&ctx2->lock);
++
+ /* Pinning disables the swap optimization */
+ if (ctx1->pin_count || ctx2->pin_count)
+ return 0;
+@@ -2943,6 +2956,7 @@ static int event_enable_on_exec(struct perf_event *event,
+ */
+ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
+ {
++ struct perf_event_context *clone_ctx = NULL;
+ struct perf_event *event;
+ unsigned long flags;
+ int enabled = 0;
+@@ -2974,7 +2988,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
+ * Unclone this context if we enabled any event.
+ */
+ if (enabled)
+- unclone_ctx(ctx);
++ clone_ctx = unclone_ctx(ctx);
+
+ raw_spin_unlock(&ctx->lock);
+
+@@ -2984,6 +2998,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
+ perf_event_context_sched_in(ctx, ctx->task);
+ out:
+ local_irq_restore(flags);
++
++ if (clone_ctx)
++ put_ctx(clone_ctx);
+ }
+
+ void perf_event_exec(void)
+@@ -3135,7 +3152,7 @@ errout:
+ static struct perf_event_context *
+ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+ {
+- struct perf_event_context *ctx;
++ struct perf_event_context *ctx, *clone_ctx = NULL;
+ struct perf_cpu_context *cpuctx;
+ unsigned long flags;
+ int ctxn, err;
+@@ -3169,9 +3186,12 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
+ retry:
+ ctx = perf_lock_task_context(task, ctxn, &flags);
+ if (ctx) {
+- unclone_ctx(ctx);
++ clone_ctx = unclone_ctx(ctx);
+ ++ctx->pin_count;
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
++
++ if (clone_ctx)
++ put_ctx(clone_ctx);
+ } else {
+ ctx = alloc_perf_context(pmu, task);
+ err = -ENOMEM;
+@@ -7523,7 +7543,7 @@ __perf_event_exit_task(struct perf_event *child_event,
+ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ {
+ struct perf_event *child_event, *next;
+- struct perf_event_context *child_ctx, *parent_ctx;
++ struct perf_event_context *child_ctx, *clone_ctx = NULL;
+ unsigned long flags;
+
+ if (likely(!child->perf_event_ctxp[ctxn])) {
+@@ -7550,28 +7570,16 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ child->perf_event_ctxp[ctxn] = NULL;
+
+ /*
+- * In order to avoid freeing: child_ctx->parent_ctx->task
+- * under perf_event_context::lock, grab another reference.
+- */
+- parent_ctx = child_ctx->parent_ctx;
+- if (parent_ctx)
+- get_ctx(parent_ctx);
+-
+- /*
+ * If this context is a clone; unclone it so it can't get
+ * swapped to another process while we're removing all
+ * the events from it.
+ */
+- unclone_ctx(child_ctx);
++ clone_ctx = unclone_ctx(child_ctx);
+ update_context_time(child_ctx);
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+- /*
+- * Now that we no longer hold perf_event_context::lock, drop
+- * our extra child_ctx->parent_ctx reference.
+- */
+- if (parent_ctx)
+- put_ctx(parent_ctx);
++ if (clone_ctx)
++ put_ctx(clone_ctx);
+
+ /*
+ * Report the task dead after unscheduling the events so that we
+diff --git a/kernel/freezer.c b/kernel/freezer.c
+index aa6a8aa..8f9279b 100644
+--- a/kernel/freezer.c
++++ b/kernel/freezer.c
+@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
+ if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
+ return false;
+
++ if (test_thread_flag(TIF_MEMDIE))
++ return false;
++
+ if (pm_nosig_freezing || cgroup_freezing(p))
+ return true;
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f3a3a07..22b3f1b 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -641,8 +641,14 @@ static struct futex_pi_state * alloc_pi_state(void)
+ return pi_state;
+ }
+
++/*
++ * Must be called with the hb lock held.
++ */
+ static void free_pi_state(struct futex_pi_state *pi_state)
+ {
++ if (!pi_state)
++ return;
++
+ if (!atomic_dec_and_test(&pi_state->refcount))
+ return;
+
+@@ -1521,15 +1527,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ }
+
+ retry:
+- if (pi_state != NULL) {
+- /*
+- * We will have to lookup the pi_state again, so free this one
+- * to keep the accounting correct.
+- */
+- free_pi_state(pi_state);
+- pi_state = NULL;
+- }
+-
+ ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
+ if (unlikely(ret != 0))
+ goto out;
+@@ -1619,6 +1616,8 @@ retry_private:
+ case 0:
+ break;
+ case -EFAULT:
++ free_pi_state(pi_state);
++ pi_state = NULL;
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+ put_futex_key(&key2);
+@@ -1634,6 +1633,8 @@ retry_private:
+ * exit to complete.
+ * - The user space value changed.
+ */
++ free_pi_state(pi_state);
++ pi_state = NULL;
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+ put_futex_key(&key2);
+@@ -1710,6 +1711,7 @@ retry_private:
+ }
+
+ out_unlock:
++ free_pi_state(pi_state);
+ double_unlock_hb(hb1, hb2);
+ hb_waiters_dec(hb2);
+
+@@ -1727,8 +1729,6 @@ out_put_keys:
+ out_put_key1:
+ put_futex_key(&key1);
+ out:
+- if (pi_state != NULL)
+- free_pi_state(pi_state);
+ return ret ? ret : task_count;
+ }
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 03214bd2..1c47139 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1842,7 +1842,9 @@ static void free_module(struct module *mod)
+
+ /* We leave it in list to prevent duplicate loads, but make sure
+ * that noone uses it while it's being deconstructed. */
++ mutex_lock(&module_mutex);
+ mod->state = MODULE_STATE_UNFORMED;
++ mutex_unlock(&module_mutex);
+
+ /* Remove dynamic debug info */
+ ddebug_remove_module(mod->name);
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index a9dfa79..1f35a34 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode)
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+ error = resume_target_kernel(platform_mode);
+- dpm_resume_end(PMSG_RECOVER);
++ /*
++ * The above should either succeed and jump to the new kernel,
++ * or return with an error. Otherwise things are just
++ * undefined, so let's be paranoid.
++ */
++ BUG_ON(!error);
+ }
++ dpm_resume_end(PMSG_RECOVER);
+ pm_restore_gfp_mask();
+ resume_console();
+ pm_restore_console();
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 4ee194e..7a37cf3 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -108,6 +108,28 @@ static int try_to_freeze_tasks(bool user_only)
+ return todo ? -EBUSY : 0;
+ }
+
++/*
++ * Returns true if all freezable tasks (except for current) are frozen already
++ */
++static bool check_frozen_processes(void)
++{
++ struct task_struct *g, *p;
++ bool ret = true;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ if (p != current && !freezer_should_skip(p) &&
++ !frozen(p)) {
++ ret = false;
++ goto done;
++ }
++ }
++done:
++ read_unlock(&tasklist_lock);
++
++ return ret;
++}
++
+ /**
+ * freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen. The same process that calls
+@@ -118,6 +140,7 @@ static int try_to_freeze_tasks(bool user_only)
+ int freeze_processes(void)
+ {
+ int error;
++ int oom_kills_saved;
+
+ error = __usermodehelper_disable(UMH_FREEZING);
+ if (error)
+@@ -131,12 +154,27 @@ int freeze_processes(void)
+
+ printk("Freezing user space processes ... ");
+ pm_freezing = true;
++ oom_kills_saved = oom_kills_count();
+ error = try_to_freeze_tasks(true);
+ if (!error) {
+- printk("done.");
+ __usermodehelper_set_disable_depth(UMH_DISABLED);
+ oom_killer_disable();
++
++ /*
++ * There might have been an OOM kill while we were
++ * freezing tasks and the killed task might be still
++ * on the way out so we have to double check for race.
++ */
++ if (oom_kills_count() != oom_kills_saved &&
++ !check_frozen_processes()) {
++ __usermodehelper_set_disable_depth(UMH_ENABLED);
++ printk("OOM in progress.");
++ error = -EBUSY;
++ goto done;
++ }
++ printk("done.");
+ }
++done:
+ printk("\n");
+ BUG_ON(in_atomic());
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index ec1a286..6d7cb91 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1977,6 +1977,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
+ #ifdef CONFIG_SMP
+ inline struct dl_bw *dl_bw_of(int i)
+ {
++ rcu_lockdep_assert(rcu_read_lock_sched_held(),
++ "sched RCU must be held");
+ return &cpu_rq(i)->rd->dl_bw;
+ }
+
+@@ -1985,6 +1987,8 @@ static inline int dl_bw_cpus(int i)
+ struct root_domain *rd = cpu_rq(i)->rd;
+ int cpus = 0;
+
++ rcu_lockdep_assert(rcu_read_lock_sched_held(),
++ "sched RCU must be held");
+ for_each_cpu_and(i, rd->span, cpu_active_mask)
+ cpus++;
+
+@@ -4004,13 +4008,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+ * root_domain.
+ */
+ #ifdef CONFIG_SMP
+- if (task_has_dl_policy(p)) {
+- const struct cpumask *span = task_rq(p)->rd->span;
+-
+- if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
++ if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
++ rcu_read_lock();
++ if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
+ retval = -EBUSY;
++ rcu_read_unlock();
+ goto out_unlock;
+ }
++ rcu_read_unlock();
+ }
+ #endif
+ again:
+@@ -7580,6 +7585,8 @@ static int sched_dl_global_constraints(void)
+ int cpu, ret = 0;
+ unsigned long flags;
+
++ rcu_read_lock();
++
+ /*
+ * Here we want to check the bandwidth not being set to some
+ * value smaller than the currently allocated bandwidth in
+@@ -7601,6 +7608,8 @@ static int sched_dl_global_constraints(void)
+ break;
+ }
+
++ rcu_read_unlock();
++
+ return ret;
+ }
+
+@@ -7616,6 +7625,7 @@ static void sched_dl_do_global(void)
+ if (global_rt_runtime() != RUNTIME_INF)
+ new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+
++ rcu_read_lock();
+ /*
+ * FIXME: As above...
+ */
+@@ -7626,6 +7636,7 @@ static void sched_dl_do_global(void)
+ dl_b->bw = new_bw;
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ }
++ rcu_read_unlock();
+ }
+
+ static int sched_rt_global_validate(void)
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 42b463a..31ea01f 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+ goto out;
+ }
+ } else {
++ memset(&event.sigev_value, 0, sizeof(event.sigev_value));
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = SIGALRM;
+ event.sigev_value.sival_int = new_timer->it_id;
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index 759d5e0..7e3cd7a 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
+ int size;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+
+ /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+ int syscall_nr;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+
+ /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ int size;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+ if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
+ return;
+@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ int size;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+- if (syscall_nr < 0)
++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
+ return;
+ if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
+ return;
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 1e031f2..33ce011 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
+ lower = src[off + k];
+ if (left && off + k == lim - 1)
+ lower &= mask;
+- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
++ dst[k] = lower >> rem;
++ if (rem)
++ dst[k] |= upper << (BITS_PER_LONG - rem);
+ if (left && k == lim - 1)
+ dst[k] &= mask;
+ }
+@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
+ upper = src[k];
+ if (left && k == lim - 1)
+ upper &= (1UL << left) - 1;
+- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
++ dst[k + off] = upper << rem;
++ if (rem)
++ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
+ if (left && k + off == lim - 1)
+ dst[k + off] &= (1UL << left) - 1;
+ }
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 9cdf62f..c9f2e8c 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -203,10 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+ }
+
+ table->orig_nents -= sg_size;
+- if (!skip_first_chunk) {
+- free_fn(sgl, alloc_size);
++ if (skip_first_chunk)
+ skip_first_chunk = false;
+- }
++ else
++ free_fn(sgl, alloc_size);
+ sgl = next;
+ }
+
+diff --git a/lib/string.c b/lib/string.c
+index f3c6ff5..70db57a 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -604,6 +604,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ * keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it just invokes memset() implicitly.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++ memset(s, 0, count);
++ OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+ * memcpy - Copy one area of memory to another
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index 52abeeb..1811ea2 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -93,11 +93,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ * to be released by the balloon driver.
+ */
+ if (trylock_page(page)) {
++#ifdef CONFIG_BALLOON_COMPACTION
+ if (!PagePrivate(page)) {
+ /* raced with isolation */
+ unlock_page(page);
+ continue;
+ }
++#endif
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ balloon_page_delete(page);
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+diff --git a/mm/cma.c b/mm/cma.c
+index 0ab5646..2904f45 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -123,6 +123,7 @@ static int __init cma_activate_area(struct cma *cma)
+
+ err:
+ kfree(cma->bitmap);
++ cma->count = 0;
+ return -EINVAL;
+ }
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index f8ffd94..45c6d67 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -200,7 +200,7 @@ retry:
+ preempt_disable();
+ if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+ preempt_enable();
+- __free_page(zero_page);
++ __free_pages(zero_page, compound_order(zero_page));
+ goto retry;
+ }
+
+@@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+ if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ struct page *zero_page = xchg(&huge_zero_page, NULL);
+ BUG_ON(zero_page == NULL);
+- __free_page(zero_page);
++ __free_pages(zero_page, compound_order(zero_page));
+ return HPAGE_PMD_NR;
+ }
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 28928ce..48914e1 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1545,12 +1545,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+ * start move here.
+ */
+
+-/* for quick checking without looking up memcg */
+-atomic_t memcg_moving __read_mostly;
+-
+ static void mem_cgroup_start_move(struct mem_cgroup *memcg)
+ {
+- atomic_inc(&memcg_moving);
+ atomic_inc(&memcg->moving_account);
+ synchronize_rcu();
+ }
+@@ -1561,10 +1557,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
+ * Now, mem_cgroup_clear_mc() may call this function with NULL.
+ * We check NULL in callee rather than caller.
+ */
+- if (memcg) {
+- atomic_dec(&memcg_moving);
++ if (memcg)
+ atomic_dec(&memcg->moving_account);
+- }
+ }
+
+ /*
+@@ -2249,41 +2243,52 @@ cleanup:
+ return true;
+ }
+
+-/*
+- * Used to update mapped file or writeback or other statistics.
++/**
++ * mem_cgroup_begin_page_stat - begin a page state statistics transaction
++ * @page: page that is going to change accounted state
++ * @locked: &memcg->move_lock slowpath was taken
++ * @flags: IRQ-state flags for &memcg->move_lock
+ *
+- * Notes: Race condition
++ * This function must mark the beginning of an accounted page state
++ * change to prevent double accounting when the page is concurrently
++ * being moved to another memcg:
+ *
+- * Charging occurs during page instantiation, while the page is
+- * unmapped and locked in page migration, or while the page table is
+- * locked in THP migration. No race is possible.
++ * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
++ * if (TestClearPageState(page))
++ * mem_cgroup_update_page_stat(memcg, state, -1);
++ * mem_cgroup_end_page_stat(memcg, locked, flags);
+ *
+- * Uncharge happens to pages with zero references, no race possible.
++ * The RCU lock is held throughout the transaction. The fast path can
++ * get away without acquiring the memcg->move_lock (@locked is false)
++ * because page moving starts with an RCU grace period.
+ *
+- * Charge moving between groups is protected by checking mm->moving
+- * account and taking the move_lock in the slowpath.
++ * The RCU lock also protects the memcg from being freed when the page
++ * state that is going to change is the only thing preventing the page
++ * from being uncharged. E.g. end-writeback clearing PageWriteback(),
++ * which allows migration to go ahead and uncharge the page before the
++ * account transaction might be complete.
+ */
+-
+-void __mem_cgroup_begin_update_page_stat(struct page *page,
+- bool *locked, unsigned long *flags)
++struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
++ bool *locked,
++ unsigned long *flags)
+ {
+ struct mem_cgroup *memcg;
+ struct page_cgroup *pc;
+
++ rcu_read_lock();
++
++ if (mem_cgroup_disabled())
++ return NULL;
++
+ pc = lookup_page_cgroup(page);
+ again:
+ memcg = pc->mem_cgroup;
+ if (unlikely(!memcg || !PageCgroupUsed(pc)))
+- return;
+- /*
+- * If this memory cgroup is not under account moving, we don't
+- * need to take move_lock_mem_cgroup(). Because we already hold
+- * rcu_read_lock(), any calls to move_account will be delayed until
+- * rcu_read_unlock().
+- */
+- VM_BUG_ON(!rcu_read_lock_held());
++ return NULL;
++
++ *locked = false;
+ if (atomic_read(&memcg->moving_account) <= 0)
+- return;
++ return memcg;
+
+ move_lock_mem_cgroup(memcg, flags);
+ if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
+@@ -2291,36 +2296,40 @@ again:
+ goto again;
+ }
+ *locked = true;
++
++ return memcg;
+ }
+
+-void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
++/**
++ * mem_cgroup_end_page_stat - finish a page state statistics transaction
++ * @memcg: the memcg that was accounted against
++ * @locked: value received from mem_cgroup_begin_page_stat()
++ * @flags: value received from mem_cgroup_begin_page_stat()
++ */
++void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
++ unsigned long flags)
+ {
+- struct page_cgroup *pc = lookup_page_cgroup(page);
++ if (memcg && locked)
++ move_unlock_mem_cgroup(memcg, &flags);
+
+- /*
+- * It's guaranteed that pc->mem_cgroup never changes while
+- * lock is held because a routine modifies pc->mem_cgroup
+- * should take move_lock_mem_cgroup().
+- */
+- move_unlock_mem_cgroup(pc->mem_cgroup, flags);
++ rcu_read_unlock();
+ }
+
+-void mem_cgroup_update_page_stat(struct page *page,
++/**
++ * mem_cgroup_update_page_stat - update page state statistics
++ * @memcg: memcg to account against
++ * @idx: page state item to account
++ * @val: number of pages (positive or negative)
++ *
++ * See mem_cgroup_begin_page_stat() for locking requirements.
++ */
++void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx, int val)
+ {
+- struct mem_cgroup *memcg;
+- struct page_cgroup *pc = lookup_page_cgroup(page);
+- unsigned long uninitialized_var(flags);
+-
+- if (mem_cgroup_disabled())
+- return;
+-
+ VM_BUG_ON(!rcu_read_lock_held());
+- memcg = pc->mem_cgroup;
+- if (unlikely(!memcg || !PageCgroupUsed(pc)))
+- return;
+
+- this_cpu_add(memcg->stat->count[idx], val);
++ if (memcg)
++ this_cpu_add(memcg->stat->count[idx], val);
+ }
+
+ /*
+diff --git a/mm/memory.c b/mm/memory.c
+index e229970..37b80fc 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1147,6 +1147,7 @@ again:
+ print_bad_pte(vma, addr, ptent, page);
+ if (unlikely(!__tlb_remove_page(tlb, page))) {
+ force_flush = 1;
++ addr += PAGE_SIZE;
+ break;
+ }
+ continue;
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 1e11df8..f1fb141 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ dump_tasks(memcg, nodemask);
+ }
+
++/*
++ * Number of OOM killer invocations (including memcg OOM killer).
++ * Primarily used by PM freezer to check for potential races with
++ * OOM killed frozen task.
++ */
++static atomic_t oom_kills = ATOMIC_INIT(0);
++
++int oom_kills_count(void)
++{
++ return atomic_read(&oom_kills);
++}
++
++void note_oom_kill(void)
++{
++ atomic_inc(&oom_kills);
++}
++
+ #define K(x) ((x) << (PAGE_SHIFT-10))
+ /*
+ * Must be called while holding a reference to p, which will be released upon
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 91d73ef..ba5fd97 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2116,23 +2116,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
+ EXPORT_SYMBOL(account_page_dirtied);
+
+ /*
+- * Helper function for set_page_writeback family.
+- *
+- * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
+- * while calling this function.
+- * See test_set_page_writeback for example.
+- *
+- * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+- * wrt interrupts.
+- */
+-void account_page_writeback(struct page *page)
+-{
+- mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+- inc_zone_page_state(page, NR_WRITEBACK);
+-}
+-EXPORT_SYMBOL(account_page_writeback);
+-
+-/*
+ * For address_spaces which do not use buffers. Just tag the page as dirty in
+ * its radix tree.
+ *
+@@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
+ int test_clear_page_writeback(struct page *page)
+ {
+ struct address_space *mapping = page_mapping(page);
+- int ret;
+- bool locked;
+ unsigned long memcg_flags;
++ struct mem_cgroup *memcg;
++ bool locked;
++ int ret;
+
+- mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
++ memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+ if (mapping) {
+ struct backing_dev_info *bdi = mapping->backing_dev_info;
+ unsigned long flags;
+@@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page)
+ ret = TestClearPageWriteback(page);
+ }
+ if (ret) {
+- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
++ mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+ dec_zone_page_state(page, NR_WRITEBACK);
+ inc_zone_page_state(page, NR_WRITTEN);
+ }
+- mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
++ mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
+ return ret;
+ }
+
+ int __test_set_page_writeback(struct page *page, bool keep_write)
+ {
+ struct address_space *mapping = page_mapping(page);
+- int ret;
+- bool locked;
+ unsigned long memcg_flags;
++ struct mem_cgroup *memcg;
++ bool locked;
++ int ret;
+
+- mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
++ memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
+ if (mapping) {
+ struct backing_dev_info *bdi = mapping->backing_dev_info;
+ unsigned long flags;
+@@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
+ } else {
+ ret = TestSetPageWriteback(page);
+ }
+- if (!ret)
+- account_page_writeback(page);
+- mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
++ if (!ret) {
++ mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
++ inc_zone_page_state(page, NR_WRITEBACK);
++ }
++ mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
+ return ret;
+
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index eee9619..8c5029f 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2253,6 +2253,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ }
+
+ /*
++ * PM-freezer should be notified that there might be an OOM killer on
++ * its way to kill and wake somebody up. This is too early and we might
++ * end up not killing anything but false positives are acceptable.
++ * See freeze_processes.
++ */
++ note_oom_kill();
++
++ /*
+ * Go through the zonelist yet one more time, keep very high watermark
+ * here, this is only to catch a parallel oom killing, we must fail if
+ * we're still under heavy pressure.
+diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
+index 3708264..5331c2b 100644
+--- a/mm/page_cgroup.c
++++ b/mm/page_cgroup.c
+@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
+ sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+
+ BUG_ON(PageReserved(page));
++ kmemleak_free(addr);
+ free_pages_exact(addr, table_size);
+ }
+ }
+diff --git a/mm/percpu.c b/mm/percpu.c
+index da997f9..2139e30 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1932,8 +1932,6 @@ void __init setup_per_cpu_areas(void)
+
+ if (pcpu_setup_first_chunk(ai, fc) < 0)
+ panic("Failed to initialize percpu areas.");
+-
+- pcpu_free_alloc_info(ai);
+ }
+
+ #endif /* CONFIG_SMP */
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 3e8491c..e01318d 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1042,15 +1042,16 @@ void page_add_new_anon_rmap(struct page *page,
+ */
+ void page_add_file_rmap(struct page *page)
+ {
+- bool locked;
++ struct mem_cgroup *memcg;
+ unsigned long flags;
++ bool locked;
+
+- mem_cgroup_begin_update_page_stat(page, &locked, &flags);
++ memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+ if (atomic_inc_and_test(&page->_mapcount)) {
+ __inc_zone_page_state(page, NR_FILE_MAPPED);
+- mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
++ mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+ }
+- mem_cgroup_end_update_page_stat(page, &locked, &flags);
++ mem_cgroup_end_page_stat(memcg, locked, flags);
+ }
+
+ /**
+@@ -1061,9 +1062,10 @@ void page_add_file_rmap(struct page *page)
+ */
+ void page_remove_rmap(struct page *page)
+ {
++ struct mem_cgroup *uninitialized_var(memcg);
+ bool anon = PageAnon(page);
+- bool locked;
+ unsigned long flags;
++ bool locked;
+
+ /*
+ * The anon case has no mem_cgroup page_stat to update; but may
+@@ -1071,7 +1073,7 @@ void page_remove_rmap(struct page *page)
+ * we hold the lock against page_stat move: so avoid it on anon.
+ */
+ if (!anon)
+- mem_cgroup_begin_update_page_stat(page, &locked, &flags);
++ memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+ /* page still mapped by someone else? */
+ if (!atomic_add_negative(-1, &page->_mapcount))
+@@ -1096,8 +1098,7 @@ void page_remove_rmap(struct page *page)
+ -hpage_nr_pages(page));
+ } else {
+ __dec_zone_page_state(page, NR_FILE_MAPPED);
+- mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+- mem_cgroup_end_update_page_stat(page, &locked, &flags);
++ mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+ }
+ if (unlikely(PageMlocked(page)))
+ clear_page_mlock(page);
+@@ -1110,10 +1111,9 @@ void page_remove_rmap(struct page *page)
+ * Leaving it set also helps swapoff to reinstate ptes
+ * faster for those pages still in swapcache.
+ */
+- return;
+ out:
+ if (!anon)
+- mem_cgroup_end_update_page_stat(page, &locked, &flags);
++ mem_cgroup_end_page_stat(memcg, locked, flags);
+ }
+
+ /*
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 96d1673..c646084 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -20,6 +20,7 @@
+ #include <linux/buffer_head.h> /* grr. try_to_release_page,
+ do_invalidatepage */
+ #include <linux/cleancache.h>
++#include <linux/rmap.h>
+ #include "internal.h"
+
+ static void clear_exceptional_entry(struct address_space *mapping,
+@@ -719,12 +720,67 @@ EXPORT_SYMBOL(truncate_pagecache);
+ */
+ void truncate_setsize(struct inode *inode, loff_t newsize)
+ {
++ loff_t oldsize = inode->i_size;
++
+ i_size_write(inode, newsize);
++ if (newsize > oldsize)
++ pagecache_isize_extended(inode, oldsize, newsize);
+ truncate_pagecache(inode, newsize);
+ }
+ EXPORT_SYMBOL(truncate_setsize);
+
+ /**
++ * pagecache_isize_extended - update pagecache after extension of i_size
++ * @inode: inode for which i_size was extended
++ * @from: original inode size
++ * @to: new inode size
++ *
++ * Handle extension of inode size either caused by extending truncate or by
++ * write starting after current i_size. We mark the page straddling current
++ * i_size RO so that page_mkwrite() is called on the nearest write access to
++ * the page. This way filesystem can be sure that page_mkwrite() is called on
++ * the page before user writes to the page via mmap after the i_size has been
++ * changed.
++ *
++ * The function must be called after i_size is updated so that page fault
++ * coming after we unlock the page will already see the new i_size.
++ * The function must be called while we still hold i_mutex - this not only
++ * makes sure i_size is stable but also that userspace cannot observe new
++ * i_size value before we are prepared to store mmap writes at new inode size.
++ */
++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
++{
++ int bsize = 1 << inode->i_blkbits;
++ loff_t rounded_from;
++ struct page *page;
++ pgoff_t index;
++
++ WARN_ON(to > inode->i_size);
++
++ if (from >= to || bsize == PAGE_CACHE_SIZE)
++ return;
++ /* Page straddling @from will not have any hole block created? */
++ rounded_from = round_up(from, bsize);
++ if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
++ return;
++
++ index = from >> PAGE_CACHE_SHIFT;
++ page = find_lock_page(inode->i_mapping, index);
++ /* Page not cached? Nothing to do */
++ if (!page)
++ return;
++ /*
++ * See clear_page_dirty_for_io() for details why set_page_dirty()
++ * is needed.
++ */
++ if (page_mkclean(page))
++ set_page_dirty(page);
++ unlock_page(page);
++ page_cache_release(page);
++}
++EXPORT_SYMBOL(pagecache_isize_extended);
++
++/**
+ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+ * @inode: inode
+ * @lstart: offset of beginning of hole
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index b2f571d..9f02369 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -292,7 +292,11 @@ int ceph_msgr_init(void)
+ if (ceph_msgr_slab_init())
+ return -ENOMEM;
+
+- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
++ /*
++ * The number of active work items is limited by the number of
++ * connections, so leave @max_active at default.
++ */
++ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
+ if (ceph_msgr_wq)
+ return 0;
+
+diff --git a/net/core/tso.c b/net/core/tso.c
+index 8c3203c..630b30b 100644
+--- a/net/core/tso.c
++++ b/net/core/tso.c
+@@ -1,6 +1,7 @@
+ #include <linux/export.h>
+ #include <net/ip.h>
+ #include <net/tso.h>
++#include <asm/unaligned.h>
+
+ /* Calculate expected number of TX descriptors */
+ int tso_count_descs(struct sk_buff *skb)
+@@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+ iph->id = htons(tso->ip_id);
+ iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+ tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
+- tcph->seq = htonl(tso->tcp_seq);
++ put_unaligned_be32(tso->tcp_seq, &tcph->seq);
+ tso->ip_id++;
+
+ if (!is_last) {
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index b10cd43a..4a74ea8 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -535,7 +535,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
+ return 1;
+
+ attrlen = rtnh_attrlen(rtnh);
+- if (attrlen < 0) {
++ if (attrlen > 0) {
+ struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+
+ nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 6556263..dd73bea 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -51,7 +51,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+
+- ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
++ ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
+ if (unlikely(ghl < sizeof(*greh)))
+ goto out;
+
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 215af2b..c43a1e2 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1533,6 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ struct sk_buff *nskb;
+ struct sock *sk;
+ struct inet_sock *inet;
++ int err;
+
+ if (ip_options_echo(&replyopts.opt.opt, skb))
+ return;
+@@ -1572,8 +1573,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ sock_net_set(sk, net);
+ __skb_queue_head_init(&sk->sk_write_queue);
+ sk->sk_sndbuf = sysctl_wmem_default;
+- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
+- &ipc, &rt, MSG_DONTWAIT);
++ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
++ len, 0, &ipc, &rt, MSG_DONTWAIT);
++ if (unlikely(err)) {
++ ip_flush_pending_frames(sk);
++ goto out;
++ }
++
+ nskb = skb_peek(&sk->sk_write_queue);
+ if (nskb) {
+ if (arg->csumoffset >= 0)
+@@ -1585,7 +1591,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
+ ip_push_pending_frames(sk, &fl4);
+ }
+-
++out:
+ put_cpu_var(unicast_sock);
+
+ ip_rt_put(rt);
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index f4c987b..88c386c 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ skb_pull_rcsum(skb, hdr_len);
+
+ if (inner_proto == htons(ETH_P_TEB)) {
+- struct ethhdr *eh = (struct ethhdr *)skb->data;
++ struct ethhdr *eh;
+
+ if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+ return -ENOMEM;
+
++ eh = (struct ethhdr *)skb->data;
+ if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+ skb->protocol = eh->h_proto;
+ else
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index cbadb94..29836f8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1798,6 +1798,7 @@ local_input:
+ no_route:
+ RT_CACHE_STAT_INC(in_no_route);
+ res.type = RTN_UNREACHABLE;
++ res.fi = NULL;
+ goto local_input;
+
+ /*
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 541f26a..6b0b38f 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2985,61 +2985,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
+ #endif
+
+ #ifdef CONFIG_TCP_MD5SIG
+-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
++static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
+ static DEFINE_MUTEX(tcp_md5sig_mutex);
+-
+-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+- struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+-
+- if (p->md5_desc.tfm)
+- crypto_free_hash(p->md5_desc.tfm);
+- }
+- free_percpu(pool);
+-}
++static bool tcp_md5sig_pool_populated = false;
+
+ static void __tcp_alloc_md5sig_pool(void)
+ {
+ int cpu;
+- struct tcp_md5sig_pool __percpu *pool;
+-
+- pool = alloc_percpu(struct tcp_md5sig_pool);
+- if (!pool)
+- return;
+
+ for_each_possible_cpu(cpu) {
+- struct crypto_hash *hash;
+-
+- hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+- if (IS_ERR_OR_NULL(hash))
+- goto out_free;
++ if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
++ struct crypto_hash *hash;
+
+- per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
++ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
++ if (IS_ERR_OR_NULL(hash))
++ return;
++ per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
++ }
+ }
+- /* before setting tcp_md5sig_pool, we must commit all writes
+- * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
++ /* before setting tcp_md5sig_pool_populated, we must commit all writes
++ * to memory. See smp_rmb() in tcp_get_md5sig_pool()
+ */
+ smp_wmb();
+- tcp_md5sig_pool = pool;
+- return;
+-out_free:
+- __tcp_free_md5sig_pool(pool);
++ tcp_md5sig_pool_populated = true;
+ }
+
+ bool tcp_alloc_md5sig_pool(void)
+ {
+- if (unlikely(!tcp_md5sig_pool)) {
++ if (unlikely(!tcp_md5sig_pool_populated)) {
+ mutex_lock(&tcp_md5sig_mutex);
+
+- if (!tcp_md5sig_pool)
++ if (!tcp_md5sig_pool_populated)
+ __tcp_alloc_md5sig_pool();
+
+ mutex_unlock(&tcp_md5sig_mutex);
+ }
+- return tcp_md5sig_pool != NULL;
++ return tcp_md5sig_pool_populated;
+ }
+ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+@@ -3053,13 +3034,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+ */
+ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
+ {
+- struct tcp_md5sig_pool __percpu *p;
+-
+ local_bh_disable();
+- p = ACCESS_ONCE(tcp_md5sig_pool);
+- if (p)
+- return __this_cpu_ptr(p);
+
++ if (tcp_md5sig_pool_populated) {
++ /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
++ smp_rmb();
++ return this_cpu_ptr(&tcp_md5sig_pool);
++ }
+ local_bh_enable();
+ return NULL;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index cd17f00..3f49eae 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -208,8 +208,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ inet->inet_dport = usin->sin_port;
+ inet->inet_daddr = daddr;
+
+- inet_set_txhash(sk);
+-
+ inet_csk(sk)->icsk_ext_hdr_len = 0;
+ if (inet_opt)
+ inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
+@@ -226,6 +224,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (err)
+ goto failure;
+
++ inet_set_txhash(sk);
++
+ rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
+ inet->inet_sport, inet->inet_dport, sk);
+ if (IS_ERR(rt)) {
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 5ec867e..1d4156d 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -3,11 +3,45 @@
+ * not configured or static. These functions are needed by GSO/GRO implementation.
+ */
+ #include <linux/export.h>
++#include <net/ip.h>
+ #include <net/ipv6.h>
+ #include <net/ip6_fib.h>
+ #include <net/addrconf.h>
+ #include <net/secure_seq.h>
+
++/* This function exists only for tap drivers that must support broken
++ * clients requesting UFO without specifying an IPv6 fragment ID.
++ *
++ * This is similar to ipv6_select_ident() but we use an independent hash
++ * seed to limit information leakage.
++ *
++ * The network header must be set before calling this.
++ */
++void ipv6_proxy_select_ident(struct sk_buff *skb)
++{
++ static u32 ip6_proxy_idents_hashrnd __read_mostly;
++ struct in6_addr buf[2];
++ struct in6_addr *addrs;
++ u32 hash, id;
++
++ addrs = skb_header_pointer(skb,
++ skb_network_offset(skb) +
++ offsetof(struct ipv6hdr, saddr),
++ sizeof(buf), buf);
++ if (!addrs)
++ return;
++
++ net_get_random_once(&ip6_proxy_idents_hashrnd,
++ sizeof(ip6_proxy_idents_hashrnd));
++
++ hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
++ hash = __ipv6_addr_jhash(&addrs[0], hash);
++
++ id = ip_idents_reserve(hash, 1);
++ skb_shinfo(skb)->ip6_frag_id = htonl(id);
++}
++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
++
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+ u16 offset = sizeof(struct ipv6hdr);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 29964c3..264c0f2 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -198,8 +198,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ sk->sk_v6_daddr = usin->sin6_addr;
+ np->flow_label = fl6.flowlabel;
+
+- ip6_set_txhash(sk);
+-
+ /*
+ * TCP over IPv4
+ */
+@@ -295,6 +293,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ if (err)
+ goto late_failure;
+
++ ip6_set_txhash(sk);
++
+ if (!tp->write_seq && likely(!tp->repair))
+ tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index 8fdadfd..6081329 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
+ */
+ if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ u32 basic_rates = vif->bss_conf.basic_rates;
+- s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
++ s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
+
+ rate = &sband->bitrates[rates[0].idx];
+
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index c416725..f1de72d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
+ static int netlink_dump(struct sock *sk);
+ static void netlink_skb_destructor(struct sk_buff *skb);
+
++/* nl_table locking explained:
++ * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
++ * combined with an RCU read-side lock. Insertion and removal are protected
++ * with nl_sk_hash_lock while using RCU list modification primitives and may
++ * run in parallel to nl_table_lock protected lookups. Destruction of the
++ * Netlink socket may only occur *after* nl_table_lock has been acquired
++ * either during or after the socket has been removed from the list.
++ */
+ DEFINE_RWLOCK(nl_table_lock);
+ EXPORT_SYMBOL_GPL(nl_table_lock);
+ static atomic_t nl_table_users = ATOMIC_INIT(0);
+@@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
+ static int lockdep_nl_sk_hash_is_held(void)
+ {
+ #ifdef CONFIG_LOCKDEP
+- return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1;
+-#else
+- return 1;
++ if (debug_locks)
++ return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
+ #endif
++ return 1;
+ }
+
+ static ATOMIC_NOTIFIER_HEAD(netlink_chain);
+@@ -715,7 +723,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
+ * after validation, the socket and the ring may only be used by a
+ * single process, otherwise we fall back to copying.
+ */
+- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
++ if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+ atomic_read(&nlk->mapped) > 1)
+ excl = false;
+
+@@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
+ struct netlink_table *table = &nl_table[protocol];
+ struct sock *sk;
+
++ read_lock(&nl_table_lock);
+ rcu_read_lock();
+ sk = __netlink_lookup(table, portid, net);
+ if (sk)
+ sock_hold(sk);
+ rcu_read_unlock();
++ read_unlock(&nl_table_lock);
+
+ return sk;
+ }
+@@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock)
+ }
+ netlink_table_ungrab();
+
+- /* Wait for readers to complete */
+- synchronize_net();
+-
+ kfree(nlk->groups);
+ nlk->groups = NULL;
+
+@@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock)
+
+ retry:
+ cond_resched();
++ netlink_table_grab();
+ rcu_read_lock();
+ if (__netlink_lookup(table, portid, net)) {
+ /* Bind collision, search negative portid values. */
+@@ -1288,9 +1296,11 @@ retry:
+ if (rover > -4097)
+ rover = -4097;
+ rcu_read_unlock();
++ netlink_table_ungrab();
+ goto retry;
+ }
+ rcu_read_unlock();
++ netlink_table_ungrab();
+
+ err = netlink_insert(sk, net, portid);
+ if (err == -EADDRINUSE)
+@@ -2921,14 +2931,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
+ }
+
+ static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
+- __acquires(RCU)
++ __acquires(nl_table_lock) __acquires(RCU)
+ {
++ read_lock(&nl_table_lock);
+ rcu_read_lock();
+ return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ }
+
+ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++ struct rhashtable *ht;
+ struct netlink_sock *nlk;
+ struct nl_seq_iter *iter;
+ struct net *net;
+@@ -2943,19 +2955,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ iter = seq->private;
+ nlk = v;
+
+- rht_for_each_entry_rcu(nlk, nlk->node.next, node)
++ i = iter->link;
++ ht = &nl_table[i].hash;
++ rht_for_each_entry(nlk, nlk->node.next, ht, node)
+ if (net_eq(sock_net((struct sock *)nlk), net))
+ return nlk;
+
+- i = iter->link;
+ j = iter->hash_idx + 1;
+
+ do {
+- struct rhashtable *ht = &nl_table[i].hash;
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ for (; j < tbl->size; j++) {
+- rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
++ rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
+ if (net_eq(sock_net((struct sock *)nlk), net)) {
+ iter->link = i;
+ iter->hash_idx = j;
+@@ -2971,9 +2983,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ }
+
+ static void netlink_seq_stop(struct seq_file *seq, void *v)
+- __releases(RCU)
++ __releases(RCU) __releases(nl_table_lock)
+ {
+ rcu_read_unlock();
++ read_unlock(&nl_table_lock);
+ }
+
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 488ddee..e0b94ce 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -461,6 +461,8 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+
+ if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
+ clnt->cl_autobind = 1;
++ if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
++ clnt->cl_noretranstimeo = 1;
+ if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
+ clnt->cl_discrtry = 1;
+ if (!(args->flags & RPC_CLNT_CREATE_QUIET))
+@@ -579,6 +581,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
+ /* Turn off autobind on clones */
+ new->cl_autobind = 0;
+ new->cl_softrtry = clnt->cl_softrtry;
++ new->cl_noretranstimeo = clnt->cl_noretranstimeo;
+ new->cl_discrtry = clnt->cl_discrtry;
+ new->cl_chatty = clnt->cl_chatty;
+ return new;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 43cd89e..700f879 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -845,6 +845,8 @@ static void xs_error_report(struct sock *sk)
+ dprintk("RPC: xs_error_report client %p, error=%d...\n",
+ xprt, -err);
+ trace_rpc_socket_error(xprt, sk->sk_socket, err);
++ if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state))
++ goto out;
+ xprt_wake_pending_tasks(xprt, err);
+ out:
+ read_unlock_bh(&sk->sk_callback_lock);
+@@ -2245,7 +2247,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
+ &xprt->state);
+ /* "close" the socket, preserving the local port */
++ set_bit(XPRT_CONNECTION_REUSE, &xprt->state);
+ xs_tcp_reuse_connection(transport);
++ clear_bit(XPRT_CONNECTION_REUSE, &xprt->state);
+
+ if (abort_and_exit)
+ goto out_eagain;
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index fb1485d..6402065 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -1936,7 +1936,12 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
+ }
+ omsg = buf_msg(obuf);
+ pos += align(msg_size(omsg));
+- if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) {
++ if (msg_isdata(omsg)) {
++ if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
++ tipc_sk_mcast_rcv(obuf);
++ else
++ tipc_sk_rcv(obuf);
++ } else if (msg_user(omsg) == CONN_MANAGER) {
+ tipc_sk_rcv(obuf);
+ } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
+ tipc_named_rcv(obuf);
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 3bcb80d..970772c 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -284,6 +284,13 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ goto out;
+ }
+ evm_status = evm_verify_current_integrity(dentry);
++ if (evm_status == INTEGRITY_NOXATTRS) {
++ struct integrity_iint_cache *iint;
++
++ iint = integrity_iint_find(dentry->d_inode);
++ if (iint && (iint->flags & IMA_NEW_FILE))
++ return 0;
++ }
+ out:
+ if (evm_status != INTEGRITY_PASS)
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+@@ -311,9 +318,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ {
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+- if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+- && (xattr_data->type == EVM_XATTR_HMAC))
+- return -EPERM;
++ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
++ if (!xattr_value_len)
++ return -EINVAL;
++ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
++ return -EPERM;
++ }
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ }
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 225fd94..5850943 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -378,6 +378,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ if (result == 1) {
++ if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
++ return -EINVAL;
+ ima_reset_appraise_flags(dentry->d_inode,
+ (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
+ result = 0;
+diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
+index 904e68a..6885058 100644
+--- a/security/integrity/integrity.h
++++ b/security/integrity/integrity.h
+@@ -61,6 +61,7 @@ enum evm_ima_xattr_type {
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
++ IMA_XATTR_LAST
+ };
+
+ struct evm_ima_xattr_data {
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index b0e9404..e03bad5 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -481,6 +481,7 @@ next_inode:
+ list_entry(sbsec->isec_head.next,
+ struct inode_security_struct, list);
+ struct inode *inode = isec->inode;
++ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+ inode = igrab(inode);
+ if (inode) {
+@@ -489,7 +490,6 @@ next_inode:
+ iput(inode);
+ }
+ spin_lock(&sbsec->isec_lock);
+- list_del_init(&isec->list);
+ goto next_inode;
+ }
+ spin_unlock(&sbsec->isec_lock);
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 102e8fd..2d957ba 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -210,6 +210,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
+ if (err < 0)
+ return err;
+
++ if (clear_user(src, sizeof(*src)))
++ return -EFAULT;
+ if (put_user(status.state, &src->state) ||
+ compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
+ compat_put_timespec(&status.tstamp, &src->tstamp) ||
+diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
+index 45a0eed..3b052ed 100644
+--- a/sound/firewire/bebob/bebob_focusrite.c
++++ b/sound/firewire/bebob/bebob_focusrite.c
+@@ -27,12 +27,14 @@
+ #define SAFFIRE_CLOCK_SOURCE_INTERNAL 0
+ #define SAFFIRE_CLOCK_SOURCE_SPDIF 1
+
+-/* '1' is absent, why... */
++/* clock sources as returned from register of Saffire Pro 10 and 26 */
+ #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
++#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
+ #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
+-#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3
+-#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4
++#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 /* not used on s.pro. 10 */
++#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 /* not used on s.pro. 10 */
+ #define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK 5
++#define SAFFIREPRO_CLOCK_SOURCE_COUNT 6
+
+ /* S/PDIF, ADAT1, ADAT2 is enabled or not. three quadlets */
+ #define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4
+@@ -101,13 +103,34 @@ saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value)
+ &data, sizeof(__be32), 0);
+ }
+
++static char *const saffirepro_10_clk_src_labels[] = {
++ SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
++};
+ static char *const saffirepro_26_clk_src_labels[] = {
+ SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock"
+ };
+-
+-static char *const saffirepro_10_clk_src_labels[] = {
+- SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
++/* Value maps between registers and labels for SaffirePro 10/26. */
++static const signed char saffirepro_clk_maps[][SAFFIREPRO_CLOCK_SOURCE_COUNT] = {
++ /* SaffirePro 10 */
++ [0] = {
++ [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0,
++ [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */
++ [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1,
++ [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = -1, /* not supported */
++ [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = -1, /* not supported */
++ [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 2,
++ },
++ /* SaffirePro 26 */
++ [1] = {
++ [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0,
++ [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */
++ [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1,
++ [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = 2,
++ [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = 3,
++ [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 4,
++ }
+ };
++
+ static int
+ saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate)
+ {
+@@ -138,24 +161,35 @@ saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate)
+
+ return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id);
+ }
++
++/*
++ * query hardware for current clock source, return our internally
++ * used clock index in *id, depending on hardware.
++ */
+ static int
+ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
+ {
+ int err;
+- u32 value;
++ u32 value; /* clock source read from hw register */
++ const signed char *map;
+
+ err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value);
+ if (err < 0)
+ goto end;
+
+- if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) {
+- if (value == SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK)
+- *id = 2;
+- else if (value == SAFFIREPRO_CLOCK_SOURCE_SPDIF)
+- *id = 1;
+- } else if (value > 1) {
+- *id = value - 1;
++ /* depending on hardware, use a different mapping */
++ if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels)
++ map = saffirepro_clk_maps[0];
++ else
++ map = saffirepro_clk_maps[1];
++
++ /* In a case that this driver cannot handle the value of register. */
++ if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
++ err = -EIO;
++ goto end;
+ }
++
++ *id = (unsigned int)map[value];
+ end:
+ return err;
+ }
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index ef4d0c9..1aab0a32 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -129,12 +129,24 @@ snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, bool *internal)
+ /* 1.The device has its own operation to switch source of clock */
+ if (clk_spec) {
+ err = clk_spec->get(bebob, &id);
+- if (err < 0)
++ if (err < 0) {
+ dev_err(&bebob->unit->device,
+ "fail to get clock source: %d\n", err);
+- else if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
+- strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
++ goto end;
++ }
++
++ if (id >= clk_spec->num) {
++ dev_err(&bebob->unit->device,
++ "clock source %d out of range 0..%d\n",
++ id, clk_spec->num - 1);
++ err = -EIO;
++ goto end;
++ }
++
++ if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
++ strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
+ *internal = true;
++
+ goto end;
+ }
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index aa302fb..0a7f848 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -373,6 +373,8 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
+ #ifdef CONFIG_SND_DMA_SGBUF
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
+ struct snd_sg_buf *sgbuf = dmab->private_data;
++ if (chip->driver_type == AZX_DRIVER_CMEDIA)
++ return; /* deal with only CORB/RIRB buffers */
+ if (on)
+ set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
+ else
+@@ -1768,7 +1770,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
+ #ifdef CONFIG_X86
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx *chip = apcm->chip;
+- if (!azx_snoop(chip))
++ if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ #endif
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b7b293c..0c9d588 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5008,9 +5008,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x2224, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+- SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+- SND_PCI_QUIRK(0x103c, 0x2248, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+- SND_PCI_QUIRK(0x103c, 0x2249, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
+index 848cab8..2e9e90d 100644
+--- a/sound/soc/codecs/adau1761.c
++++ b/sound/soc/codecs/adau1761.c
+@@ -405,6 +405,7 @@ static const struct snd_soc_dapm_widget adau1761_dapm_widgets[] = {
+ 2, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Slew Clock", ADAU1761_CLK_ENABLE0, 6, 0, NULL, 0),
++ SND_SOC_DAPM_SUPPLY("ALC Clock", ADAU1761_CLK_ENABLE0, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY_S("Digital Clock 0", 1, ADAU1761_CLK_ENABLE1,
+ 0, 0, NULL, 0),
+@@ -436,6 +437,9 @@ static const struct snd_soc_dapm_route adau1761_dapm_routes[] = {
+ { "Right Playback Mixer", NULL, "Slew Clock" },
+ { "Left Playback Mixer", NULL, "Slew Clock" },
+
++ { "Left Input Mixer", NULL, "ALC Clock" },
++ { "Right Input Mixer", NULL, "ALC Clock" },
++
+ { "Digital Clock 0", NULL, "SYSCLK" },
+ { "Digital Clock 1", NULL, "SYSCLK" },
+ };
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index 64f179e..5e8626a 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -1121,6 +1121,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
+ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
+ {
+ struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
++ unsigned int pll_c, pll_d;
+ int ret;
+
+ if (power) {
+@@ -1138,6 +1139,18 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
+ /* Sync reg_cache with the hardware */
+ regcache_cache_only(aic3x->regmap, false);
+ regcache_sync(aic3x->regmap);
++
++ /* Rewrite paired PLL D registers in case cached sync skipped
++ * writing one of them and thus caused other one also not
++ * being written
++ */
++ pll_c = snd_soc_read(codec, AIC3X_PLL_PROGC_REG);
++ pll_d = snd_soc_read(codec, AIC3X_PLL_PROGD_REG);
++ if (pll_c == aic3x_reg[AIC3X_PLL_PROGC_REG].def ||
++ pll_d == aic3x_reg[AIC3X_PLL_PROGD_REG].def) {
++ snd_soc_write(codec, AIC3X_PLL_PROGC_REG, pll_c);
++ snd_soc_write(codec, AIC3X_PLL_PROGD_REG, pll_d);
++ }
+ } else {
+ /*
+ * Do soft reset to this codec instance in order to clear
+diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
+index 61bf6da..e895732 100644
+--- a/sound/soc/intel/sst-haswell-pcm.c
++++ b/sound/soc/intel/sst-haswell-pcm.c
+@@ -693,9 +693,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ }
+
+ #define HSW_FORMATS \
+- (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\
+- SNDRV_PCM_FMTBIT_S8)
++ (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+
+ static struct snd_soc_dai_driver hsw_dais[] = {
+ {
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index d074aa9..a3e0a0d 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -4315,10 +4315,10 @@ void snd_soc_remove_platform(struct snd_soc_platform *platform)
+ snd_soc_component_del_unlocked(&platform->component);
+ mutex_unlock(&client_mutex);
+
+- snd_soc_component_cleanup(&platform->component);
+-
+ dev_dbg(platform->dev, "ASoC: Unregistered platform '%s'\n",
+ platform->component.name);
++
++ snd_soc_component_cleanup(&platform->component);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_remove_platform);
+
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 177bd86..7098e6b 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -591,9 +591,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ int shared;
+ struct snd_kcontrol *kcontrol;
+ bool wname_in_long_name, kcname_in_long_name;
+- char *long_name;
++ char *long_name = NULL;
+ const char *name;
+- int ret;
++ int ret = 0;
+
+ prefix = soc_dapm_prefix(dapm);
+ if (prefix)
+@@ -652,15 +652,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+
+ kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
+ prefix);
+- kfree(long_name);
+- if (!kcontrol)
+- return -ENOMEM;
++ if (!kcontrol) {
++ ret = -ENOMEM;
++ goto exit_free;
++ }
++
+ kcontrol->private_free = dapm_kcontrol_free;
+
+ ret = dapm_kcontrol_data_alloc(w, kcontrol);
+ if (ret) {
+ snd_ctl_free_one(kcontrol);
+- return ret;
++ goto exit_free;
+ }
+
+ ret = snd_ctl_add(card, kcontrol);
+@@ -668,17 +670,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
+ dev_err(dapm->dev,
+ "ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
+ w->name, name, ret);
+- return ret;
++ goto exit_free;
+ }
+ }
+
+ ret = dapm_kcontrol_add_widget(kcontrol, w);
+- if (ret)
+- return ret;
++ if (ret == 0)
++ w->kcontrols[kci] = kcontrol;
+
+- w->kcontrols[kci] = kcontrol;
++exit_free:
++ kfree(long_name);
+
+- return 0;
++ return ret;
+ }
+
+ /* create new dapm mixer control */
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 642c862..002311a 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -352,7 +352,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
+ } else {
+ for (i = 0; i < rtd->num_codecs; i++) {
+ codec_dai = rtd->codec_dais[i];
+- if (codec_dai->driver->playback.sig_bits == 0) {
++ if (codec_dai->driver->capture.sig_bits == 0) {
+ bits = 0;
+ break;
+ }
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 7ecd0e8..f61ebb1 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -591,18 +591,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ {
+ struct snd_card *card;
+ struct list_head *p;
++ bool was_shutdown;
+
+ if (chip == (void *)-1L)
+ return;
+
+ card = chip->card;
+ down_write(&chip->shutdown_rwsem);
++ was_shutdown = chip->shutdown;
+ chip->shutdown = 1;
+ up_write(&chip->shutdown_rwsem);
+
+ mutex_lock(&register_mutex);
+- chip->num_interfaces--;
+- if (chip->num_interfaces <= 0) {
++ if (!was_shutdown) {
+ struct snd_usb_endpoint *ep;
+
+ snd_card_disconnect(card);
+@@ -622,6 +623,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ list_for_each(p, &chip->mixer_list) {
+ snd_usb_mixer_disconnect(p);
+ }
++ }
++
++ chip->num_interfaces--;
++ if (chip->num_interfaces <= 0) {
+ usb_chip[chip->index] = NULL;
+ mutex_unlock(&register_mutex);
+ snd_card_free_when_closed(card);
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 714b949..1f0dc1e 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ gfn_t base_gfn, unsigned long npages);
+
+ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+- unsigned long size)
++ unsigned long npages)
+ {
+ gfn_t end_gfn;
+ pfn_t pfn;
+
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+- end_gfn = gfn + (size >> PAGE_SHIFT);
++ end_gfn = gfn + npages;
+ gfn += 1;
+
+ if (is_error_noslot_pfn(pfn))
+@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ * Pin all pages we are about to map in memory. This is
+ * important because we unmap and unpin in 4kb steps later.
+ */
+- pfn = kvm_pin_pages(slot, gfn, page_size);
++ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
+ if (is_error_noslot_pfn(pfn)) {
+ gfn += 1;
+ continue;
+@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ if (r) {
+ printk(KERN_ERR "kvm_iommu_map_address:"
+ "iommu failed to map pfn=%llx\n", pfn);
+- kvm_unpin_pages(kvm, pfn, page_size);
++ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ goto unmap_pages;
+ }
+
diff --git a/3.17.2/4420_grsecurity-3.0-3.17.2-201411062034.patch b/3.17.3/4420_grsecurity-3.0-3.17.3-201411150027.patch
index 2da5648..d924e57 100644
--- a/3.17.2/4420_grsecurity-3.0-3.17.2-201411062034.patch
+++ b/3.17.3/4420_grsecurity-3.0-3.17.3-201411150027.patch
@@ -370,7 +370,7 @@ index 1edd5fd..107ff46 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 390afde..33153b5 100644
+index 57a45b1..62f9358 100644
--- a/Makefile
+++ b/Makefile
@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -4944,6 +4944,19 @@ index 3bf8f4e..5dd5491 100644
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
+diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
+index 6e0ed93..c17967f 100644
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
+ sub x1, x1, #2
+ 4: adds x1, x1, #1
+ b.mi 5f
+- strb wzr, [x0]
++USER(9f, strb wzr, [x0] )
+ 5: mov x0, #0
+ ret
+ ENDPROC(__clear_user)
diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
index c3a58a1..78fbf54 100644
--- a/arch/avr32/include/asm/cache.h
@@ -15347,7 +15360,7 @@ index f9e181a..db313b5 100644
err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 4299eb0..fefe70e 100644
+index 92a2e93..cd4d95f 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -15,8 +15,10 @@
@@ -15425,7 +15438,7 @@ index 4299eb0..fefe70e 100644
movl %ebp,%ebp /* zero extension */
pushq_cfi $__USER32_DS
/*CFI_REL_OFFSET ss,0*/
-@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
+@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
CFI_REL_OFFSET rsp,0
pushfq_cfi
/*CFI_REL_OFFSET rflags,0*/
@@ -15467,20 +15480,27 @@ index 4299eb0..fefe70e 100644
1: movl (%rbp),%ebp
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-+
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_CLOSE_USERLAND
+#endif
+
+ /*
+ * Sysenter doesn't filter flags, so we need to clear NT
+ * ourselves. To save a few cycles, we can check whether
+@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
+ jnz sysenter_fix_flags
+ sysenter_flags_fixed:
+
+- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ GET_THREAD_INFO(%r11)
+ orl $TS_COMPAT,TI_status(%r11)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,15 +209,18 @@ sysenter_do_call:
+@@ -172,15 +218,18 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -15503,7 +15523,7 @@ index 4299eb0..fefe70e 100644
CFI_REGISTER rip,rdx
RESTORE_ARGS 0,24,0,0,0,0
xorq %r8,%r8
-@@ -193,6 +243,9 @@ sysexit_from_sys_call:
+@@ -205,6 +254,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call __audit_syscall_entry
@@ -15513,7 +15533,7 @@ index 4299eb0..fefe70e 100644
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -204,7 +257,7 @@ sysexit_from_sys_call:
+@@ -216,7 +268,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
@@ -15522,7 +15542,7 @@ index 4299eb0..fefe70e 100644
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -215,11 +268,12 @@ sysexit_from_sys_call:
+@@ -227,11 +279,12 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */
call __audit_syscall_exit
@@ -15536,7 +15556,7 @@ index 4299eb0..fefe70e 100644
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
-@@ -237,7 +291,7 @@ sysexit_audit:
+@@ -253,7 +306,7 @@ sysenter_fix_flags:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -15545,7 +15565,7 @@ index 4299eb0..fefe70e 100644
jz sysenter_auditsys
#endif
SAVE_REST
-@@ -249,6 +303,9 @@ sysenter_tracesys:
+@@ -265,6 +318,9 @@ sysenter_tracesys:
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
@@ -15555,7 +15575,7 @@ index 4299eb0..fefe70e 100644
jmp sysenter_do_call
CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
-@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
+@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
@@ -15583,7 +15603,7 @@ index 4299eb0..fefe70e 100644
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
+@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
@@ -15611,7 +15631,7 @@ index 4299eb0..fefe70e 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -319,13 +395,16 @@ cstar_do_call:
+@@ -335,13 +410,16 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -15631,7 +15651,7 @@ index 4299eb0..fefe70e 100644
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
movl EFLAGS-ARGOFFSET(%rsp),%r11d
-@@ -352,7 +431,7 @@ sysretl_audit:
+@@ -368,7 +446,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -15640,7 +15660,7 @@ index 4299eb0..fefe70e 100644
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -366,11 +445,19 @@ cstar_tracesys:
+@@ -382,11 +460,19 @@ cstar_tracesys:
xchgl %ebp,%r9d
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
@@ -15660,7 +15680,7 @@ index 4299eb0..fefe70e 100644
movq $-EFAULT,%rax
jmp ia32_sysret
CFI_ENDPROC
-@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
+@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
@@ -15694,7 +15714,7 @@ index 4299eb0..fefe70e 100644
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -442,6 +536,9 @@ ia32_tracesys:
+@@ -458,6 +551,9 @@ ia32_tracesys:
RESTORE_REST
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
@@ -17449,7 +17469,7 @@ index ced283a..ffe04cc 100644
union {
u64 v64;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index 1a055c8..1a5082a 100644
+index ca3347a..1a5082a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
@@ -17462,19 +17482,7 @@ index 1a055c8..1a5082a 100644
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
extern unsigned int vdso32_enabled;
#endif
-@@ -160,8 +157,9 @@ do { \
- #define elf_check_arch(x) \
- ((x)->e_machine == EM_X86_64)
-
--#define compat_elf_check_arch(x) \
-- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
-+#define compat_elf_check_arch(x) \
-+ (elf_check_arch_ia32(x) || \
-+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
-
- #if __USER32_DS != __USER_DS
- # error "The following code assumes __USER32_DS == __USER_DS"
-@@ -248,7 +246,25 @@ extern int force_personality32;
+@@ -249,7 +246,25 @@ extern int force_personality32;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -17500,7 +17508,7 @@ index 1a055c8..1a5082a 100644
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
-@@ -297,17 +313,13 @@ do { \
+@@ -298,17 +313,13 @@ do { \
#define ARCH_DLINFO \
do { \
@@ -17520,7 +17528,7 @@ index 1a055c8..1a5082a 100644
} while (0)
#define AT_SYSINFO 32
-@@ -322,10 +334,10 @@ else \
+@@ -323,10 +334,10 @@ else \
#endif /* !CONFIG_X86_32 */
@@ -17533,7 +17541,7 @@ index 1a055c8..1a5082a 100644
selected_vdso32->sym___kernel_vsyscall)
struct linux_binprm;
-@@ -337,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
@@ -17807,40 +17815,6 @@ index 53cdfb2..d1369e6 100644
#define flush_insn_slot(p) do { } while (0)
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 92d3486..0d47ae1 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -991,6 +991,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
- kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
- }
-
-+static inline u64 get_canonical(u64 la)
-+{
-+ return ((int64_t)la << 16) >> 16;
-+}
-+
-+static inline bool is_noncanonical_address(u64 la)
-+{
-+#ifdef CONFIG_X86_64
-+ return get_canonical(la) != la;
-+#else
-+ return false;
-+#endif
-+}
-+
- #define TSS_IOPB_BASE_OFFSET 0x66
- #define TSS_BASE_SIZE 0x68
- #define TSS_IOPB_SIZE (65536 / 8)
-@@ -1049,7 +1063,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
- void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
-
- void kvm_define_shared_msr(unsigned index, u32 msr);
--void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
-+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
-
- bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
-
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 4ad6560..75c7bdd 100644
--- a/arch/x86/include/asm/local.h
@@ -21324,26 +21298,6 @@ index 7b0a55a..ad115bf 100644
#endif /* __ASSEMBLY__ */
/* top of stack page */
-diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
-index 0e79420..990a2fe 100644
---- a/arch/x86/include/uapi/asm/vmx.h
-+++ b/arch/x86/include/uapi/asm/vmx.h
-@@ -67,6 +67,7 @@
- #define EXIT_REASON_EPT_MISCONFIG 49
- #define EXIT_REASON_INVEPT 50
- #define EXIT_REASON_PREEMPTION_TIMER 52
-+#define EXIT_REASON_INVVPID 53
- #define EXIT_REASON_WBINVD 54
- #define EXIT_REASON_XSETBV 55
- #define EXIT_REASON_APIC_WRITE 56
-@@ -114,6 +115,7 @@
- { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
- { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
- { EXIT_REASON_INVD, "INVD" }, \
-+ { EXIT_REASON_INVVPID, "INVVPID" }, \
- { EXIT_REASON_INVPCID, "INVPCID" }
-
- #endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index ada2e2d..ca69e16 100644
--- a/arch/x86/kernel/Makefile
@@ -21358,10 +21312,10 @@ index ada2e2d..ca69e16 100644
obj-$(CONFIG_X86_64) += mcount_64.o
obj-y += syscall_$(BITS).o vsyscall_gtod.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index b436fc7..1ba7044 100644
+index a142e77..6222cdd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
-@@ -1272,7 +1272,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+@@ -1276,7 +1276,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
*/
@@ -21370,7 +21324,7 @@ index b436fc7..1ba7044 100644
/*
* Boxes that need ACPI disabled
*/
-@@ -1347,7 +1347,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+@@ -1351,7 +1351,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
};
/* second table for DMI checks that should run after early-quirks */
@@ -21584,7 +21538,7 @@ index 703130f..27a155d 100644
bp_int3_handler = handler;
bp_int3_addr = (u8 *)addr + sizeof(int3);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index 6776027..972266c 100644
+index 24b5894..6d9701b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
@@ -21910,7 +21864,7 @@ index 60e5497..8efbd2f 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index e4ab2b4..d487ba5 100644
+index 3126558..a1028f6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
@@ -27971,7 +27925,7 @@ index 5cdff03..80fa283 100644
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 2851d63..83bf567 100644
+index ed37a76..39f936e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
@@ -28759,7 +28713,7 @@ index 0d0e922..0886373 100644
if (!fixup_exception(regs)) {
task->thread.error_code = error_code;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index b6025f9..0cc6a1d 100644
+index b7e50bb..f4a93ae 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
@@ -29233,7 +29187,7 @@ index e48b674..a451dd9 100644
.read = native_io_apic_read,
.write = native_io_apic_write,
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
-index 940b142..0ad3a10 100644
+index 4c540c4..0b985b0 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
@@ -29275,7 +29229,7 @@ index 940b142..0ad3a10 100644
if (use_xsave())
err = xsave_user(buf);
else if (use_fxsr())
-@@ -314,6 +315,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
*/
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
@@ -29334,545 +29288,6 @@ index 38a0afe..94421a9 100644
return 0;
out:
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 03954f7..0f4ad73 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
- masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
- }
-
--static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
--{
-- register_address_increment(ctxt, &ctxt->_eip, rel);
--}
--
- static u32 desc_limit_scaled(struct desc_struct *desc)
- {
- u32 limit = get_desc_limit(desc);
-@@ -568,6 +563,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
- return emulate_exception(ctxt, NM_VECTOR, 0, false);
- }
-
-+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-+ int cs_l)
-+{
-+ switch (ctxt->op_bytes) {
-+ case 2:
-+ ctxt->_eip = (u16)dst;
-+ break;
-+ case 4:
-+ ctxt->_eip = (u32)dst;
-+ break;
-+#ifdef CONFIG_X86_64
-+ case 8:
-+ if ((cs_l && is_noncanonical_address(dst)) ||
-+ (!cs_l && (dst >> 32) != 0))
-+ return emulate_gp(ctxt, 0);
-+ ctxt->_eip = dst;
-+ break;
-+#endif
-+ default:
-+ WARN(1, "unsupported eip assignment size\n");
-+ }
-+ return X86EMUL_CONTINUE;
-+}
-+
-+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
-+{
-+ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
-+}
-+
-+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-+{
-+ return assign_eip_near(ctxt, ctxt->_eip + rel);
-+}
-+
- static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
- {
- u16 selector;
-@@ -750,8 +779,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
- static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
- unsigned size)
- {
-- if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
-- return __do_insn_fetch_bytes(ctxt, size);
-+ unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
-+
-+ if (unlikely(done_size < size))
-+ return __do_insn_fetch_bytes(ctxt, size - done_size);
- else
- return X86EMUL_CONTINUE;
- }
-@@ -1415,7 +1446,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-
- /* Does not support long mode */
- static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-- u16 selector, int seg, u8 cpl, bool in_task_switch)
-+ u16 selector, int seg, u8 cpl,
-+ bool in_task_switch,
-+ struct desc_struct *desc)
- {
- struct desc_struct seg_desc, old_desc;
- u8 dpl, rpl;
-@@ -1547,6 +1580,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- }
- load:
- ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
-+ if (desc)
-+ *desc = seg_desc;
- return X86EMUL_CONTINUE;
- exception:
- emulate_exception(ctxt, err_vec, err_code, true);
-@@ -1557,7 +1592,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- u16 selector, int seg)
- {
- u8 cpl = ctxt->ops->cpl(ctxt);
-- return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
-+ return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
- }
-
- static void write_register_operand(struct operand *op)
-@@ -1951,17 +1986,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
- static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
- {
- int rc;
-- unsigned short sel;
-+ unsigned short sel, old_sel;
-+ struct desc_struct old_desc, new_desc;
-+ const struct x86_emulate_ops *ops = ctxt->ops;
-+ u8 cpl = ctxt->ops->cpl(ctxt);
-+
-+ /* Assignment of RIP may only fail in 64-bit mode */
-+ if (ctxt->mode == X86EMUL_MODE_PROT64)
-+ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
-+ VCPU_SREG_CS);
-
- memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
-
-- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
-+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
-+ &new_desc);
- if (rc != X86EMUL_CONTINUE)
- return rc;
-
-- ctxt->_eip = 0;
-- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
-- return X86EMUL_CONTINUE;
-+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
-+ if (rc != X86EMUL_CONTINUE) {
-+ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
-+ /* assigning eip failed; restore the old cs */
-+ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
-+ return rc;
-+ }
-+ return rc;
- }
-
- static int em_grp45(struct x86_emulate_ctxt *ctxt)
-@@ -1972,13 +2021,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
- case 2: /* call near abs */ {
- long int old_eip;
- old_eip = ctxt->_eip;
-- ctxt->_eip = ctxt->src.val;
-+ rc = assign_eip_near(ctxt, ctxt->src.val);
-+ if (rc != X86EMUL_CONTINUE)
-+ break;
- ctxt->src.val = old_eip;
- rc = em_push(ctxt);
- break;
- }
- case 4: /* jmp abs */
-- ctxt->_eip = ctxt->src.val;
-+ rc = assign_eip_near(ctxt, ctxt->src.val);
- break;
- case 5: /* jmp far */
- rc = em_jmp_far(ctxt);
-@@ -2013,30 +2064,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
-
- static int em_ret(struct x86_emulate_ctxt *ctxt)
- {
-- ctxt->dst.type = OP_REG;
-- ctxt->dst.addr.reg = &ctxt->_eip;
-- ctxt->dst.bytes = ctxt->op_bytes;
-- return em_pop(ctxt);
-+ int rc;
-+ unsigned long eip;
-+
-+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
-+ if (rc != X86EMUL_CONTINUE)
-+ return rc;
-+
-+ return assign_eip_near(ctxt, eip);
- }
-
- static int em_ret_far(struct x86_emulate_ctxt *ctxt)
- {
- int rc;
-- unsigned long cs;
-+ unsigned long eip, cs;
-+ u16 old_cs;
- int cpl = ctxt->ops->cpl(ctxt);
-+ struct desc_struct old_desc, new_desc;
-+ const struct x86_emulate_ops *ops = ctxt->ops;
-
-- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
-+ if (ctxt->mode == X86EMUL_MODE_PROT64)
-+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
-+ VCPU_SREG_CS);
-+
-+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
- if (rc != X86EMUL_CONTINUE)
- return rc;
-- if (ctxt->op_bytes == 4)
-- ctxt->_eip = (u32)ctxt->_eip;
- rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
- if (rc != X86EMUL_CONTINUE)
- return rc;
- /* Outer-privilege level return is not implemented */
- if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
- return X86EMUL_UNHANDLEABLE;
-- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
-+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
-+ &new_desc);
-+ if (rc != X86EMUL_CONTINUE)
-+ return rc;
-+ rc = assign_eip_far(ctxt, eip, new_desc.l);
-+ if (rc != X86EMUL_CONTINUE) {
-+ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
-+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
-+ }
- return rc;
- }
-
-@@ -2297,7 +2365,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
- {
- const struct x86_emulate_ops *ops = ctxt->ops;
- struct desc_struct cs, ss;
-- u64 msr_data;
-+ u64 msr_data, rcx, rdx;
- int usermode;
- u16 cs_sel = 0, ss_sel = 0;
-
-@@ -2313,6 +2381,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
- else
- usermode = X86EMUL_MODE_PROT32;
-
-+ rcx = reg_read(ctxt, VCPU_REGS_RCX);
-+ rdx = reg_read(ctxt, VCPU_REGS_RDX);
-+
- cs.dpl = 3;
- ss.dpl = 3;
- ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
-@@ -2330,6 +2401,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
- ss_sel = cs_sel + 8;
- cs.d = 0;
- cs.l = 1;
-+ if (is_noncanonical_address(rcx) ||
-+ is_noncanonical_address(rdx))
-+ return emulate_gp(ctxt, 0);
- break;
- }
- cs_sel |= SELECTOR_RPL_MASK;
-@@ -2338,8 +2412,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
- ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
- ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
-
-- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
-- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
-+ ctxt->_eip = rdx;
-+ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
-
- return X86EMUL_CONTINUE;
- }
-@@ -2457,19 +2531,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
- * Now load segment descriptors. If fault happens at this stage
- * it is handled in a context of new task
- */
-- ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-
-@@ -2594,25 +2673,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
- * Now load segment descriptors. If fault happenes at this stage
- * it is handled in a context of new task
- */
-- ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
-+ cpl, true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-- ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
-+ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
-+ true, NULL);
- if (ret != X86EMUL_CONTINUE)
- return ret;
-
-@@ -2880,10 +2966,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
-
- static int em_call(struct x86_emulate_ctxt *ctxt)
- {
-+ int rc;
- long rel = ctxt->src.val;
-
- ctxt->src.val = (unsigned long)ctxt->_eip;
-- jmp_rel(ctxt, rel);
-+ rc = jmp_rel(ctxt, rel);
-+ if (rc != X86EMUL_CONTINUE)
-+ return rc;
- return em_push(ctxt);
- }
-
-@@ -2892,34 +2981,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
- u16 sel, old_cs;
- ulong old_eip;
- int rc;
-+ struct desc_struct old_desc, new_desc;
-+ const struct x86_emulate_ops *ops = ctxt->ops;
-+ int cpl = ctxt->ops->cpl(ctxt);
-
-- old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
- old_eip = ctxt->_eip;
-+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
-
- memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
-- if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
-+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
-+ &new_desc);
-+ if (rc != X86EMUL_CONTINUE)
- return X86EMUL_CONTINUE;
-
-- ctxt->_eip = 0;
-- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
-+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
-+ if (rc != X86EMUL_CONTINUE)
-+ goto fail;
-
- ctxt->src.val = old_cs;
- rc = em_push(ctxt);
- if (rc != X86EMUL_CONTINUE)
-- return rc;
-+ goto fail;
-
- ctxt->src.val = old_eip;
-- return em_push(ctxt);
-+ rc = em_push(ctxt);
-+ /* If we failed, we tainted the memory, but the very least we should
-+ restore cs */
-+ if (rc != X86EMUL_CONTINUE)
-+ goto fail;
-+ return rc;
-+fail:
-+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
-+ return rc;
-+
- }
-
- static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
- {
- int rc;
-+ unsigned long eip;
-
-- ctxt->dst.type = OP_REG;
-- ctxt->dst.addr.reg = &ctxt->_eip;
-- ctxt->dst.bytes = ctxt->op_bytes;
-- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
-+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
-+ if (rc != X86EMUL_CONTINUE)
-+ return rc;
-+ rc = assign_eip_near(ctxt, eip);
- if (rc != X86EMUL_CONTINUE)
- return rc;
- rsp_increment(ctxt, ctxt->src.val);
-@@ -3250,20 +3355,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
-
- static int em_loop(struct x86_emulate_ctxt *ctxt)
- {
-+ int rc = X86EMUL_CONTINUE;
-+
- register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
- if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
- (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
-- jmp_rel(ctxt, ctxt->src.val);
-+ rc = jmp_rel(ctxt, ctxt->src.val);
-
-- return X86EMUL_CONTINUE;
-+ return rc;
- }
-
- static int em_jcxz(struct x86_emulate_ctxt *ctxt)
- {
-+ int rc = X86EMUL_CONTINUE;
-+
- if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
-- jmp_rel(ctxt, ctxt->src.val);
-+ rc = jmp_rel(ctxt, ctxt->src.val);
-
-- return X86EMUL_CONTINUE;
-+ return rc;
- }
-
- static int em_in(struct x86_emulate_ctxt *ctxt)
-@@ -3351,6 +3460,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
- return X86EMUL_CONTINUE;
- }
-
-+static int em_clflush(struct x86_emulate_ctxt *ctxt)
-+{
-+ /* emulating clflush regardless of cpuid */
-+ return X86EMUL_CONTINUE;
-+}
-+
- static bool valid_cr(int nr)
- {
- switch (nr) {
-@@ -3683,6 +3798,16 @@ static const struct opcode group11[] = {
- X7(D(Undefined)),
- };
-
-+static const struct gprefix pfx_0f_ae_7 = {
-+ I(SrcMem | ByteOp, em_clflush), N, N, N,
-+};
-+
-+static const struct group_dual group15 = { {
-+ N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
-+}, {
-+ N, N, N, N, N, N, N, N,
-+} };
-+
- static const struct gprefix pfx_0f_6f_0f_7f = {
- I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
- };
-@@ -3887,10 +4012,11 @@ static const struct opcode twobyte_table[256] = {
- N, I(ImplicitOps | EmulateOnUD, em_syscall),
- II(ImplicitOps | Priv, em_clts, clts), N,
- DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
-- N, D(ImplicitOps | ModRM), N, N,
-+ N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
- /* 0x10 - 0x1F */
- N, N, N, N, N, N, N, N,
-- D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
-+ D(ImplicitOps | ModRM | SrcMem | NoAccess),
-+ N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
- /* 0x20 - 0x2F */
- DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
- DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
-@@ -3942,7 +4068,7 @@ static const struct opcode twobyte_table[256] = {
- F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
- F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
- F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
-- D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
-+ GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
- /* 0xB0 - 0xB7 */
- I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
- I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
-@@ -4458,10 +4584,10 @@ done_prefixes:
- /* Decode and fetch the destination operand: register or memory. */
- rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
-
--done:
- if (ctxt->rip_relative)
- ctxt->memopp->addr.mem.ea += ctxt->_eip;
-
-+done:
- return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
- }
-
-@@ -4711,7 +4837,7 @@ special_insn:
- break;
- case 0x70 ... 0x7f: /* jcc (short) */
- if (test_cc(ctxt->b, ctxt->eflags))
-- jmp_rel(ctxt, ctxt->src.val);
-+ rc = jmp_rel(ctxt, ctxt->src.val);
- break;
- case 0x8d: /* lea r16/r32, m */
- ctxt->dst.val = ctxt->src.addr.mem.ea;
-@@ -4741,7 +4867,7 @@ special_insn:
- break;
- case 0xe9: /* jmp rel */
- case 0xeb: /* jmp rel short */
-- jmp_rel(ctxt, ctxt->src.val);
-+ rc = jmp_rel(ctxt, ctxt->src.val);
- ctxt->dst.type = OP_NONE; /* Disable writeback. */
- break;
- case 0xf4: /* hlt */
-@@ -4864,13 +4990,11 @@ twobyte_insn:
- break;
- case 0x80 ... 0x8f: /* jnz rel, etc*/
- if (test_cc(ctxt->b, ctxt->eflags))
-- jmp_rel(ctxt, ctxt->src.val);
-+ rc = jmp_rel(ctxt, ctxt->src.val);
- break;
- case 0x90 ... 0x9f: /* setcc r/m8 */
- ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
- break;
-- case 0xae: /* clflush */
-- break;
- case 0xb6 ... 0xb7: /* movzx */
- ctxt->dst.bytes = ctxt->op_bytes;
- ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
-diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
-index 518d864..298781d 100644
---- a/arch/x86/kvm/i8254.c
-+++ b/arch/x86/kvm/i8254.c
-@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
- return;
-
- timer = &pit->pit_state.timer;
-+ mutex_lock(&pit->pit_state.lock);
- if (hrtimer_cancel(timer))
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
-+ mutex_unlock(&pit->pit_state.lock);
- }
-
- static void destroy_pit_timer(struct kvm_pit *pit)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 08e8a89..0e9183e 100644
--- a/arch/x86/kvm/lapic.c
@@ -29900,31 +29315,9 @@ index 4107765..d9eb358 100644
goto error;
walker->ptep_user[walker->level - 1] = ptep_user;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index ddf7427..fd84599 100644
+index 78dadc3..fd84599 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
-@@ -3234,7 +3234,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
- msr.host_initiated = false;
-
- svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-- if (svm_set_msr(&svm->vcpu, &msr)) {
-+ if (kvm_set_msr(&svm->vcpu, &msr)) {
- trace_kvm_msr_write_ex(ecx, data);
- kvm_inject_gp(&svm->vcpu, 0);
- } else {
-@@ -3534,9 +3534,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
-
- if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
- || !svm_exit_handlers[exit_code]) {
-- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-- kvm_run->hw.hardware_exit_reason = exit_code;
-- return 0;
-+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
-+ kvm_queue_exception(vcpu, UD_VECTOR);
-+ return 1;
- }
-
- return svm_exit_handlers[exit_code](svm);
@@ -3547,7 +3547,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
int cpu = raw_smp_processor_id();
@@ -29949,7 +29342,7 @@ index ddf7427..fd84599 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 6a118fa..c0b3c00 100644
+index 41a5426..c0b3c00 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1341,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
@@ -29999,25 +29392,7 @@ index 6a118fa..c0b3c00 100644
{
u64 host_tsc, tsc_offset;
-@@ -2632,12 +2640,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
- default:
- msr = find_msr_entry(vmx, msr_index);
- if (msr) {
-+ u64 old_msr_data = msr->data;
- msr->data = data;
- if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
- preempt_disable();
-- kvm_set_shared_msr(msr->index, msr->data,
-- msr->mask);
-+ ret = kvm_set_shared_msr(msr->index, msr->data,
-+ msr->mask);
- preempt_enable();
-+ if (ret)
-+ msr->data = old_msr_data;
- }
- break;
- }
-@@ -3111,8 +3122,11 @@ static __init int hardware_setup(void)
+@@ -3114,8 +3122,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -30031,7 +29406,7 @@ index 6a118fa..c0b3c00 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -3123,13 +3137,15 @@ static __init int hardware_setup(void)
+@@ -3126,13 +3137,15 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
@@ -30051,7 +29426,7 @@ index 6a118fa..c0b3c00 100644
if (nested)
nested_vmx_setup_ctls_msrs();
-@@ -4239,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4242,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
unsigned long cr4;
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
@@ -30062,7 +29437,7 @@ index 6a118fa..c0b3c00 100644
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = read_cr4();
-@@ -4266,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4269,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
@@ -30071,60 +29446,7 @@ index 6a118fa..c0b3c00 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -5263,7 +5282,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
- msr.data = data;
- msr.index = ecx;
- msr.host_initiated = false;
-- if (vmx_set_msr(vcpu, &msr) != 0) {
-+ if (kvm_set_msr(vcpu, &msr) != 0) {
- trace_kvm_msr_write_ex(ecx, data);
- kvm_inject_gp(vcpu, 0);
- return 1;
-@@ -6636,6 +6655,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
- return 1;
- }
-
-+static int handle_invvpid(struct kvm_vcpu *vcpu)
-+{
-+ kvm_queue_exception(vcpu, UD_VECTOR);
-+ return 1;
-+}
-+
- /*
- * The exit handlers return 1 if the exit was handled fully and guest execution
- * may resume. Otherwise they set the kvm_run parameter to indicate what needs
-@@ -6681,6 +6706,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
- [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
- [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
- [EXIT_REASON_INVEPT] = handle_invept,
-+ [EXIT_REASON_INVVPID] = handle_invvpid,
- };
-
- static const int kvm_vmx_max_exit_handlers =
-@@ -6914,7 +6940,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
- case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
- case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
- case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
-- case EXIT_REASON_INVEPT:
-+ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
- /*
- * VMX instructions trap unconditionally. This allows L1 to
- * emulate them for its L2 guest, i.e., allows 3-level nesting!
-@@ -7055,10 +7081,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
- && kvm_vmx_exit_handlers[exit_reason])
- return kvm_vmx_exit_handlers[exit_reason](vcpu);
- else {
-- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
-- vcpu->run->hw.hardware_exit_reason = exit_reason;
-+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
-+ kvm_queue_exception(vcpu, UD_VECTOR);
-+ return 1;
- }
-- return 0;
- }
-
- static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
-@@ -7465,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7475,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
@@ -30137,7 +29459,7 @@ index 6a118fa..c0b3c00 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
-@@ -7517,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7527,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -30149,7 +29471,7 @@ index 6a118fa..c0b3c00 100644
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
-@@ -7530,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7540,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
@@ -30158,7 +29480,7 @@ index 6a118fa..c0b3c00 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -7539,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7549,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -30180,82 +29502,10 @@ index 6a118fa..c0b3c00 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 8f1e22d..c23d3c5 100644
+index 9d292e8..cea125a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
- shared_msr_update(i, shared_msrs_global.msrs[i]);
- }
-
--void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
-+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
- {
- unsigned int cpu = smp_processor_id();
- struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
-+ int err;
-
- if (((value ^ smsr->values[slot].curr) & mask) == 0)
-- return;
-+ return 0;
- smsr->values[slot].curr = value;
-- wrmsrl(shared_msrs_global.msrs[slot], value);
-+ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
-+ if (err)
-+ return 1;
-+
- if (!smsr->registered) {
- smsr->urn.on_user_return = kvm_on_user_return;
- user_return_notifier_register(&smsr->urn);
- smsr->registered = true;
- }
-+ return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
-
-@@ -984,7 +989,6 @@ void kvm_enable_efer_bits(u64 mask)
- }
- EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
-
--
- /*
- * Writes msr value into into the appropriate "register".
- * Returns 0 on success, non-0 otherwise.
-@@ -992,8 +996,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
- */
- int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
- {
-+ switch (msr->index) {
-+ case MSR_FS_BASE:
-+ case MSR_GS_BASE:
-+ case MSR_KERNEL_GS_BASE:
-+ case MSR_CSTAR:
-+ case MSR_LSTAR:
-+ if (is_noncanonical_address(msr->data))
-+ return 1;
-+ break;
-+ case MSR_IA32_SYSENTER_EIP:
-+ case MSR_IA32_SYSENTER_ESP:
-+ /*
-+ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
-+ * non-canonical address is written on Intel but not on
-+ * AMD (which ignores the top 32-bits, because it does
-+ * not implement 64-bit SYSENTER).
-+ *
-+ * 64-bit code should hence be able to write a non-canonical
-+ * value on AMD. Making the address canonical ensures that
-+ * vmentry does not fail on Intel after writing a non-canonical
-+ * value, and that something deterministic happens if the guest
-+ * invokes 64-bit SYSENTER.
-+ */
-+ msr->data = get_canonical(msr->data);
-+ }
- return kvm_x86_ops->set_msr(vcpu, msr);
- }
-+EXPORT_SYMBOL_GPL(kvm_set_msr);
-
- /*
- * Adapt set_msr() to msr_io()'s calling convention
-@@ -1827,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1857,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -30266,7 +29516,7 @@ index 8f1e22d..c23d3c5 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2749,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2779,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -30275,7 +29525,16 @@ index 8f1e22d..c23d3c5 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -5609,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -5002,7 +5004,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+
+ ++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
+- if (!is_guest_mode(vcpu)) {
++ if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+@@ -5639,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
@@ -34869,7 +34128,7 @@ index a32b706..efb308b 100644
unsigned long uninitialized_var(pfn_align);
int i, nid;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index ae242a7..1c7998f 100644
+index 36de293..b820ddc 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
@@ -35567,7 +34826,7 @@ index 6440221..f84b5c7 100644
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index 5c8cb80..5fd7860 100644
+index c881ba8..71aca2e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -15,7 +15,11 @@
@@ -35630,7 +34889,7 @@ index 5c8cb80..5fd7860 100644
return header;
}
-@@ -853,7 +853,9 @@ common_load: ctx->seen_ld_abs = true;
+@@ -864,7 +864,9 @@ common_load:
pr_err("bpf_jit_compile fatal error\n");
return -EFAULT;
}
@@ -35640,7 +34899,7 @@ index 5c8cb80..5fd7860 100644
}
proglen += ilen;
addrs[i] = proglen;
-@@ -868,7 +870,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
+@@ -879,7 +881,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
void bpf_int_jit_compile(struct bpf_prog *prog)
{
@@ -35649,7 +34908,7 @@ index 5c8cb80..5fd7860 100644
int proglen, oldproglen = 0;
struct jit_context ctx = {};
u8 *image = NULL;
-@@ -900,7 +902,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+@@ -911,7 +913,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (proglen <= 0) {
image = NULL;
if (header)
@@ -35658,7 +34917,7 @@ index 5c8cb80..5fd7860 100644
goto out;
}
if (image) {
-@@ -922,7 +924,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+@@ -935,7 +937,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (image) {
bpf_flush_icache(header, image + proglen);
@@ -35666,7 +34925,7 @@ index 5c8cb80..5fd7860 100644
prog->bpf_func = (void *)image;
prog->jited = 1;
}
-@@ -930,23 +931,15 @@ out:
+@@ -943,23 +944,15 @@ out:
kfree(addrs);
}
@@ -37648,7 +36907,7 @@ index 56d08fd..2e07090 100644
(u8 *) pte, count) < count) {
kfree(pte);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index 9b8eaec..c20279a 100644
+index a6d6270..c4bb72f 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
@@ -39657,7 +38916,7 @@ index 1a00001..c0d4253 100644
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
-index 89c497c..9c736ae 100644
+index 04a14e0..5b8f0aa 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
@@ -40530,7 +39789,7 @@ index 0ea9986..e7b07e4 100644
if (cmd != SIOCWANDEV)
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index c18d41d..7c499f3 100644
+index 8c86a95..7c499f3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -289,9 +289,6 @@
@@ -40565,33 +39824,6 @@ index c18d41d..7c499f3 100644
unsigned int add =
((pool_size - entropy_count)*anfrac*3) >> s;
-@@ -1106,7 +1103,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
- spin_unlock_irqrestore(&r->lock, flags);
-
-- memset(workspace, 0, sizeof(workspace));
-+ memzero_explicit(workspace, sizeof(workspace));
-
- /*
- * In case the hash function has some recognizable output
-@@ -1118,7 +1115,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
- hash.w[2] ^= rol32(hash.w[2], 16);
-
- memcpy(out, &hash, EXTRACT_SIZE);
-- memset(&hash, 0, sizeof(hash));
-+ memzero_explicit(&hash, sizeof(hash));
- }
-
- /*
-@@ -1175,7 +1172,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- }
-
- /* Wipe data just returned from memory */
-- memset(tmp, 0, sizeof(tmp));
-+ memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
- }
@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
extract_buf(r, tmp);
@@ -40601,15 +39833,6 @@ index c18d41d..7c499f3 100644
ret = -EFAULT;
break;
}
-@@ -1218,7 +1215,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
- }
-
- /* Wipe data just returned from memory */
-- memset(tmp, 0, sizeof(tmp));
-+ memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
- }
@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
static int proc_do_uuid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -40866,10 +40089,10 @@ index b0c18ed..1713a80 100644
cpu_notifier_register_begin();
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 61190f6..fcd899a 100644
+index c05821e..373651a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -2095,7 +2095,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+@@ -2106,7 +2106,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
}
mutex_lock(&cpufreq_governor_mutex);
@@ -40878,7 +40101,7 @@ index 61190f6..fcd899a 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2311,7 +2311,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+@@ -2322,7 +2322,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -40887,7 +40110,7 @@ index 61190f6..fcd899a 100644
.notifier_call = cpufreq_cpu_callback,
};
-@@ -2351,13 +2351,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2362,13 +2362,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -40907,7 +40130,7 @@ index 61190f6..fcd899a 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
-@@ -2414,8 +2418,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2425,8 +2429,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -40921,7 +40144,7 @@ index 61190f6..fcd899a 100644
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
-@@ -2430,8 +2437,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2441,8 +2448,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
* Check if driver provides function to enable boost -
* if not, use cpufreq_boost_set_sw as default
*/
@@ -41023,10 +40246,10 @@ index ad3f38f..8f086cd 100644
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index 0668b38..2f3ea18 100644
+index 27bb6d3..4cf595c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
-@@ -120,10 +120,10 @@ struct pstate_funcs {
+@@ -133,10 +133,10 @@ struct pstate_funcs {
struct cpu_defaults {
struct pstate_adjust_policy pid_policy;
struct pstate_funcs funcs;
@@ -41039,7 +40262,7 @@ index 0668b38..2f3ea18 100644
struct perf_limits {
int no_turbo;
-@@ -527,17 +527,17 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+@@ -594,18 +594,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
cpu->pstate.current_pstate = pstate;
@@ -41052,9 +40275,11 @@ index 0668b38..2f3ea18 100644
- cpu->pstate.min_pstate = pstate_funcs.get_min();
- cpu->pstate.max_pstate = pstate_funcs.get_max();
- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+- cpu->pstate.scaling = pstate_funcs.get_scaling();
+ cpu->pstate.min_pstate = pstate_funcs->get_min();
+ cpu->pstate.max_pstate = pstate_funcs->get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
++ cpu->pstate.scaling = pstate_funcs->get_scaling();
- if (pstate_funcs.get_vid)
- pstate_funcs.get_vid(cpu);
@@ -41063,7 +40288,7 @@ index 0668b38..2f3ea18 100644
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
-@@ -810,9 +810,9 @@ static int intel_pstate_msrs_not_valid(void)
+@@ -875,9 +875,9 @@ static int intel_pstate_msrs_not_valid(void)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
@@ -41076,7 +40301,7 @@ index 0668b38..2f3ea18 100644
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
-@@ -826,7 +826,7 @@ static int intel_pstate_msrs_not_valid(void)
+@@ -891,7 +891,7 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
@@ -41085,13 +40310,14 @@ index 0668b38..2f3ea18 100644
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.p_gain_pct = policy->p_gain_pct;
-@@ -838,11 +838,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
+@@ -903,12 +903,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
- pstate_funcs.get_max = funcs->get_max;
- pstate_funcs.get_min = funcs->get_min;
- pstate_funcs.get_turbo = funcs->get_turbo;
+- pstate_funcs.get_scaling = funcs->get_scaling;
- pstate_funcs.set = funcs->set;
- pstate_funcs.get_vid = funcs->get_vid;
+ pstate_funcs = funcs;
@@ -41575,6 +40801,20 @@ index 57ea7f4..af06b76 100644
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 5d997a3..2a3973a 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
+ _IOC_SIZE(cmd) > sizeof(buffer))
+ return -ENOTTY;
+
+- if (_IOC_DIR(cmd) == _IOC_READ)
+- memset(&buffer, 0, _IOC_SIZE(cmd));
++ memset(&buffer, 0, sizeof(buffer));
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 2c6d5e1..a2cca6b 100644
--- a/drivers/firewire/core-device.c
@@ -42153,7 +41393,7 @@ index 2e0613e..a8b94d9 100644
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index d8324c6..fc9b704 100644
+index b71a026..8b6cc10 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12437,13 +12437,13 @@ struct intel_quirk {
@@ -42750,7 +41990,7 @@ index 4a85bb6..aaea819 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 12c8329..a69e2e8 100644
+index 6684fbf..2e11bf0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1213,7 +1213,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
@@ -47221,6 +46461,20 @@ index 6b0b8b6b..4038398 100644
return 1;
}
+diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+index 5c45c9d..9c29552 100644
+--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
++++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
++ if (cmd->msg_len > sizeof(b) - 4)
++ return -EINVAL;
++
+ memcpy(&b[4], cmd->msg, cmd->msg_len);
+
+ state->config->send_command(fe, 0x72,
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index cca6c2f..77b9a18 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -48192,7 +47446,7 @@ index ccec0e3..199f9ce 100644
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
-index fa5954a..56840e5 100644
+index 1e47903..7683916 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
@@ -49124,7 +48378,7 @@ index 8cffcdf..aadf043 100644
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
-index e5be511..16cb55c 100644
+index fac3821..52c1a04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2355,7 +2355,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -49393,18 +48647,6 @@ index d5e07de..e3bf20a 100644
spinlock_t request_lock;
struct list_head req_list;
-diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
-index 0fcb5e7..148fda3 100644
---- a/drivers/net/hyperv/netvsc_drv.c
-+++ b/drivers/net/hyperv/netvsc_drv.c
-@@ -556,6 +556,7 @@ do_lso:
- do_send:
- /* Start filling in the page buffers with the rndis hdr */
- rndis_msg->msg_len += rndis_msg_size;
-+ packet->total_data_buflen = rndis_msg->msg_len;
- packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
- skb, &packet->page_buf[0]);
-
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2b86f0b..ecc996f 100644
--- a/drivers/net/hyperv/rndis_filter.c
@@ -49441,7 +48683,7 @@ index 9ce854f..e43fa17 100644
priv = netdev_priv(dev);
priv->phy = phy;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
-index 726edab..8939092 100644
+index 5f17ad0..e0463c8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
@@ -49453,7 +48695,7 @@ index 726edab..8939092 100644
}
/* called under rcu_read_lock() from netif_receive_skb */
-@@ -1144,13 +1144,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
+@@ -1150,13 +1150,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
int macvlan_link_register(struct rtnl_link_ops *ops)
{
/* common fields */
@@ -49476,7 +48718,7 @@ index 726edab..8939092 100644
return rtnl_link_register(ops);
};
-@@ -1230,7 +1232,7 @@ static int macvlan_device_event(struct notifier_block *unused,
+@@ -1236,7 +1238,7 @@ static int macvlan_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -49486,10 +48728,10 @@ index 726edab..8939092 100644
};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 0c6adaa..0784e3f 100644
+index 9b5481c..eb32d45 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
-@@ -1018,7 +1018,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+@@ -1021,7 +1021,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
}
ret = 0;
@@ -49498,7 +48740,7 @@ index 0c6adaa..0784e3f 100644
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
macvtap_put_vlan(vlan);
-@@ -1188,7 +1188,7 @@ static int macvtap_device_event(struct notifier_block *unused,
+@@ -1191,7 +1191,7 @@ static int macvtap_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -49508,18 +48750,9 @@ index 0c6adaa..0784e3f 100644
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
-index fa0d717..bab8c01 100644
+index 90c639b..bab8c01 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
-@@ -594,7 +594,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
- }
-- if (atomic_long_read(&file->f_count) <= 2) {
-+ if (atomic_long_read(&file->f_count) < 2) {
- ppp_release(NULL, file);
- err = 0;
- } else
@@ -1020,7 +1020,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
struct ppp_stats stats;
@@ -49565,10 +48798,10 @@ index 1f76c2ea..9681171 100644
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index acaaf67..a33483d 100644
+index 610d166..a3cc744 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
-@@ -1855,7 +1855,7 @@ unlock:
+@@ -1859,7 +1859,7 @@ unlock:
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
@@ -49577,7 +48810,7 @@ index acaaf67..a33483d 100644
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
-@@ -1868,6 +1868,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+@@ -1872,6 +1872,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned int ifindex;
int ret;
@@ -49727,59 +48960,108 @@ index 59caa06..de191b3 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index beb377b..b5bbf08 100644
+index b483127..69aa8ff 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
-@@ -1440,9 +1440,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
- if (!in6_dev)
- goto out;
+@@ -274,13 +274,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
+ }
-- if (!pskb_may_pull(skb, skb->len))
-- goto out;
--
- iphdr = ipv6_hdr(skb);
- saddr = &iphdr->saddr;
- daddr = &iphdr->daddr;
-@@ -1717,6 +1714,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
- struct pcpu_sw_netstats *tx_stats, *rx_stats;
- union vxlan_addr loopback;
- union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
-+ struct net_device *dev = skb->dev;
-+ int len = skb->len;
-
- tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
-@@ -1740,16 +1739,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tx_packets++;
-- tx_stats->tx_bytes += skb->len;
-+ tx_stats->tx_bytes += len;
- u64_stats_update_end(&tx_stats->syncp);
-
- if (netif_rx(skb) == NET_RX_SUCCESS) {
- u64_stats_update_begin(&rx_stats->syncp);
- rx_stats->rx_packets++;
-- rx_stats->rx_bytes += skb->len;
-+ rx_stats->rx_bytes += len;
- u64_stats_update_end(&rx_stats->syncp);
- } else {
-- skb->dev->stats.rx_dropped++;
-+ dev->stats.rx_dropped++;
+-/* Find VXLAN socket based on network namespace and UDP port */
+-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
++/* Find VXLAN socket based on network namespace, address family and UDP port */
++static struct vxlan_sock *vxlan_find_sock(struct net *net,
++ sa_family_t family, __be16 port)
+ {
+ struct vxlan_sock *vs;
+
+ hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
+- if (inet_sk(vs->sock->sk)->inet_sport == port)
++ if (inet_sk(vs->sock->sk)->inet_sport == port &&
++ inet_sk(vs->sock->sk)->sk.sk_family == family)
+ return vs;
}
+ return NULL;
+@@ -299,11 +301,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
}
-@@ -1927,7 +1926,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
- return arp_reduce(dev, skb);
- #if IS_ENABLED(CONFIG_IPV6)
- else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
-- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
-+ pskb_may_pull(skb, sizeof(struct ipv6hdr)
-+ + sizeof(struct nd_msg)) &&
- ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
- struct nd_msg *msg;
-
-@@ -2750,7 +2750,7 @@ nla_put_failure:
+ /* Look up VNI in a per net namespace table */
+-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
++static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
++ sa_family_t family, __be16 port)
+ {
+ struct vxlan_sock *vs;
+
+- vs = vxlan_find_sock(net, port);
++ vs = vxlan_find_sock(net, family, port);
+ if (!vs)
+ return NULL;
+
+@@ -1820,7 +1823,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ struct vxlan_dev *dst_vxlan;
+
+ ip_rt_put(rt);
+- dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
++ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
++ dst->sa.sa_family, dst_port);
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+@@ -1874,7 +1878,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ struct vxlan_dev *dst_vxlan;
+
+ dst_release(ndst);
+- dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
++ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
++ dst->sa.sa_family, dst_port);
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+@@ -2034,13 +2039,15 @@ static int vxlan_init(struct net_device *dev)
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_sock *vs;
++ bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ spin_lock(&vn->sock_lock);
+- vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
++ vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
++ vxlan->dst_port);
+ if (vs) {
+ /* If we have a socket with same port already, reuse it */
+ atomic_inc(&vs->refcnt);
+@@ -2439,6 +2446,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_sock *vs;
++ bool ipv6 = flags & VXLAN_F_IPV6;
+
+ vs = vxlan_socket_create(net, port, rcv, data, flags);
+ if (!IS_ERR(vs))
+@@ -2448,7 +2456,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ return vs;
+
+ spin_lock(&vn->sock_lock);
+- vs = vxlan_find_sock(net, port);
++ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+ if (vs) {
+ if (vs->rcv == rcv)
+ atomic_inc(&vs->refcnt);
+@@ -2607,7 +2615,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
+ nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
+ vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+
+- if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
++ if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
++ vxlan->dst_port)) {
+ pr_info("duplicate VNI %u\n", vni);
+ return -EEXIST;
+ }
+@@ -2751,7 +2760,7 @@ nla_put_failure:
return -EMSGSIZE;
}
@@ -49788,7 +49070,7 @@ index beb377b..b5bbf08 100644
.kind = "vxlan",
.maxtype = IFLA_VXLAN_MAX,
.policy = vxlan_policy,
-@@ -2797,7 +2797,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
+@@ -2798,7 +2807,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
@@ -50492,10 +49774,10 @@ index 0ffb6ff..c0b7f0e 100644
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
-index 06e04aa..d5e1f0d 100644
+index d7231a8..80df8ba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
-@@ -1684,7 +1684,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+@@ -1686,7 +1686,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
char buf[8];
@@ -50504,7 +49786,7 @@ index 06e04aa..d5e1f0d 100644
u32 reset_flag;
memset(buf, 0, sizeof(buf));
-@@ -1705,7 +1705,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+@@ -1707,7 +1707,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char buf[8];
@@ -51032,7 +50314,7 @@ index 5a40516..136d5a7 100644
kfree(msi_dev_attr);
++count;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
-index 76ef791..adc3bd1 100644
+index 6d04771..4126004 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
@@ -52841,7 +52123,7 @@ index d81f3cc..0093e5b 100644
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
-index aaea4b9..c64408d 100644
+index 7cb8c73..14561b5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1581,7 +1581,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
@@ -53089,6 +52371,58 @@ index 6b22106..6c6e641 100644
return -EBUSY;
imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
+diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
+index 503b2d7..c918745 100644
+--- a/drivers/staging/line6/driver.c
++++ b/drivers/staging/line6/driver.c
+@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ {
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+- unsigned char len;
++ unsigned char *plen;
+
+ /* query the serial number: */
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ return ret;
+ }
+
++ plen = kmalloc(1, GFP_KERNEL);
++ if (plen == NULL)
++ return -ENOMEM;
++
+ /* Wait for data length. We'll get 0xff until length arrives. */
+ do {
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+- 0x0012, 0x0000, &len, 1,
++ 0x0012, 0x0000, plen, 1,
+ LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receive length failed (error %d)\n", ret);
++ kfree(plen);
+ return ret;
+ }
+- } while (len == 0xff);
++ } while (*plen == 0xff);
+
+- if (len != datalen) {
++ if (*plen != datalen) {
+ /* should be equal or something went wrong */
+ dev_err(line6->ifcdev,
+ "length mismatch (expected %d, got %d)\n",
+- (int)datalen, (int)len);
++ (int)datalen, (int)*plen);
++ kfree(plen);
+ return -EINVAL;
+ }
++ kfree(plen);
+
+ /* receive the result: */
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index bcce919..f30fcf9 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -53424,10 +52758,10 @@ index e7e9372..161f530 100644
login->tgt_agt = sbp_target_agent_register(login);
if (IS_ERR(login->tgt_agt)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
-index 98da901..bb443e8 100644
+index 15a1c13..6c9b96b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
-@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+@@ -1526,7 +1526,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
@@ -53437,7 +52771,7 @@ index 98da901..bb443e8 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 7fa62fc..abdd041 100644
+index ab61014..8f1116e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -54161,10 +53495,10 @@ index a260cde..6b2b5ce 100644
/* This is only available if kgdboc is a built in for early debugging */
static int __init kgdboc_early_init(char *opt)
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
-index 0da0b54..80ae306 100644
+index 077570a..12550a9 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
-@@ -989,7 +989,7 @@ static struct uart_driver msm_uart_driver = {
+@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
.cons = MSM_CONSOLE,
};
@@ -54173,7 +53507,7 @@ index 0da0b54..80ae306 100644
static const struct of_device_id msm_uartdm_table[] = {
{ .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
-@@ -1008,7 +1008,7 @@ static int msm_serial_probe(struct platform_device *pdev)
+@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
int irq;
if (pdev->id == -1)
@@ -54215,7 +53549,7 @@ index c78f43a..22b1dab 100644
if (cfg->uart_flags & UPF_CONS_FLOW) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
-index 29a7be4..0144e62 100644
+index 0f03988..8a8038d 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1343,7 +1343,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
@@ -54653,10 +53987,10 @@ index 42bad18..447d7a2 100644
if (get_user(c, buf))
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index 8fbad34..0db0a39 100644
+index 848c17a..e930437 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
-@@ -3464,7 +3464,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
+@@ -3469,7 +3469,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
void tty_default_fops(struct file_operations *fops)
{
@@ -55091,7 +54425,7 @@ index 0b59731..46ee7d1 100644
dev->rawdescriptors[i] + (*ppos - pos),
min(len, alloclen))) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index 487abcf..06226dc 100644
+index 258e6fe..9ea48d7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
@@ -55113,7 +54447,7 @@ index 487abcf..06226dc 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index dc84915..cdb6624 100644
+index 674c262..71fdd90 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
@@ -55124,7 +54458,7 @@ index dc84915..cdb6624 100644
#include <asm/uaccess.h>
#include <asm/byteorder.h>
-@@ -4662,6 +4663,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
@@ -55192,19 +54526,6 @@ index 2dd2362..1135437 100644
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
-diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
-index 490a6ca..1f8364d 100644
---- a/drivers/usb/dwc3/gadget.c
-+++ b/drivers/usb/dwc3/gadget.c
-@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- if (!usb_endpoint_xfer_isoc(desc))
- return 0;
-
-- memset(&trb_link, 0, sizeof(trb_link));
--
- /* Link TRB for ISOC. The HWO bit is never reset */
- trb_st_hw = &dep->trb_pool[0];
-
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 8cfc319..4868255 100644
--- a/drivers/usb/early/ehci-dbgp.c
@@ -60442,22 +59763,10 @@ index e2e798a..f454c18 100644
static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
diff --git a/fs/buffer.c b/fs/buffer.c
-index 3588a80..3d038a9 100644
+index 72daaa5..60ffeb9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -2318,6 +2318,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
- err = 0;
-
- balance_dirty_pages_ratelimited(mapping);
-+
-+ if (unlikely(fatal_signal_pending(current))) {
-+ err = -EINTR;
-+ goto out;
-+ }
- }
-
- /* page covers the boundary, find the boundary offset */
-@@ -3424,7 +3429,7 @@ void __init buffer_init(void)
+@@ -3432,7 +3432,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -61497,7 +60806,7 @@ index a93f7e6..d58bcbe 100644
return 0;
while (nr) {
diff --git a/fs/dcache.c b/fs/dcache.c
-index cb25a1a..c557cb6 100644
+index 34b40be8..2003532 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
@@ -61665,21 +60974,7 @@ index cb25a1a..c557cb6 100644
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
-@@ -2675,11 +2676,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
- if (!IS_ROOT(new)) {
- spin_unlock(&inode->i_lock);
- dput(new);
-+ iput(inode);
- return ERR_PTR(-EIO);
- }
- if (d_ancestor(new, dentry)) {
- spin_unlock(&inode->i_lock);
- dput(new);
-+ iput(inode);
- return ERR_PTR(-EIO);
- }
- write_seqlock(&rename_lock);
-@@ -3300,7 +3303,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -3307,7 +3308,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
@@ -61688,7 +60983,7 @@ index cb25a1a..c557cb6 100644
}
}
return D_WALK_CONTINUE;
-@@ -3416,7 +3419,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3423,7 +3424,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -62630,10 +61925,10 @@ index c6874be..f8a6ae8 100644
static int
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index 581ef40..cec52d7 100644
+index e069155..b825b08 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
-@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
/* Hm, nope. Are (enough) root reserved clusters available? */
if (uid_eq(sbi->s_resuid, current_fsuid()) ||
(!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
@@ -62645,7 +61940,7 @@ index 581ef40..cec52d7 100644
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index b0c225c..0e69bd7 100644
+index 96ac9d3..1c30e7e6 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1275,19 +1275,19 @@ struct ext4_sb_info {
@@ -62796,10 +62091,10 @@ index 8b0f9ef..cb9f620 100644
return 0;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
-index 32bce84..112d969 100644
+index 8313ca3..8a37d08 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
-@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
const char *function, unsigned int line, const char *msg)
{
@@ -62809,10 +62104,10 @@ index 32bce84..112d969 100644
"MMP failure info: last update time: %llu, last update "
"node: %s, last update device: %s\n",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 0b28b36..b85d0f53 100644
+index b1f0ac7..77e9a05 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
-@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+@@ -1274,7 +1274,7 @@ static ext4_fsblk_t get_sb_block(void **data)
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
@@ -62821,7 +62116,7 @@ index 0b28b36..b85d0f53 100644
"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
#ifdef CONFIG_QUOTA
-@@ -2460,7 +2460,7 @@ struct ext4_attr {
+@@ -2454,7 +2454,7 @@ struct ext4_attr {
int offset;
int deprecated_val;
} u;
@@ -62831,10 +62126,10 @@ index 0b28b36..b85d0f53 100644
static int parse_strtoull(const char *buf,
unsigned long long max, unsigned long long *value)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index e738733..9843a6c 100644
+index 2d1e5803..1b082d415 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
-@@ -386,7 +386,7 @@ static int
+@@ -399,7 +399,7 @@ static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
char *buffer, size_t buffer_size)
{
@@ -62843,7 +62138,7 @@ index e738733..9843a6c 100644
for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
const struct xattr_handler *handler =
-@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
buffer += size;
}
rest -= size;
@@ -64607,7 +63902,7 @@ index 4a6cf28..d3a29d3 100644
jffs2_prealloc_raw_node_refs(c, jeb, 1);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
-index a6597d6..41b30ec 100644
+index 09ed551..45684f8 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
@@ -64828,7 +64123,7 @@ index 6740a62..ccb472f 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index 3ddb044..5533df9 100644
+index bb02687..79cba2c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -65116,7 +64411,7 @@ index 3ddb044..5533df9 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3206,7 +3298,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3207,7 +3299,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -65125,7 +64420,7 @@ index 3ddb044..5533df9 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3224,7 +3316,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3225,7 +3317,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -65134,7 +64429,7 @@ index 3ddb044..5533df9 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3324,9 +3416,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3325,9 +3417,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -65148,7 +64443,7 @@ index 3ddb044..5533df9 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3378,6 +3472,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3379,6 +3473,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -65169,7 +64464,7 @@ index 3ddb044..5533df9 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3441,6 +3549,17 @@ retry:
+@@ -3442,6 +3550,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -65187,7 +64482,7 @@ index 3ddb044..5533df9 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3456,6 +3575,8 @@ retry:
+@@ -3457,6 +3576,8 @@ retry:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
@@ -65196,7 +64491,7 @@ index 3ddb044..5533df9 100644
out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
-@@ -3510,9 +3631,16 @@ retry:
+@@ -3511,9 +3632,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -65213,7 +64508,7 @@ index 3ddb044..5533df9 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3595,6 +3723,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3596,6 +3724,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -65222,7 +64517,7 @@ index 3ddb044..5533df9 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3627,10 +3757,21 @@ retry:
+@@ -3628,10 +3758,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -65244,7 +64539,7 @@ index 3ddb044..5533df9 100644
exit3:
dput(dentry);
exit2:
-@@ -3721,6 +3862,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3722,6 +3863,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -65253,7 +64548,7 @@ index 3ddb044..5533df9 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3747,10 +3890,22 @@ retry_deleg:
+@@ -3748,10 +3891,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -65276,7 +64571,7 @@ index 3ddb044..5533df9 100644
exit2:
dput(dentry);
}
-@@ -3839,9 +3994,17 @@ retry:
+@@ -3840,9 +3995,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -65294,7 +64589,7 @@ index 3ddb044..5533df9 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3945,6 +4108,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3946,6 +4109,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -65302,7 +64597,7 @@ index 3ddb044..5533df9 100644
int how = 0;
int error;
-@@ -3968,7 +4132,7 @@ retry:
+@@ -3969,7 +4133,7 @@ retry:
if (error)
return error;
@@ -65311,7 +64606,7 @@ index 3ddb044..5533df9 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3980,11 +4144,28 @@ retry:
+@@ -3981,11 +4145,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -65340,7 +64635,7 @@ index 3ddb044..5533df9 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4295,6 +4476,12 @@ retry_deleg:
+@@ -4296,6 +4477,12 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -65353,7 +64648,7 @@ index 3ddb044..5533df9 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry, flags);
if (error)
-@@ -4302,6 +4489,9 @@ retry_deleg:
+@@ -4303,6 +4490,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode, flags);
@@ -65363,7 +64658,7 @@ index 3ddb044..5533df9 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4344,14 +4534,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4345,14 +4535,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
@@ -65390,7 +64685,7 @@ index 3ddb044..5533df9 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index 7f67b46..c4ad324 100644
+index 550dbff..c4ad324 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags)
@@ -65508,17 +64803,7 @@ index 7f67b46..c4ad324 100644
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -2822,6 +2846,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
- /* make sure we can reach put_old from new_root */
- if (!is_path_reachable(old_mnt, old.dentry, &new))
- goto out4;
-+ /* make certain new is below the root */
-+ if (!is_path_reachable(new_mnt, new.dentry, &root))
-+ goto out4;
- root_mp->m_count++; /* pin it so it won't go away */
- lock_mount_hash();
- detach_mnt(new_mnt, &parent_path);
-@@ -3053,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
+@@ -3056,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -65565,7 +64850,7 @@ index 577a36f..1cde799 100644
void nfs_fattr_init(struct nfs_fattr *fattr)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index 5e0dc52..64681bc 100644
+index 1d3cb47..2b8ed89 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1155,7 +1155,7 @@ struct nfsd4_operation {
@@ -68645,19 +67930,6 @@ index ae0c3ce..9ee641c 100644
generic_fillattr(inode, stat);
return 0;
-diff --git a/fs/super.c b/fs/super.c
-index b9a214d..6f8c954 100644
---- a/fs/super.c
-+++ b/fs/super.c
-@@ -80,6 +80,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
- inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
- dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
- total_objects = dentries + inodes + fs_objects + 1;
-+ if (!total_objects)
-+ total_objects = 1;
-
- /* proportion the scan between the caches */
- dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 0b45ff4..847de5b 100644
--- a/fs/sysfs/dir.c
@@ -68987,6 +68259,28 @@ index 3799695..0ddc953 100644
copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
goto out_put;
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index d10dc8f..56b3430 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -230,7 +230,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
+ * of the compiler which do not like us using do_div in the middle
+ * of large functions.
+ */
+-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
++static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
+ {
+ __u32 mod;
+
+@@ -286,7 +286,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+ return 0;
+ }
+ #else
+-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
++static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
+ {
+ __u32 mod;
+
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
index 0000000..f27264e
@@ -80989,10 +80283,10 @@ index cbc5833..8123ebc 100644
if (sizeof(l) == 4)
return fls(l);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 518b465..11953e6 100644
+index f2057ff8..59dfa2d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -1627,7 +1627,7 @@ struct block_device_operations {
+@@ -1625,7 +1625,7 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
struct module *owner;
@@ -84065,7 +83359,7 @@ index f230a97..714c006 100644
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 16e6f1e..d79d2f1 100644
+index f952cc8..b9f6135 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
@@ -84110,7 +83404,7 @@ index 16e6f1e..d79d2f1 100644
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
-@@ -1203,9 +1209,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1204,9 +1210,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
@@ -86169,29 +85463,6 @@ index 680f9a3..f13aeb0 100644
__SONET_ITEMS
#undef __HANDLE_ITEM
};
-diff --git a/include/linux/string.h b/include/linux/string.h
-index d36977e..3b42b37 100644
---- a/include/linux/string.h
-+++ b/include/linux/string.h
-@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
- #endif
-
- extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
-- const void *from, size_t available);
-+ const void *from, size_t available);
-
- /**
- * strstarts - does @str start with @prefix?
-@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
- return strncmp(str, prefix, strlen(prefix)) == 0;
- }
-
--extern size_t memweight(const void *ptr, size_t bytes);
-+size_t memweight(const void *ptr, size_t bytes);
-+void memzero_explicit(void *s, size_t count);
-
- /**
- * kbasename - return the last part of a pathname.
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53..dc934c9 100644
--- a/include/linux/sunrpc/addr.h
@@ -87535,8 +86806,24 @@ index 4a5b9a3..ca27d73 100644
.update = sctp_csum_update,
.combine = sctp_csum_combine,
};
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 9fbd856..856f01c 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -426,6 +426,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
+ asoc->pmtu_pending = 0;
+ }
+
++static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
++{
++ return !list_empty(&chunk->list);
++}
++
+ /* Walk through a list of TLV parameters. Don't trust the
+ * individual parameter lengths and instead depend on
+ * the chunk length to indicate when to stop. Make sure
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
-index 7f4eeb3..37e8fe1 100644
+index 7f4eeb3..aaa63d9 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
@@ -87548,6 +86835,19 @@ index 7f4eeb3..37e8fe1 100644
/* A naming convention of "sctp_sf_xxx" applies to all the state functions
* currently in use.
+@@ -248,9 +248,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
+ int, __be16);
+ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
+ union sctp_addr *addr);
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+- struct sctp_paramhdr *param_hdr, void *chunk_end,
+- struct sctp_paramhdr **errp);
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, bool addr_param_needed,
++ struct sctp_paramhdr **errp);
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf);
+ int sctp_process_asconf_ack(struct sctp_association *asoc,
@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
__u32 sctp_generate_tsn(const struct sctp_endpoint *);
@@ -89674,7 +88974,7 @@ index 379650b..30c5180 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 963bf13..a78dd3e 100644
+index 658f232..32e9595 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -161,8 +161,15 @@ static struct srcu_struct pmus_srcu;
@@ -89712,7 +89012,7 @@ index 963bf13..a78dd3e 100644
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
-@@ -3034,7 +3041,7 @@ static void __perf_event_read(void *info)
+@@ -3051,7 +3058,7 @@ static void __perf_event_read(void *info)
static inline u64 perf_event_count(struct perf_event *event)
{
@@ -89721,7 +89021,7 @@ index 963bf13..a78dd3e 100644
}
static u64 perf_event_read(struct perf_event *event)
-@@ -3410,9 +3417,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3430,9 +3437,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
@@ -89733,7 +89033,7 @@ index 963bf13..a78dd3e 100644
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
-@@ -3861,10 +3868,10 @@ void perf_event_update_userpage(struct perf_event *event)
+@@ -3881,10 +3888,10 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
@@ -89746,7 +89046,7 @@ index 963bf13..a78dd3e 100644
arch_perf_update_userpage(userpg, now);
-@@ -4428,7 +4435,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+@@ -4448,7 +4455,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
/* Data. */
sp = perf_user_stack_pointer(regs);
@@ -89755,7 +89055,7 @@ index 963bf13..a78dd3e 100644
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
-@@ -4519,11 +4526,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+@@ -4539,11 +4546,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
@@ -89769,7 +89069,7 @@ index 963bf13..a78dd3e 100644
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
-@@ -6838,7 +6845,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+@@ -6858,7 +6865,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
@@ -89778,7 +89078,7 @@ index 963bf13..a78dd3e 100644
event->state = PERF_EVENT_STATE_INACTIVE;
-@@ -7117,6 +7124,11 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -7137,6 +7144,11 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -89790,7 +89090,7 @@ index 963bf13..a78dd3e 100644
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
-@@ -7469,10 +7481,10 @@ static void sync_child_event(struct perf_event *child_event,
+@@ -7489,10 +7501,10 @@ static void sync_child_event(struct perf_event *child_event,
/*
* Add back the child's count to the parent's count:
*/
@@ -90350,7 +89650,7 @@ index a91e47d..71c9064 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index f3a3a07..6820bc0 100644
+index 22b3f1b..6820bc0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -202,7 +202,7 @@ struct futex_pi_state {
@@ -90392,72 +89692,6 @@ index f3a3a07..6820bc0 100644
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -641,8 +646,14 @@ static struct futex_pi_state * alloc_pi_state(void)
- return pi_state;
- }
-
-+/*
-+ * Must be called with the hb lock held.
-+ */
- static void free_pi_state(struct futex_pi_state *pi_state)
- {
-+ if (!pi_state)
-+ return;
-+
- if (!atomic_dec_and_test(&pi_state->refcount))
- return;
-
-@@ -1521,15 +1532,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
- }
-
- retry:
-- if (pi_state != NULL) {
-- /*
-- * We will have to lookup the pi_state again, so free this one
-- * to keep the accounting correct.
-- */
-- free_pi_state(pi_state);
-- pi_state = NULL;
-- }
--
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
- if (unlikely(ret != 0))
- goto out;
-@@ -1619,6 +1621,8 @@ retry_private:
- case 0:
- break;
- case -EFAULT:
-+ free_pi_state(pi_state);
-+ pi_state = NULL;
- double_unlock_hb(hb1, hb2);
- hb_waiters_dec(hb2);
- put_futex_key(&key2);
-@@ -1634,6 +1638,8 @@ retry_private:
- * exit to complete.
- * - The user space value changed.
- */
-+ free_pi_state(pi_state);
-+ pi_state = NULL;
- double_unlock_hb(hb1, hb2);
- hb_waiters_dec(hb2);
- put_futex_key(&key2);
-@@ -1710,6 +1716,7 @@ retry_private:
- }
-
- out_unlock:
-+ free_pi_state(pi_state);
- double_unlock_hb(hb1, hb2);
- hb_waiters_dec(hb2);
-
-@@ -1727,8 +1734,6 @@ out_put_keys:
- out_put_key1:
- put_futex_key(&key1);
- out:
-- if (pi_state != NULL)
-- free_pi_state(pi_state);
- return ret ? ret : task_count;
- }
-
@@ -3000,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
@@ -91270,7 +90504,7 @@ index 1d96dd0..994ff19 100644
default:
diff --git a/kernel/module.c b/kernel/module.c
-index 03214bd2..6242887 100644
+index 1c47139..6242887 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -60,6 +60,7 @@
@@ -91465,17 +90699,7 @@ index 03214bd2..6242887 100644
set_memory_ro);
}
}
-@@ -1842,7 +1861,9 @@ static void free_module(struct module *mod)
-
- /* We leave it in list to prevent duplicate loads, but make sure
- * that noone uses it while it's being deconstructed. */
-+ mutex_lock(&module_mutex);
- mod->state = MODULE_STATE_UNFORMED;
-+ mutex_unlock(&module_mutex);
-
- /* Remove dynamic debug info */
- ddebug_remove_module(mod->name);
-@@ -1863,16 +1884,19 @@ static void free_module(struct module *mod)
+@@ -1865,16 +1884,19 @@ static void free_module(struct module *mod)
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
@@ -91498,7 +90722,7 @@ index 03214bd2..6242887 100644
#ifdef CONFIG_MPU
update_protections(current->mm);
-@@ -1941,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+@@ -1943,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
int ret = 0;
const struct kernel_symbol *ksym;
@@ -91530,7 +90754,7 @@ index 03214bd2..6242887 100644
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* Ignore common symbols */
-@@ -1968,7 +2014,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+@@ -1970,7 +2014,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
ksym = resolve_symbol_wait(mod, info, name);
/* Ok if resolved. */
if (ksym && !IS_ERR(ksym)) {
@@ -91540,7 +90764,7 @@ index 03214bd2..6242887 100644
break;
}
-@@ -1987,11 +2035,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+@@ -1989,11 +2035,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
secbase = (unsigned long)mod_percpu(mod);
else
secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
@@ -91561,7 +90785,7 @@ index 03214bd2..6242887 100644
return ret;
}
-@@ -2075,22 +2132,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
+@@ -2077,22 +2132,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
|| s->sh_entsize != ~0UL
|| strstarts(sname, ".init"))
continue;
@@ -91588,7 +90812,7 @@ index 03214bd2..6242887 100644
}
pr_debug("Init section allocation order:\n");
-@@ -2104,23 +2151,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
+@@ -2106,23 +2151,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
|| s->sh_entsize != ~0UL
|| !strstarts(sname, ".init"))
continue;
@@ -91617,7 +90841,7 @@ index 03214bd2..6242887 100644
}
}
-@@ -2293,7 +2330,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2295,7 +2330,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
/* Put symbol section at end of init part of module. */
symsect->sh_flags |= SHF_ALLOC;
@@ -91626,7 +90850,7 @@ index 03214bd2..6242887 100644
info->index.sym) | INIT_OFFSET_MASK;
pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
-@@ -2310,13 +2347,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+@@ -2312,13 +2347,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
}
/* Append room for core symbols at end of core part. */
@@ -91644,7 +90868,7 @@ index 03214bd2..6242887 100644
info->index.str) | INIT_OFFSET_MASK;
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}
-@@ -2334,12 +2371,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2336,12 +2371,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
/* Make sure we get permanent strtab: don't use info->strtab. */
mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
@@ -91661,7 +90885,7 @@ index 03214bd2..6242887 100644
src = mod->symtab;
for (ndst = i = 0; i < mod->num_symtab; i++) {
if (i == 0 ||
-@@ -2351,6 +2390,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+@@ -2353,6 +2390,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
}
}
mod->core_num_syms = ndst;
@@ -91670,7 +90894,7 @@ index 03214bd2..6242887 100644
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
-@@ -2384,17 +2425,33 @@ void * __weak module_alloc(unsigned long size)
+@@ -2386,17 +2425,33 @@ void * __weak module_alloc(unsigned long size)
return vmalloc_exec(size);
}
@@ -91709,7 +90933,7 @@ index 03214bd2..6242887 100644
mutex_unlock(&module_mutex);
}
return ret;
-@@ -2648,7 +2705,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+@@ -2650,7 +2705,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
mod = (void *)info->sechdrs[info->index.mod].sh_addr;
if (info->index.sym == 0) {
@@ -91725,7 +90949,7 @@ index 03214bd2..6242887 100644
return ERR_PTR(-ENOEXEC);
}
-@@ -2664,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+@@ -2666,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
const char *modmagic = get_modinfo(info, "vermagic");
@@ -91740,7 +90964,7 @@ index 03214bd2..6242887 100644
if (flags & MODULE_INIT_IGNORE_VERMAGIC)
modmagic = NULL;
-@@ -2690,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+@@ -2692,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
}
/* Set up license info based on the info section */
@@ -91749,7 +90973,7 @@ index 03214bd2..6242887 100644
return 0;
}
-@@ -2784,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2786,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
void *ptr;
/* Do the allocs. */
@@ -91758,7 +90982,7 @@ index 03214bd2..6242887 100644
/*
* The pointer to this block is stored in the module structure
* which is inside the block. Just mark it as not being a
-@@ -2794,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2796,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
if (!ptr)
return -ENOMEM;
@@ -91774,7 +90998,7 @@ index 03214bd2..6242887 100644
/*
* The pointer to this block is stored in the module structure
* which is inside the block. This block doesn't need to be
-@@ -2807,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2809,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
*/
kmemleak_ignore(ptr);
if (!ptr) {
@@ -91824,7 +91048,7 @@ index 03214bd2..6242887 100644
/* Transfer each section which specifies SHF_ALLOC */
pr_debug("final section addresses:\n");
-@@ -2824,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -2826,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
if (!(shdr->sh_flags & SHF_ALLOC))
continue;
@@ -91877,7 +91101,7 @@ index 03214bd2..6242887 100644
pr_debug("\t0x%lx %s\n",
(long)shdr->sh_addr, info->secstrings + shdr->sh_name);
}
-@@ -2890,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
+@@ -2892,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
* Do it before processing of module parameters, so the module
* can provide parameter accessor functions of its own.
*/
@@ -91896,7 +91120,7 @@ index 03214bd2..6242887 100644
set_fs(old_fs);
}
-@@ -2952,8 +3084,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
+@@ -2954,8 +3084,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
@@ -91909,7 +91133,7 @@ index 03214bd2..6242887 100644
}
int __weak module_finalize(const Elf_Ehdr *hdr,
-@@ -2966,7 +3100,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -2968,7 +3100,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
static int post_relocation(struct module *mod, const struct load_info *info)
{
/* Sort exception table now relocations are done. */
@@ -91919,7 +91143,7 @@ index 03214bd2..6242887 100644
/* Copy relocated percpu area over. */
percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
-@@ -3075,11 +3211,12 @@ static int do_init_module(struct module *mod)
+@@ -3077,11 +3211,12 @@ static int do_init_module(struct module *mod)
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
@@ -91937,7 +91161,7 @@ index 03214bd2..6242887 100644
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
-@@ -3147,16 +3284,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
+@@ -3149,16 +3284,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_bug_finalize(info->hdr, info->sechdrs, mod);
/* Set RO and NX regions for core */
@@ -91962,7 +91186,7 @@ index 03214bd2..6242887 100644
/* Mark state as coming so strong_try_module_get() ignores us,
* but kallsyms etc. can see us. */
-@@ -3240,9 +3377,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3242,9 +3377,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
if (err)
goto free_unload;
@@ -92001,7 +91225,7 @@ index 03214bd2..6242887 100644
/* Fix up syms, so that st_value is a pointer to location. */
err = simplify_symbols(mod, info);
if (err < 0)
-@@ -3258,13 +3424,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3260,13 +3424,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
flush_module_icache(mod);
@@ -92015,7 +91239,7 @@ index 03214bd2..6242887 100644
dynamic_debug_setup(info->debug, info->num_debug);
/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
-@@ -3312,11 +3471,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3314,11 +3471,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup:
dynamic_debug_remove(info->debug);
synchronize_sched();
@@ -92028,7 +91252,7 @@ index 03214bd2..6242887 100644
free_unload:
module_unload_free(mod);
unlink_mod:
-@@ -3401,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
+@@ -3403,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
unsigned long nextval;
/* At worse, next value is at end of module */
@@ -92048,7 +91272,7 @@ index 03214bd2..6242887 100644
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
-@@ -3652,7 +3816,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3654,7 +3816,7 @@ static int m_show(struct seq_file *m, void *p)
return 0;
seq_printf(m, "%s %u",
@@ -92057,7 +91281,7 @@ index 03214bd2..6242887 100644
print_unload_info(m, mod);
/* Informative for users. */
-@@ -3661,7 +3825,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3663,7 +3825,7 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading":
"Live");
/* Used by oprofile and other similar tools. */
@@ -92066,7 +91290,7 @@ index 03214bd2..6242887 100644
/* Taints info */
if (mod->taints)
-@@ -3697,7 +3861,17 @@ static const struct file_operations proc_modules_operations = {
+@@ -3699,7 +3861,17 @@ static const struct file_operations proc_modules_operations = {
static int __init proc_modules_init(void)
{
@@ -92084,7 +91308,7 @@ index 03214bd2..6242887 100644
return 0;
}
module_init(proc_modules_init);
-@@ -3758,7 +3932,8 @@ struct module *__module_address(unsigned long addr)
+@@ -3760,7 +3932,8 @@ struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -92094,7 +91318,7 @@ index 03214bd2..6242887 100644
return NULL;
list_for_each_entry_rcu(mod, &modules, list) {
-@@ -3799,11 +3974,20 @@ bool is_module_text_address(unsigned long addr)
+@@ -3801,11 +3974,20 @@ bool is_module_text_address(unsigned long addr)
*/
struct module *__module_text_address(unsigned long addr)
{
@@ -92311,7 +91535,7 @@ index e4e4121..71faf14 100644
select LZO_COMPRESS
select LZO_DECOMPRESS
diff --git a/kernel/power/process.c b/kernel/power/process.c
-index 4ee194e..925778f 100644
+index 7a37cf3..3e4c1c8 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
@@ -93481,7 +92705,7 @@ index a63f4dc..349bbb0 100644
unsigned long timeout)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index ec1a286..6b516b8 100644
+index 6d7cb91..420f2d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1857,7 +1857,7 @@ void set_numabalancing_state(bool enabled)
@@ -93493,7 +92717,7 @@ index ec1a286..6b516b8 100644
int err;
int state = numabalancing_enabled;
-@@ -2320,8 +2320,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
+@@ -2324,8 +2324,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
@@ -93505,7 +92729,7 @@ index ec1a286..6b516b8 100644
if (!prev->mm) {
prev->active_mm = NULL;
-@@ -3103,6 +3105,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -3107,6 +3109,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = nice_to_rlimit(nice);
@@ -93514,7 +92738,7 @@ index ec1a286..6b516b8 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -3129,7 +3133,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -3133,7 +3137,8 @@ SYSCALL_DEFINE1(nice, int, increment)
nice = task_nice(current) + increment;
nice = clamp_val(nice, MIN_NICE, MAX_NICE);
@@ -93524,7 +92748,7 @@ index ec1a286..6b516b8 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -3408,6 +3413,7 @@ recheck:
+@@ -3412,6 +3417,7 @@ recheck:
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
@@ -93532,7 +92756,7 @@ index ec1a286..6b516b8 100644
/* can't increase priority */
if (attr->sched_priority > p->rt_priority &&
attr->sched_priority > rlim_rtprio)
-@@ -4797,6 +4803,7 @@ void idle_task_exit(void)
+@@ -4802,6 +4808,7 @@ void idle_task_exit(void)
if (mm != &init_mm) {
switch_mm(mm, &init_mm, current);
@@ -93540,7 +92764,7 @@ index ec1a286..6b516b8 100644
finish_arch_post_lock_switch();
}
mmdrop(mm);
-@@ -4892,7 +4899,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+@@ -4897,7 +4904,7 @@ static void migrate_tasks(unsigned int dead_cpu)
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -93549,7 +92773,7 @@ index ec1a286..6b516b8 100644
{
.procname = "sched_domain",
.mode = 0555,
-@@ -4909,17 +4916,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -4914,17 +4921,17 @@ static struct ctl_table sd_ctl_root[] = {
{}
};
@@ -93571,7 +92795,7 @@ index ec1a286..6b516b8 100644
/*
* In the intermediate directories, both the child directory and
-@@ -4927,22 +4934,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -4932,22 +4939,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
@@ -93603,7 +92827,7 @@ index ec1a286..6b516b8 100644
const char *procname, void *data, int maxlen,
umode_t mode, proc_handler *proc_handler,
bool load_idx)
-@@ -4962,7 +4972,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -4967,7 +4977,7 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
@@ -93612,7 +92836,7 @@ index ec1a286..6b516b8 100644
if (table == NULL)
return NULL;
-@@ -5000,9 +5010,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -5005,9 +5015,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
@@ -93624,7 +92848,7 @@ index ec1a286..6b516b8 100644
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
-@@ -5029,11 +5039,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -5034,11 +5044,13 @@ static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
@@ -93639,7 +92863,7 @@ index ec1a286..6b516b8 100644
if (entry == NULL)
return;
-@@ -5056,8 +5068,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -5061,8 +5073,12 @@ static void unregister_sched_domain_sysctl(void)
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
@@ -94440,7 +93664,7 @@ index 3b89464..5e38379 100644
.clock_get = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
-index 42b463a..a6b008f 100644
+index 31ea01f..7fc61ef 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -43,6 +43,7 @@
@@ -94541,7 +93765,7 @@ index 42b463a..a6b008f 100644
int it_id_set = IT_ID_NOT_SET;
if (!kc)
-@@ -1013,6 +1014,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
if (copy_from_user(&new_tp, tp, sizeof (*tp)))
return -EFAULT;
@@ -95265,36 +94489,9 @@ index 8a4e5cb..64f270d 100644
local_irq_save(flags);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
-index 759d5e0..5156a5fe 100644
+index 7e3cd7a..5156a5fe 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
-@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
- int size;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
-
- /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
-@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
- int syscall_nr;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
-
- /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
-@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
- int size;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
- if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
- return;
@@ -602,6 +602,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
int num;
@@ -95313,15 +94510,6 @@ index 759d5e0..5156a5fe 100644
mutex_lock(&syscall_trace_lock);
sys_perf_refcount_enter--;
-@@ -641,7 +645,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
- int size;
-
- syscall_nr = trace_get_syscall_nr(current, regs);
-- if (syscall_nr < 0)
-+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
- return;
- if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
- return;
@@ -674,6 +678,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
int num;
@@ -95507,32 +94695,10 @@ index 114d1be..ab0350c 100644
(val << avg->factor)) >> avg->weight :
(val << avg->factor);
diff --git a/lib/bitmap.c b/lib/bitmap.c
-index 1e031f2..89e3d6f 100644
+index 33ce011..89e3d6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
-@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
- lower = src[off + k];
- if (left && off + k == lim - 1)
- lower &= mask;
-- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
-+ dst[k] = lower >> rem;
-+ if (rem)
-+ dst[k] |= upper << (BITS_PER_LONG - rem);
- if (left && k == lim - 1)
- dst[k] &= mask;
- }
-@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
- upper = src[k];
- if (left && k == lim - 1)
- upper &= (1UL << left) - 1;
-- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
-+ dst[k + off] = upper << rem;
-+ if (rem)
-+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
- if (left && k + off == lim - 1)
- dst[k + off] &= (1UL << left) - 1;
- }
-@@ -429,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
+@@ -433,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
@@ -95541,7 +94707,7 @@ index 1e031f2..89e3d6f 100644
bitmap_zero(maskp, nmaskbits);
-@@ -514,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
+@@ -518,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -95550,7 +94716,7 @@ index 1e031f2..89e3d6f 100644
ulen, 1, maskp, nmaskbits);
}
-@@ -605,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+@@ -609,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
{
unsigned a, b;
int c, old_c, totaldigits;
@@ -95559,7 +94725,7 @@ index 1e031f2..89e3d6f 100644
int exp_digit, in_range;
totaldigits = c = 0;
-@@ -700,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+@@ -704,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
@@ -96095,33 +95261,6 @@ index 0922579..9d7adb9 100644
+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
#endif
}
-diff --git a/lib/string.c b/lib/string.c
-index f3c6ff5..70db57a 100644
---- a/lib/string.c
-+++ b/lib/string.c
-@@ -604,6 +604,22 @@ void *memset(void *s, int c, size_t count)
- EXPORT_SYMBOL(memset);
- #endif
-
-+/**
-+ * memzero_explicit - Fill a region of memory (e.g. sensitive
-+ * keying data) with 0s.
-+ * @s: Pointer to the start of the area.
-+ * @count: The size of the area.
-+ *
-+ * memzero_explicit() doesn't need an arch-specific version as
-+ * it just invokes the one of memset() implicitly.
-+ */
-+void memzero_explicit(void *s, size_t count)
-+{
-+ memset(s, 0, count);
-+ OPTIMIZER_HIDE_VAR(s);
-+}
-+EXPORT_SYMBOL(memzero_explicit);
-+
- #ifndef __HAVE_ARCH_MEMCPY
- /**
- * memcpy - Copy one area of memory to another
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index bb2b201..46abaf9 100644
--- a/lib/strncpy_from_user.c
@@ -96937,7 +96076,7 @@ index 44c6bd2..60369dc3 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index e229970..68218aa 100644
+index 37b80fc..68218aa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -96987,15 +96126,7 @@ index e229970..68218aa 100644
vma->vm_file->f_op->mmap);
dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-@@ -1147,6 +1153,7 @@ again:
- print_bad_pte(vma, addr, ptent, page);
- if (unlikely(!__tlb_remove_page(tlb, page))) {
- force_flush = 1;
-+ addr += PAGE_SIZE;
- break;
- }
- continue;
-@@ -1500,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -97006,7 +96137,7 @@ index e229970..68218aa 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -1544,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1545,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -97028,7 +96159,7 @@ index e229970..68218aa 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -1629,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -1630,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -97036,7 +96167,7 @@ index e229970..68218aa 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -1876,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1877,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -97047,7 +96178,7 @@ index e229970..68218aa 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -1896,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -1897,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -97058,7 +96189,7 @@ index e229970..68218aa 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2018,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+@@ -2019,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
return ret;
}
@@ -97245,7 +96376,7 @@ index e229970..68218aa 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2216,6 +2424,12 @@ gotten:
+@@ -2217,6 +2424,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -97258,7 +96389,7 @@ index e229970..68218aa 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2269,6 +2483,10 @@ gotten:
+@@ -2270,6 +2483,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -97269,7 +96400,7 @@ index e229970..68218aa 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2543,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2544,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -97281,7 +96412,7 @@ index e229970..68218aa 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -2566,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2567,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -97293,7 +96424,7 @@ index e229970..68218aa 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2585,40 +2813,6 @@ out_release:
+@@ -2586,40 +2813,6 @@ out_release:
}
/*
@@ -97334,7 +96465,7 @@ index e229970..68218aa 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -2628,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2629,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned int flags)
{
struct mem_cgroup *memcg;
@@ -97367,7 +96498,7 @@ index e229970..68218aa 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2672,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2673,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -97379,7 +96510,7 @@ index e229970..68218aa 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
mem_cgroup_commit_charge(page, memcg, false);
-@@ -2681,6 +2876,12 @@ setpte:
+@@ -2682,6 +2876,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -97392,7 +96523,7 @@ index e229970..68218aa 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2911,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2912,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
@@ -97404,7 +96535,7 @@ index e229970..68218aa 100644
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
-@@ -2953,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2954,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(fault_page);
goto uncharge_out;
}
@@ -97423,7 +96554,7 @@ index e229970..68218aa 100644
mem_cgroup_commit_charge(new_page, memcg, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
-@@ -3003,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3004,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
@@ -97435,7 +96566,7 @@ index e229970..68218aa 100644
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
-@@ -3244,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3245,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -97448,7 +96579,7 @@ index e229970..68218aa 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3263,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3264,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -97490,7 +96621,7 @@ index e229970..68218aa 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3399,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3400,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -97514,7 +96645,7 @@ index e229970..68218aa 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3429,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3430,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -97545,7 +96676,7 @@ index e229970..68218aa 100644
#endif /* __PAGETABLE_PMD_FOLDED */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
-@@ -3538,8 +3839,8 @@ out:
+@@ -3539,8 +3839,8 @@ out:
return ret;
}
@@ -97556,7 +96687,7 @@ index e229970..68218aa 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -3565,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3566,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -97567,7 +96698,7 @@ index e229970..68218aa 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -3574,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3575,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -97576,7 +96707,7 @@ index e229970..68218aa 100644
void *maddr;
struct page *page = NULL;
-@@ -3635,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3636,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -97587,7 +96718,7 @@ index e229970..68218aa 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -3646,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3647,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -99426,7 +98557,7 @@ index a881d96..e5932cd 100644
struct mm_struct *mm;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 91d73ef..0e564d2 100644
+index ba5fd97..5a95869 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
@@ -99439,7 +98570,7 @@ index 91d73ef..0e564d2 100644
unsigned long bg_thresh,
unsigned long dirty,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index eee9619..155d328 100644
+index 8c5029f..d6907f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -99553,7 +98684,7 @@ index eee9619..155d328 100644
zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
} while (zone++ != preferred_zone);
}
-@@ -5702,7 +5742,7 @@ static void __setup_per_zone_wmarks(void)
+@@ -5710,7 +5750,7 @@ static void __setup_per_zone_wmarks(void)
__mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -99563,7 +98694,7 @@ index eee9619..155d328 100644
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
diff --git a/mm/percpu.c b/mm/percpu.c
-index da997f9..19040e9 100644
+index 2139e30..1d45bce 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
@@ -99627,7 +98758,7 @@ index 5077afc..846c9ef 100644
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
diff --git a/mm/rmap.c b/mm/rmap.c
-index 3e8491c..02abccc 100644
+index e01318d..25117ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
@@ -102041,8 +101172,325 @@ index 1a19b98..df2b4ec 100644
if (!can_dir) {
printk(KERN_INFO "can: failed to create /proc/net/can . "
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index ffeba8f..c0d666a 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+
+ static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
+
++/*
++ * Should be used for buffers allocated with ceph_kvmalloc().
++ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
++ * in-buffer (msg front).
++ *
++ * Dispose of @sgt with teardown_sgtable().
++ *
++ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
++ * in cases where a single sg is sufficient. No attempt to reduce the
++ * number of sgs by squeezing physically contiguous pages together is
++ * made though, for simplicity.
++ */
++static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
++ const void *buf, unsigned int buf_len)
++{
++ struct scatterlist *sg;
++ const bool is_vmalloc = is_vmalloc_addr(buf);
++ unsigned int off = offset_in_page(buf);
++ unsigned int chunk_cnt = 1;
++ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
++ int i;
++ int ret;
++
++ if (buf_len == 0) {
++ memset(sgt, 0, sizeof(*sgt));
++ return -EINVAL;
++ }
++
++ if (is_vmalloc) {
++ chunk_cnt = chunk_len >> PAGE_SHIFT;
++ chunk_len = PAGE_SIZE;
++ }
++
++ if (chunk_cnt > 1) {
++ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
++ if (ret)
++ return ret;
++ } else {
++ WARN_ON(chunk_cnt != 1);
++ sg_init_table(prealloc_sg, 1);
++ sgt->sgl = prealloc_sg;
++ sgt->nents = sgt->orig_nents = 1;
++ }
++
++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
++ struct page *page;
++ unsigned int len = min(chunk_len - off, buf_len);
++
++ if (is_vmalloc)
++ page = vmalloc_to_page(buf);
++ else
++ page = virt_to_page(buf);
++
++ sg_set_page(sg, page, len, off);
++
++ off = 0;
++ buf += len;
++ buf_len -= len;
++ }
++ WARN_ON(buf_len != 0);
++
++ return 0;
++}
++
++static void teardown_sgtable(struct sg_table *sgt)
++{
++ if (sgt->orig_nents > 1)
++ sg_free_table(sgt);
++}
++
+ static int ceph_aes_encrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[2], sg_out[1];
++ struct scatterlist sg_in[2], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+
+ *dst_len = src_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], src, src_len);
+ sg_set_buf(&sg_in[1], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len)
+ {
+- struct scatterlist sg_in[3], sg_out[1];
++ struct scatterlist sg_in[3], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+
+ *dst_len = src1_len + src2_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 3);
+ sg_set_buf(&sg_in[0], src1, src1_len);
+ sg_set_buf(&sg_in[1], src2, src2_len);
+ sg_set_buf(&sg_in[2], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src1_len + src2_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt2 failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[2];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[2], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+- sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+- sg_set_buf(sg_in, src, src_len);
+ sg_set_buf(&sg_out[0], dst, *dst_len);
+ sg_set_buf(&sg_out[1], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst_len)
+@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt2(const void *key, int key_len,
+@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ void *dst2, size_t *dst2_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[3];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[3], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- sg_init_table(sg_in, 1);
+- sg_set_buf(sg_in, src, src_len);
+ sg_init_table(sg_out, 3);
+ sg_set_buf(&sg_out[0], dst1, *dst1_len);
+ sg_set_buf(&sg_out[1], dst2, *dst2_len);
+ sg_set_buf(&sg_out[2], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst1_len)
+@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ dst2, *dst2_len, 1);
+ */
+
+- return 0;
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
-index b2f571d..e6160e9 100644
+index 9f02369..e6160e9 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
@@ -102063,19 +101511,6 @@ index b2f571d..e6160e9 100644
s = addr_str[i];
switch (ss->ss_family) {
-@@ -292,7 +292,11 @@ int ceph_msgr_init(void)
- if (ceph_msgr_slab_init())
- return -ENOMEM;
-
-- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
-+ /*
-+ * The number of active work items is limited by the number of
-+ * connections, so leave @max_active at default.
-+ */
-+ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
- if (ceph_msgr_wq)
- return 0;
-
diff --git a/net/compat.c b/net/compat.c
index bc8aeef..f9c070c 100644
--- a/net/compat.c
@@ -102316,7 +101751,7 @@ index cf8a95f..2837211 100644
}
EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
-index cf999e0..c59a975 100644
+index cf999e0..c59a9754 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
@@ -103231,7 +102666,7 @@ index 255aa99..45c78f8 100644
break;
case NETDEV_DOWN:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
-index b10cd43a..22327f9 100644
+index 4a74ea8..32335a7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
@@ -103244,7 +102679,7 @@ index b10cd43a..22327f9 100644
return nh->nh_saddr;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
-index 6556263..db77807 100644
+index dd73bea..a2eec02 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -59,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
@@ -103415,7 +102850,7 @@ index 3d4da2c..40f9c29 100644
ICMP_PROT_UNREACH, 0);
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
-index 215af2b..73cbbe1 100644
+index c43a1e2..73cbbe1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
@@ -103427,39 +102862,6 @@ index 215af2b..73cbbe1 100644
kfree_skb(skb);
return -ENOMEM;
}
-@@ -1533,6 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- struct sk_buff *nskb;
- struct sock *sk;
- struct inet_sock *inet;
-+ int err;
-
- if (ip_options_echo(&replyopts.opt.opt, skb))
- return;
-@@ -1572,8 +1573,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- sock_net_set(sk, net);
- __skb_queue_head_init(&sk->sk_write_queue);
- sk->sk_sndbuf = sysctl_wmem_default;
-- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
-- &ipc, &rt, MSG_DONTWAIT);
-+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
-+ len, 0, &ipc, &rt, MSG_DONTWAIT);
-+ if (unlikely(err)) {
-+ ip_flush_pending_frames(sk);
-+ goto out;
-+ }
-+
- nskb = skb_peek(&sk->sk_write_queue);
- if (nskb) {
- if (arg->csumoffset >= 0)
-@@ -1585,7 +1591,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
- ip_push_pending_frames(sk, &fl4);
- }
--
-+out:
- put_cpu_var(unicast_sock);
-
- ip_rt_put(rt);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5cb830c..81a7a56 100644
--- a/net/ipv4/ip_sockglue.c
@@ -103483,24 +102885,6 @@ index 5cb830c..81a7a56 100644
msg.msg_controllen = len;
msg.msg_flags = flags;
-diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
-index f4c987b..88c386c 100644
---- a/net/ipv4/ip_tunnel_core.c
-+++ b/net/ipv4/ip_tunnel_core.c
-@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
- skb_pull_rcsum(skb, hdr_len);
-
- if (inner_proto == htons(ETH_P_TEB)) {
-- struct ethhdr *eh = (struct ethhdr *)skb->data;
-+ struct ethhdr *eh;
-
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- return -ENOMEM;
-
-+ eh = (struct ethhdr *)skb->data;
- if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
- skb->protocol = eh->h_proto;
- else
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e453cb7..3c8d952 100644
--- a/net/ipv4/ip_vti.c
@@ -103814,7 +103198,7 @@ index 739db31..74f0210 100644
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index cbadb94..691f99e 100644
+index 29836f8..bd1e2ba 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
@@ -103867,7 +103251,7 @@ index cbadb94..691f99e 100644
}
EXPORT_SYMBOL(ip_idents_reserve);
-@@ -2623,34 +2623,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
+@@ -2624,34 +2624,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
@@ -103910,7 +103294,7 @@ index cbadb94..691f99e 100644
err_dup:
return -ENOMEM;
}
-@@ -2673,8 +2673,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
+@@ -2674,8 +2674,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
@@ -103921,7 +103305,7 @@ index cbadb94..691f99e 100644
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
return 0;
-@@ -2717,11 +2717,7 @@ int __init ip_rt_init(void)
+@@ -2718,11 +2718,7 @@ int __init ip_rt_init(void)
{
int rc = 0;
@@ -104068,7 +103452,7 @@ index a906e02..f3b6a0f 100644
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index cd17f00..1e1f252 100644
+index 3f49eae..bde687a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
@@ -104950,7 +104334,7 @@ index 0c56c93..ece50df 100644
struct ctl_table *ipv6_icmp_table;
int err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 29964c3..b8caecf 100644
+index 264c0f2..b6512c6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -105559,7 +104943,7 @@ index 4c5192e..04cc0d8 100644
suspend:
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
-index 8fdadfd..a4f72b8 100644
+index 6081329..ab23834 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
@@ -106178,10 +105562,10 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index c416725..bd22eea 100644
+index f1de72d..f983dcb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
-@@ -265,7 +265,7 @@ static void netlink_overrun(struct sock *sk)
+@@ -273,7 +273,7 @@ static void netlink_overrun(struct sock *sk)
sk->sk_error_report(sk);
}
}
@@ -106190,16 +105574,7 @@ index c416725..bd22eea 100644
}
static void netlink_rcv_wake(struct sock *sk)
-@@ -715,7 +715,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
- * after validation, the socket and the ring may only be used by a
- * single process, otherwise we fall back to copying.
- */
-- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
-+ if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
- atomic_read(&nlk->mapped) > 1)
- excl = false;
-
-@@ -2996,7 +2996,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -3009,7 +3009,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
@@ -106853,6 +106228,87 @@ index fc04fe9..8167357 100644
linkwatch_fire_event(dev);
}
}
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index a88b852..f791edd 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1668,6 +1668,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+ * ack chunk whose serial number matches that of the request.
+ */
+ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
++ if (sctp_chunk_pending(ack))
++ continue;
+ if (ack->subh.addip_hdr->serial == serial) {
+ sctp_chunk_hold(ack);
+ return ack;
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 0e85291..fb7976a 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
+ list_add(&cur_key->key_list, sh_keys);
+
+ cur_key->key = key;
+- sctp_auth_key_hold(key);
+-
+ return 0;
+ nomem:
+ if (!replace)
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 4de12af..7e8a16c 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -140,18 +140,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ } else {
+ /* Nothing to do. Next chunk in the packet, please. */
+ ch = (sctp_chunkhdr_t *) chunk->chunk_end;
+-
+ /* Force chunk->skb->data to chunk->chunk_end. */
+- skb_pull(chunk->skb,
+- chunk->chunk_end - chunk->skb->data);
+-
+- /* Verify that we have at least chunk headers
+- * worth of buffer left.
+- */
+- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+- sctp_chunk_free(chunk);
+- chunk = queue->in_progress = NULL;
+- }
++ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
++ /* We are guaranteed to pull a SCTP header. */
+ }
+ }
+
+@@ -187,24 +178,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+ chunk->subh.v = NULL; /* Subheader is no longer valid. */
+
+- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
++ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
++ skb_tail_pointer(chunk->skb)) {
+ /* This is not a singleton */
+ chunk->singleton = 0;
+ } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+- /* RFC 2960, Section 6.10 Bundling
+- *
+- * Partial chunks MUST NOT be placed in an SCTP packet.
+- * If the receiver detects a partial chunk, it MUST drop
+- * the chunk.
+- *
+- * Since the end of the chunk is past the end of our buffer
+- * (which contains the whole packet, we can freely discard
+- * the whole packet.
+- */
+- sctp_chunk_free(chunk);
+- chunk = queue->in_progress = NULL;
+-
+- return NULL;
++ /* Discard inside state machine. */
++ chunk->pdiscard = 1;
++ chunk->chunk_end = skb_tail_pointer(chunk->skb);
+ } else {
+ /* We are at the end of the packet, so mark the chunk
+ * in case we need to send a SACK.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0e4198e..f94193e 100644
--- a/net/sctp/ipv6.c
@@ -106927,6 +106383,182 @@ index 6240834..cac4b52 100644
}
static int sctp_v4_protosw_init(void)
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index ae0e616..9f32741 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2609,6 +2609,9 @@ do_addr_param:
+ addr_param = param.v + sizeof(sctp_addip_param_t);
+
+ af = sctp_get_af_specific(param_type2af(param.p->type));
++ if (af == NULL)
++ break;
++
+ af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0);
+
+@@ -3110,50 +3113,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
+ return SCTP_ERROR_NO_ERROR;
+ }
+
+-/* Verify the ASCONF packet before we process it. */
+-int sctp_verify_asconf(const struct sctp_association *asoc,
+- struct sctp_paramhdr *param_hdr, void *chunk_end,
+- struct sctp_paramhdr **errp) {
+- sctp_addip_param_t *asconf_param;
++/* Verify the ASCONF packet before we process it. */
++bool sctp_verify_asconf(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, bool addr_param_needed,
++ struct sctp_paramhdr **errp)
++{
++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
+ union sctp_params param;
+- int length, plen;
++ bool addr_param_seen = false;
++
++ sctp_walk_params(param, addip, addip_hdr.params) {
++ size_t length = ntohs(param.p->length);
+
+- param.v = (sctp_paramhdr_t *) param_hdr;
+- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+- length = ntohs(param.p->length);
+ *errp = param.p;
+-
+- if (param.v > chunk_end - length ||
+- length < sizeof(sctp_paramhdr_t))
+- return 0;
+-
+ switch (param.p->type) {
++ case SCTP_PARAM_ERR_CAUSE:
++ break;
++ case SCTP_PARAM_IPV4_ADDRESS:
++ if (length != sizeof(sctp_ipv4addr_param_t))
++ return false;
++ addr_param_seen = true;
++ break;
++ case SCTP_PARAM_IPV6_ADDRESS:
++ if (length != sizeof(sctp_ipv6addr_param_t))
++ return false;
++ addr_param_seen = true;
++ break;
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_PARAM_DEL_IP:
+ case SCTP_PARAM_SET_PRIMARY:
+- asconf_param = (sctp_addip_param_t *)param.v;
+- plen = ntohs(asconf_param->param_hdr.length);
+- if (plen < sizeof(sctp_addip_param_t) +
+- sizeof(sctp_paramhdr_t))
+- return 0;
++ /* In ASCONF chunks, these need to be first. */
++ if (addr_param_needed && !addr_param_seen)
++ return false;
++ length = ntohs(param.addip->param_hdr.length);
++ if (length < sizeof(sctp_addip_param_t) +
++ sizeof(sctp_paramhdr_t))
++ return false;
+ break;
+ case SCTP_PARAM_SUCCESS_REPORT:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ if (length != sizeof(sctp_addip_param_t))
+- return 0;
+-
++ return false;
+ break;
+ default:
+- break;
++ /* This is unkown to us, reject! */
++ return false;
+ }
+-
+- param.v += WORD_ROUND(length);
+ }
+
+- if (param.v != chunk_end)
+- return 0;
++ /* Remaining sanity checks. */
++ if (addr_param_needed && !addr_param_seen)
++ return false;
++ if (!addr_param_needed && addr_param_seen)
++ return false;
++ if (param.v != chunk->chunk_end)
++ return false;
+
+- return 1;
++ return true;
+ }
+
+ /* Process an incoming ASCONF chunk with the next expected serial no. and
+@@ -3162,16 +3178,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf)
+ {
++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
++ bool all_param_pass = true;
++ union sctp_params param;
+ sctp_addiphdr_t *hdr;
+ union sctp_addr_param *addr_param;
+ sctp_addip_param_t *asconf_param;
+ struct sctp_chunk *asconf_ack;
+-
+ __be16 err_code;
+ int length = 0;
+ int chunk_len;
+ __u32 serial;
+- int all_param_pass = 1;
+
+ chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
+ hdr = (sctp_addiphdr_t *)asconf->skb->data;
+@@ -3199,9 +3216,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ goto done;
+
+ /* Process the TLVs contained within the ASCONF chunk. */
+- while (chunk_len > 0) {
++ sctp_walk_params(param, addip, addip_hdr.params) {
++ /* Skip preceeding address parameters. */
++ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
++ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
++ continue;
++
+ err_code = sctp_process_asconf_param(asoc, asconf,
+- asconf_param);
++ param.addip);
+ /* ADDIP 4.1 A7)
+ * If an error response is received for a TLV parameter,
+ * all TLVs with no response before the failed TLV are
+@@ -3209,28 +3231,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ * the failed response are considered unsuccessful unless
+ * a specific success indication is present for the parameter.
+ */
+- if (SCTP_ERROR_NO_ERROR != err_code)
+- all_param_pass = 0;
+-
++ if (err_code != SCTP_ERROR_NO_ERROR)
++ all_param_pass = false;
+ if (!all_param_pass)
+- sctp_add_asconf_response(asconf_ack,
+- asconf_param->crr_id, err_code,
+- asconf_param);
++ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
++ err_code, param.addip);
+
+ /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ * an IP address sends an 'Out of Resource' in its response, it
+ * MUST also fail any subsequent add or delete requests bundled
+ * in the ASCONF.
+ */
+- if (SCTP_ERROR_RSRC_LOW == err_code)
++ if (err_code == SCTP_ERROR_RSRC_LOW)
+ goto done;
+-
+- /* Move to the next ASCONF param. */
+- length = ntohs(asconf_param->param_hdr.length);
+- asconf_param = (void *)asconf_param + length;
+- chunk_len -= length;
+ }
+-
+ done:
+ asoc->peer.addip_serial++;
+
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fef2acd..c705c4f 100644
--- a/net/sctp/sm_sideeffect.c
@@ -106940,6 +106572,61 @@ index fef2acd..c705c4f 100644
NULL,
sctp_generate_t1_cookie_event,
sctp_generate_t1_init_event,
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index c8f6063..3ee27b7 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -170,6 +170,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
+ {
+ __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
+
++ /* Previously already marked? */
++ if (unlikely(chunk->pdiscard))
++ return 0;
+ if (unlikely(chunk_length < required_length))
+ return 0;
+
+@@ -3591,9 +3594,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+ struct sctp_chunk *asconf_ack = NULL;
+ struct sctp_paramhdr *err_param = NULL;
+ sctp_addiphdr_t *hdr;
+- union sctp_addr_param *addr_param;
+ __u32 serial;
+- int length;
+
+ if (!sctp_vtag_verify(chunk, asoc)) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
+@@ -3618,17 +3619,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
+ hdr = (sctp_addiphdr_t *)chunk->skb->data;
+ serial = ntohl(hdr->serial);
+
+- addr_param = (union sctp_addr_param *)hdr->params;
+- length = ntohs(addr_param->p.length);
+- if (length < sizeof(sctp_paramhdr_t))
+- return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+- (void *)addr_param, commands);
+-
+ /* Verify the ASCONF chunk before processing it. */
+- if (!sctp_verify_asconf(asoc,
+- (sctp_paramhdr_t *)((void *)addr_param + length),
+- (void *)chunk->chunk_end,
+- &err_param))
++ if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
+@@ -3745,10 +3737,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
+ rcvd_serial = ntohl(addip_hdr->serial);
+
+ /* Verify the ASCONF-ACK chunk before processing it. */
+- if (!sctp_verify_asconf(asoc,
+- (sctp_paramhdr_t *)addip_hdr->params,
+- (void *)asconf_ack->chunk_end,
+- &err_param))
++ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
+ return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
+ (void *)err_param, commands);
+
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 634a2ab..8e93929 100644
--- a/net/sctp/socket.c
@@ -107375,10 +107062,10 @@ index 0663621..c4928d4 100644
goto out_nomem;
cd->u.procfs.channel_ent = NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
-index 488ddee..1b31487 100644
+index e0b94ce..6135813 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
-@@ -1425,7 +1425,9 @@ call_start(struct rpc_task *task)
+@@ -1428,7 +1428,9 @@ call_start(struct rpc_task *task)
(RPC_IS_ASYNC(task) ? "async" : "sync"));
/* Increment call count */
@@ -108617,6 +108304,45 @@ index 35d5a58..9e04789 100644
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 1395760..e4f4ac4 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -82,6 +82,16 @@ echo ""
+ fi
+
+ echo "%install"
++echo 'chmod -f 0500 /boot'
++echo 'if [ -d /lib/modules ]; then'
++echo 'chmod -f 0500 /lib/modules'
++echo 'fi'
++echo 'if [ -d /lib32/modules ]; then'
++echo 'chmod -f 0500 /lib32/modules'
++echo 'fi'
++echo 'if [ -d /lib64/modules ]; then'
++echo 'chmod -f 0500 /lib64/modules'
++echo 'fi'
+ echo 'KBUILD_IMAGE=$(make image_name)'
+ echo "%ifarch ia64"
+ echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
+@@ -139,7 +149,7 @@ echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm
+ echo "fi"
+ echo ""
+ echo "%files"
+-echo '%defattr (-, root, root)'
++echo '%defattr (400, root, root, 500)'
+ echo "%dir /lib/modules"
+ echo "/lib/modules/$KERNELRELEASE"
+ echo "%exclude /lib/modules/$KERNELRELEASE/build"
+@@ -152,7 +162,7 @@ echo '%defattr (-, root, root)'
+ echo "/usr/include"
+ echo ""
+ echo "%files devel"
+-echo '%defattr (-, root, root)'
++echo '%defattr (400, root, root, 500)'
+ echo "/usr/src/kernels/$KERNELRELEASE"
+ echo "/lib/modules/$KERNELRELEASE/build"
+ echo "/lib/modules/$KERNELRELEASE/source"
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 4718d78..9220d58 100644
--- a/scripts/pnmtologo.c
@@ -110026,7 +109752,7 @@ index a18f1fa..c9b9fc4 100644
lock = &avc_cache.slots_lock[hvalue];
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index b0e9404..b15da09 100644
+index e03bad5..b15da09 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -95,8 +95,6 @@
@@ -110038,22 +109764,6 @@ index b0e9404..b15da09 100644
/* SECMARK reference count */
static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
-@@ -481,6 +479,7 @@ next_inode:
- list_entry(sbsec->isec_head.next,
- struct inode_security_struct, list);
- struct inode *inode = isec->inode;
-+ list_del_init(&isec->list);
- spin_unlock(&sbsec->isec_lock);
- inode = igrab(inode);
- if (inode) {
-@@ -489,7 +488,6 @@ next_inode:
- iput(inode);
- }
- spin_lock(&sbsec->isec_lock);
-- list_del_init(&isec->list);
- goto next_inode;
- }
- spin_unlock(&sbsec->isec_lock);
@@ -5772,7 +5770,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
#endif
@@ -110456,7 +110166,7 @@ index ada69d7..5f65386 100644
}
} else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
-index 102e8fd..7263bb8 100644
+index 2d957ba..fda022c 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
@@ -110983,7 +110693,7 @@ index 81c916a..516f0bf 100644
chip->pci = pci;
chip->irq = -1;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
-index d074aa9..ce3cc44 100644
+index a3e0a0d..ab98399 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2286,8 +2286,10 @@ int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
@@ -118794,10 +118504,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..06374de
+index 0000000..f527934
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5866 @@
+@@ -0,0 +1,5911 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
@@ -119148,6 +118858,7 @@ index 0000000..06374de
+SyS_move_pages_3920 SyS_move_pages 2 3920 NULL
+hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL
+brcmf_debugfs_fws_stats_read_3947 brcmf_debugfs_fws_stats_read 3 3947 NULL
++mite_bytes_written_to_memory_lb_3987 mite_bytes_written_to_memory_lb 0 3987 NULL
+copy_from_user_atomic_iovec_3990 copy_from_user_atomic_iovec 0-4 3990 NULL
+do_add_counters_3992 do_add_counters 3 3992 NULL
+userspace_status_4004 userspace_status 4 4004 NULL
@@ -119218,6 +118929,7 @@ index 0000000..06374de
+C_SYSC_setsockopt_4806 C_SYSC_setsockopt 5 4806 NULL
+repair_io_failure_4815 repair_io_failure 4-3 4815 NULL
+scsi_end_request_4839 scsi_end_request 3-4 4839 NULL
++comedi_buf_write_free_4847 comedi_buf_write_free 2 4847 NULL
+gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
+key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
+ocfs2_defrag_extent_4873 ocfs2_defrag_extent 2 4873 NULL
@@ -119354,6 +119066,7 @@ index 0000000..06374de
+fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
+SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL
+ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
++xfs_do_div_6649 xfs_do_div 0-2 6649 NULL
+process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
+btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3 6696 NULL
+ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
@@ -119383,6 +119096,7 @@ index 0000000..06374de
+spi_show_regs_6911 spi_show_regs 3 6911 &proc_sessionid_read_6911 nohasharray
+acm_alloc_minor_6911 acm_alloc_minor 0 6911 &spi_show_regs_6911
+__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
++do_msgrcv_6921 do_msgrcv 3 6921 NULL
+cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
+ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
+qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
@@ -119475,6 +119189,7 @@ index 0000000..06374de
+qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 NULL
+venus_lookup_8121 venus_lookup 4 8121 NULL
+ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
++xfs_file_fallocate_8150 xfs_file_fallocate 3-4 8150 NULL
+ufshcd_wait_for_dev_cmd_8168 ufshcd_wait_for_dev_cmd 0 8168 NULL
+__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
+ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
@@ -119768,6 +119483,7 @@ index 0000000..06374de
+nouveau_gpio_create__11048 nouveau_gpio_create_ 4 11048 NULL
+tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
+insert_inline_extent_backref_11063 insert_inline_extent_backref 8 11063 NULL
++xfs_collapse_file_space_11075 xfs_collapse_file_space 2-3 11075 NULL
+tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
+count_argc_11083 count_argc 0 11083 NULL
+kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
@@ -119959,6 +119675,7 @@ index 0000000..06374de
+ufshcd_compose_upiu_13076 ufshcd_compose_upiu 0 13076 NULL
+xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
+ttm_dma_pool_alloc_new_pages_13105 ttm_dma_pool_alloc_new_pages 3 13105 NULL
++SyS_msgrcv_13109 SyS_msgrcv 3 13109 NULL
+snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
+bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
+blk_update_request_13146 blk_update_request 3 13146 NULL
@@ -119998,6 +119715,7 @@ index 0000000..06374de
+sb_init_dio_done_wq_13482 sb_init_dio_done_wq 0 13482 NULL
+data_read_13494 data_read 3 13494 NULL nohasharray
+ext_prop_data_store_13494 ext_prop_data_store 3 13494 &data_read_13494
++ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 0-2 13512 NULL
+core_status_13515 core_status 4 13515 NULL
+smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
+bm_init_13529 bm_init 2 13529 NULL
@@ -120086,6 +119804,7 @@ index 0000000..06374de
+snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
+ath10k_write_htt_stats_mask_14458 ath10k_write_htt_stats_mask 3 14458 NULL
+lustre_msg_size_v2_14470 lustre_msg_size_v2 0-1 14470 NULL
++dma_transfer_size_14473 dma_transfer_size 0 14473 NULL
+udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
+ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 NULL
+ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
@@ -120441,7 +120160,8 @@ index 0000000..06374de
+snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
+fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
+regset_tls_set_18459 regset_tls_set 4 18459 NULL
-+pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL
++pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL nohasharray
++mite_bytes_in_transit_18479 mite_bytes_in_transit 0 18479 &pci_vpd_lrdt_size_18479
+udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 NULL
+btrfs_fiemap_18501 btrfs_fiemap 3 18501 NULL
+__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
@@ -120539,6 +120259,7 @@ index 0000000..06374de
+ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
+batadv_tvlv_container_register_19520 batadv_tvlv_container_register 5 19520 NULL
+ttm_dma_page_pool_free_19527 ttm_dma_page_pool_free 2-0 19527 NULL
++cfc_write_array_to_buffer_19529 cfc_write_array_to_buffer 3 19529 NULL
+nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
+gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
+ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
@@ -120708,6 +120429,7 @@ index 0000000..06374de
+cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
+fru_length_21257 fru_length 0 21257 NULL
+rtw_set_wps_beacon_21262 rtw_set_wps_beacon 3 21262 NULL
++ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
+drm_universal_plane_init_21296 drm_universal_plane_init 6 21296 NULL
+do_msg_fill_21307 do_msg_fill 3 21307 NULL
+add_res_range_21310 add_res_range 4 21310 NULL
@@ -120740,6 +120462,7 @@ index 0000000..06374de
+snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
+ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
+filemap_get_page_21606 filemap_get_page 2 21606 NULL
++ocfs2_refcount_cow_hunk_21630 ocfs2_refcount_cow_hunk 3-4 21630 NULL
+__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
+atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
+ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
@@ -120794,6 +120517,7 @@ index 0000000..06374de
+mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
+lov_setstripe_22307 lov_setstripe 2 22307 NULL
+udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
++C_SYSC_msgrcv_22320 C_SYSC_msgrcv 3 22320 NULL
+atomic_read_22342 atomic_read 0 22342 NULL
+ll_lazystatfs_seq_write_22353 ll_lazystatfs_seq_write 3 22353 NULL
+snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
@@ -120819,6 +120543,7 @@ index 0000000..06374de
+wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
+pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
+iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
++compat_SyS_msgrcv_22661 compat_SyS_msgrcv 3 22661 NULL
+l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 NULL
+bch_dump_read_22685 bch_dump_read 3 22685 NULL
+reg_umr_22686 reg_umr 5 22686 NULL
@@ -120854,8 +120579,10 @@ index 0000000..06374de
+remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
+viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
+cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
++ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 6-7 23029 NULL
+st_status_23032 st_status 5 23032 NULL
+nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
++comedi_buf_write_n_available_23057 comedi_buf_write_n_available 0 23057 NULL
+reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
+unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
+mei_cl_send_23068 mei_cl_send 3 23068 NULL
@@ -120966,6 +120693,7 @@ index 0000000..06374de
+trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
+adu_read_24177 adu_read 3 24177 NULL
+safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
++nv94_aux_24197 nv94_aux 3-6 24197 NULL
+ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
+tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
+pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL nohasharray
@@ -121020,6 +120748,7 @@ index 0000000..06374de
+simple_attr_read_24738 simple_attr_read 3 24738 NULL
+qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
+get_dma_residue_24749 get_dma_residue 0 24749 NULL
++ocfs2_cow_file_pos_24751 ocfs2_cow_file_pos 3 24751 NULL
+kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
+ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
+datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
@@ -121232,6 +120961,7 @@ index 0000000..06374de
+seq_read_27411 seq_read 3 27411 NULL
+ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL
+ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
++ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 3-4 27422 NULL
+cypress_write_27423 cypress_write 4 27423 NULL
+sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
+v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
@@ -121326,6 +121056,7 @@ index 0000000..06374de
+subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
+__videobuf_mmap_setup_28421 __videobuf_mmap_setup 0 28421 NULL
+ksocknal_alloc_tx_28426 ksocknal_alloc_tx 2 28426 NULL
++hid_hw_output_report_28429 hid_hw_output_report 0 28429 NULL
+mpage_readpages_28436 mpage_readpages 3 28436 NULL
+snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
+key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL
@@ -121457,6 +121188,7 @@ index 0000000..06374de
+ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
+crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
+rtw_cfg80211_indicate_sta_assoc_29897 rtw_cfg80211_indicate_sta_assoc 3 29897 NULL
++nv94_gpio_intr_mask_29907 nv94_gpio_intr_mask 4-3 29907 NULL
+lov_ost_pool_extend_29914 lov_ost_pool_extend 2 29914 NULL
+write_file_queue_29922 write_file_queue 3 29922 NULL
+__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
@@ -121469,6 +121201,7 @@ index 0000000..06374de
+cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
+snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
+rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
++defragment_dma_buffer_30113 defragment_dma_buffer 0 30113 NULL
+spi_async_locked_30117 spi_async_locked 0 30117 NULL
+u_memcpya_30139 u_memcpya 3-2 30139 NULL
+dbg_port_buf_30145 dbg_port_buf 2 30145 NULL
@@ -121489,6 +121222,7 @@ index 0000000..06374de
+tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
+osc_contention_seconds_seq_write_30305 osc_contention_seconds_seq_write 3 30305 NULL
+ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
++i8254_read_30330 i8254_read 0 30330 NULL
+resource_from_user_30341 resource_from_user 3 30341 NULL
+o2nm_this_node_30342 o2nm_this_node 0 30342 NULL
+kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL
@@ -121515,12 +121249,14 @@ index 0000000..06374de
+set_le_30581 set_le 4 30581 NULL
+blk_init_tags_30592 blk_init_tags 1 30592 NULL
+sgl_map_user_pages_30610 sgl_map_user_pages 2 30610 NULL
++SyS_msgrcv_30611 SyS_msgrcv 3 30611 NULL
+macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
+ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL
+compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
+mlx5_ib_alloc_fast_reg_page_list_30638 mlx5_ib_alloc_fast_reg_page_list 2 30638 NULL
+SyS_listxattr_30647 SyS_listxattr 3 30647 NULL
+jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
++ni_ai_fifo_read_30681 ni_ai_fifo_read 3 30681 NULL
+sst_hsw_get_dsp_position_30691 sst_hsw_get_dsp_position 0 30691 NULL
+get_pages_alloc_iovec_30699 get_pages_alloc_iovec 3-0 30699 NULL
+dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
@@ -121908,6 +121644,7 @@ index 0000000..06374de
+__inode_permission_34925 __inode_permission 0 34925 &btrfs_super_chunk_root_34925
+sec_flags2str_34933 sec_flags2str 3 34933 NULL
+snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
++compat_SyS_kexec_load_34947 compat_SyS_kexec_load 2 34947 NULL
+do_add_page_to_bio_34974 do_add_page_to_bio 2-10 34974 NULL
+sdebug_change_qdepth_34994 sdebug_change_qdepth 2 34994 NULL
+rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
@@ -121963,6 +121700,7 @@ index 0000000..06374de
+ocfs2_write_zero_page_35539 ocfs2_write_zero_page 3 35539 NULL
+ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
+ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
++C_SYSC_kexec_load_35565 C_SYSC_kexec_load 2 35565 NULL
+ext4_blocks_for_truncate_35579 ext4_blocks_for_truncate 0 35579 NULL
+ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
+spk_msg_set_35586 spk_msg_set 3 35586 NULL
@@ -122213,7 +121951,8 @@ index 0000000..06374de
+_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
+nvkm_dmaobj_create__38250 nvkm_dmaobj_create_ 6 38250 NULL
+mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 NULL nohasharray
-+ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268
++ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &mthca_alloc_icm_table_38268 nohasharray
++SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 &ieee80211_if_read_auto_open_plinks_38268
+xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 NULL nohasharray
+xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 &xfs_bmbt_to_bmdr_38275
+ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
@@ -122224,6 +121963,7 @@ index 0000000..06374de
+__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
+usb_ext_prop_put_name_38352 usb_ext_prop_put_name 0-3 38352 NULL
+btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
++xfs_free_file_space_38383 xfs_free_file_space 2-3 38383 NULL
+dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
+ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
+pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
@@ -122380,7 +122120,7 @@ index 0000000..06374de
+ocrdma_dbgfs_ops_read_40232 ocrdma_dbgfs_ops_read 3 40232 NULL
+osst_read_40237 osst_read 3 40237 NULL
+lpage_info_slot_40243 lpage_info_slot 1-3 40243 NULL
-+ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4 40248 NULL
++ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4-3 40248 NULL
+of_get_child_count_40254 of_get_child_count 0 40254 NULL nohasharray
+fsl_edma_prep_dma_cyclic_40254 fsl_edma_prep_dma_cyclic 3-4 40254 &of_get_child_count_40254
+rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
@@ -122533,6 +122273,7 @@ index 0000000..06374de
+keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
+pci_map_single_41869 pci_map_single 0 41869 NULL
+usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
++v_APCI3120_InterruptDmaMoveBlock16bit_41914 v_APCI3120_InterruptDmaMoveBlock16bit 4 41914 NULL
+get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
+ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 4-3 41935 NULL
+sci_rxfill_41945 sci_rxfill 0 41945 NULL
@@ -122587,6 +122328,7 @@ index 0000000..06374de
+snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
+tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
+kuc_free_42455 kuc_free 2 42455 NULL
++cp2112_gpio_get_42467 cp2112_gpio_get 2 42467 NULL
+__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
+omfs_readpages_42490 omfs_readpages 4 42490 NULL
+bypass_write_42498 bypass_write 3 42498 NULL
@@ -122665,6 +122407,7 @@ index 0000000..06374de
+mmu_set_spte_43327 mmu_set_spte 7-6 43327 NULL
+__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
+xenfb_write_43412 xenfb_write 3 43412 NULL
++ext4_xattr_check_names_43422 ext4_xattr_check_names 0 43422 NULL
+__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
+usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
+ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
@@ -122685,6 +122428,7 @@ index 0000000..06374de
+handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
+lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
+proc_read_43614 proc_read 3 43614 NULL
++disable_dma_on_even_43618 disable_dma_on_even 0 43618 NULL
+alloc_thread_groups_43625 alloc_thread_groups 2 43625 NULL
+random_write_43656 random_write 3 43656 NULL
+bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
@@ -122820,7 +122564,7 @@ index 0000000..06374de
+cfs_trace_daemon_command_usrstr_45147 cfs_trace_daemon_command_usrstr 2 45147 NULL
+gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
+device_write_45156 device_write 3 45156 NULL nohasharray
-+ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3 45156 &device_write_45156
++ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 &device_write_45156
+tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
+sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
+snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 NULL nohasharray
@@ -123123,6 +122867,7 @@ index 0000000..06374de
+_iwl_dbgfs_bt_tx_prio_write_48473 _iwl_dbgfs_bt_tx_prio_write 3 48473 NULL
+ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
+r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 NULL
++ocfs2_refcount_cow_48495 ocfs2_refcount_cow 3 48495 NULL
+send_control_msg_48498 send_control_msg 6 48498 NULL
+count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
+diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
@@ -123151,6 +122896,7 @@ index 0000000..06374de
+atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
+azx_get_position_48841 azx_get_position 0 48841 NULL
+vc_do_resize_48842 vc_do_resize 3-4 48842 NULL
++comedi_buf_write_alloc_48846 comedi_buf_write_alloc 0-2 48846 NULL
+suspend_dtim_interval_write_48854 suspend_dtim_interval_write 3 48854 NULL
+viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 NULL nohasharray
+C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 &viafb_dvp1_proc_write_48864
@@ -123634,6 +123380,7 @@ index 0000000..06374de
+bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
+altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL nohasharray
+lustre_posix_acl_xattr_filter_54103 lustre_posix_acl_xattr_filter 2 54103 &altera_set_ir_pre_54103
++__comedi_buf_write_alloc_54112 __comedi_buf_write_alloc 0-2 54112 NULL
+strn_len_54122 strn_len 0 54122 NULL
+isku_receive_54130 isku_receive 4 54130 NULL
+isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
@@ -123851,6 +123598,7 @@ index 0000000..06374de
+journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
+snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
+vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
++mite_device_bytes_transferred_56355 mite_device_bytes_transferred 0 56355 NULL
+iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 0-4 56368 NULL
+dev_read_56369 dev_read 3 56369 NULL
+ath10k_read_simulate_fw_crash_56371 ath10k_read_simulate_fw_crash 3 56371 NULL
@@ -124022,6 +123770,7 @@ index 0000000..06374de
+ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
+ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
+iov_iter_npages_57979 iov_iter_npages 0-2 57979 NULL
++do_rx_dma_57996 do_rx_dma 5 57996 NULL
+rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
+iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
+io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
@@ -124050,6 +123799,7 @@ index 0000000..06374de
+lstcon_rpc_prep_58325 lstcon_rpc_prep 4 58325 NULL
+ext4_ext_truncate_extend_restart_58331 ext4_ext_truncate_extend_restart 3 58331 NULL
+__copy_from_user_swizzled_58337 __copy_from_user_swizzled 2-4 58337 NULL
++ec_i2c_parse_response_58347 ec_i2c_parse_response 0 58347 NULL
+brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 NULL
+il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
+_drbd_md_sync_page_io_58403 _drbd_md_sync_page_io 6 58403 NULL
@@ -124167,7 +123917,8 @@ index 0000000..06374de
+mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
+ioperm_get_59701 ioperm_get 4-3 59701 NULL
+prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
-+ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL
++ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL nohasharray
++nv94_aux_mask_59740 nv94_aux_mask 2 59740 &ieee80211_if_read_fwded_unicast_59740
+qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
+strnlen_59746 strnlen 0 59746 NULL
+ext3_acl_count_59754 ext3_acl_count 0-1 59754 NULL
@@ -124311,6 +124062,7 @@ index 0000000..06374de
+f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
+debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
+system_enable_write_61396 system_enable_write 3 61396 NULL
++xfs_zero_remaining_bytes_61423 xfs_zero_remaining_bytes 3 61423 NULL
+unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
+snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
+btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
@@ -124365,6 +124117,7 @@ index 0000000..06374de
+il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
+squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
+fix_read_error_61965 fix_read_error 4 61965 NULL
++ocfs2_quota_write_61972 ocfs2_quota_write 4-5 61972 NULL
+fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
+cow_file_range_61979 cow_file_range 3 61979 NULL
+dequeue_event_62000 dequeue_event 3 62000 NULL
@@ -124511,7 +124264,8 @@ index 0000000..06374de
+mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
+copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
+C_SYSC_process_vm_readv_63811 C_SYSC_process_vm_readv 3-5 63811 NULL
-+regmap_multi_reg_write_63826 regmap_multi_reg_write 3 63826 NULL
++regmap_multi_reg_write_63826 regmap_multi_reg_write 3 63826 NULL nohasharray
++prepare_copy_63826 prepare_copy 2 63826 &regmap_multi_reg_write_63826
+sel_write_load_63830 sel_write_load 3 63830 NULL
+proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
+nv10_gpio_intr_mask_63862 nv10_gpio_intr_mask 4-3 63862 NULL
@@ -124613,7 +124367,8 @@ index 0000000..06374de
+isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
+regmap_reg_ranges_read_file_64798 regmap_reg_ranges_read_file 3 64798 NULL
+nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
-+rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL
++rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL nohasharray
++nv_mask_64808 nv_mask 0 64808 &rfkill_fop_write_64808
+proc_projid_map_write_64810 proc_projid_map_write 3 64810 NULL
+megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
+ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
@@ -126135,44 +125890,6 @@ index 0a578fe..b81f62d 100644
0; \
})
-diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
-index 714b949..1f0dc1e 100644
---- a/virt/kvm/iommu.c
-+++ b/virt/kvm/iommu.c
-@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages);
-
- static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
-- unsigned long size)
-+ unsigned long npages)
- {
- gfn_t end_gfn;
- pfn_t pfn;
-
- pfn = gfn_to_pfn_memslot(slot, gfn);
-- end_gfn = gfn + (size >> PAGE_SHIFT);
-+ end_gfn = gfn + npages;
- gfn += 1;
-
- if (is_error_noslot_pfn(pfn))
-@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
- * Pin all pages we are about to map in memory. This is
- * important because we unmap and unpin in 4kb steps later.
- */
-- pfn = kvm_pin_pages(slot, gfn, page_size);
-+ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
- if (is_error_noslot_pfn(pfn)) {
- gfn += 1;
- continue;
-@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
- if (r) {
- printk(KERN_ERR "kvm_iommu_map_address:"
- "iommu failed to map pfn=%llx\n", pfn);
-- kvm_unpin_pages(kvm, pfn, page_size);
-+ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
- goto unmap_pages;
- }
-
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6a3f29b..a1d2e93 100644
--- a/virt/kvm/kvm_main.c
diff --git a/3.17.2/4425_grsec_remove_EI_PAX.patch b/3.17.3/4425_grsec_remove_EI_PAX.patch
index fc51f79..fc51f79 100644
--- a/3.17.2/4425_grsec_remove_EI_PAX.patch
+++ b/3.17.3/4425_grsec_remove_EI_PAX.patch
diff --git a/3.17.2/4427_force_XATTR_PAX_tmpfs.patch b/3.17.3/4427_force_XATTR_PAX_tmpfs.patch
index 21c0171..21c0171 100644
--- a/3.17.2/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.17.3/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.17.2/4430_grsec-remove-localversion-grsec.patch b/3.17.3/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.17.2/4430_grsec-remove-localversion-grsec.patch
+++ b/3.17.3/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.17.2/4435_grsec-mute-warnings.patch b/3.17.3/4435_grsec-mute-warnings.patch
index 4a959cc..4a959cc 100644
--- a/3.17.2/4435_grsec-mute-warnings.patch
+++ b/3.17.3/4435_grsec-mute-warnings.patch
diff --git a/3.17.2/4440_grsec-remove-protected-paths.patch b/3.17.3/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.17.2/4440_grsec-remove-protected-paths.patch
+++ b/3.17.3/4440_grsec-remove-protected-paths.patch
diff --git a/3.17.2/4450_grsec-kconfig-default-gids.patch b/3.17.3/4450_grsec-kconfig-default-gids.patch
index 8a63d7f..8a63d7f 100644
--- a/3.17.2/4450_grsec-kconfig-default-gids.patch
+++ b/3.17.3/4450_grsec-kconfig-default-gids.patch
diff --git a/3.17.2/4465_selinux-avc_audit-log-curr_ip.patch b/3.17.3/4465_selinux-avc_audit-log-curr_ip.patch
index 747ac53..747ac53 100644
--- a/3.17.2/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.17.3/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.17.2/4470_disable-compat_vdso.patch b/3.17.3/4470_disable-compat_vdso.patch
index dec59f7..dec59f7 100644
--- a/3.17.2/4470_disable-compat_vdso.patch
+++ b/3.17.3/4470_disable-compat_vdso.patch
diff --git a/3.17.2/4475_emutramp_default_on.patch b/3.17.3/4475_emutramp_default_on.patch
index cf88fd9..cf88fd9 100644
--- a/3.17.2/4475_emutramp_default_on.patch
+++ b/3.17.3/4475_emutramp_default_on.patch
diff --git a/3.2.64/0000_README b/3.2.64/0000_README
index 4dc0dd8..ebfeeef 100644
--- a/3.2.64/0000_README
+++ b/3.2.64/0000_README
@@ -174,7 +174,7 @@ Patch: 1063_linux-3.2.64.patch
From: http://www.kernel.org
Desc: Linux 3.2.64
-Patch: 4420_grsecurity-3.0-3.2.64-201411062032.patch
+Patch: 4420_grsecurity-3.0-3.2.64-201411150025.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.64/4420_grsecurity-3.0-3.2.64-201411062032.patch b/3.2.64/4420_grsecurity-3.0-3.2.64-201411150025.patch
index 7cb2c8e..c4ca76e 100644
--- a/3.2.64/4420_grsecurity-3.0-3.2.64-201411062032.patch
+++ b/3.2.64/4420_grsecurity-3.0-3.2.64-201411150025.patch
@@ -278,7 +278,7 @@ index 88fd7f5..b318a78 100644
==============================================================
diff --git a/Makefile b/Makefile
-index 2b58ffc..895bdb8 100644
+index 2b58ffc..6be5392 100644
--- a/Makefile
+++ b/Makefile
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -464,6 +464,15 @@ index 2b58ffc..895bdb8 100644
# Target to install modules
PHONY += modules_install
+@@ -1130,7 +1213,7 @@ _modinst_:
+ # boot a modules.dep even before / is mounted read-write. However the
+ # boot script depmod is the master version.
+ PHONY += _modinst_post
+-_modinst_post: _modinst_
++_modinst_post: include/config/kernel.release _modinst_
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modinst
+ $(call cmd,depmod)
+
@@ -1166,6 +1249,9 @@ MRPROPER_DIRS += include/config usr/include include/generated \
arch/*/include/generated
MRPROPER_FILES += .config .config.old .version .old_version \
@@ -483,7 +492,15 @@ index 2b58ffc..895bdb8 100644
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
-type f -print | xargs rm -f
-@@ -1363,6 +1449,8 @@ PHONY += $(module-dirs) modules
+@@ -1256,6 +1342,7 @@ help:
+ @echo ' gtags - Generate GNU GLOBAL index'
+ @echo ' kernelrelease - Output the release version string'
+ @echo ' kernelversion - Output the version stored in Makefile'
++ @echo ' image_name - Output the image name'
+ @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
+ echo ' (default: $(INSTALL_HDR_PATH))'; \
+ echo ''
+@@ -1363,6 +1450,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -492,7 +509,35 @@ index 2b58ffc..895bdb8 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1489,17 +1577,21 @@ else
+@@ -1449,7 +1538,7 @@ export_report:
+ endif #ifeq ($(config-targets),1)
+ endif #ifeq ($(mixed-targets),1)
+
+-PHONY += checkstack kernelrelease kernelversion
++PHONY += checkstack kernelrelease kernelversion image_name
+
+ # UML needs a little special treatment here. It wants to use the host
+ # toolchain, so needs $(SUBARCH) passed to checkstack.pl. Everyone
+@@ -1470,6 +1559,18 @@ kernelrelease:
+ kernelversion:
+ @echo $(KERNELVERSION)
+
++image_name:
++ @echo $(KBUILD_IMAGE)
++
++# Clear a bunch of variables before executing the submake
++tools/: FORCE
++ $(Q)mkdir -p $(objtree)/tools
++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/
++
++tools/%: FORCE
++ $(Q)mkdir -p $(objtree)/tools
++ $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/ $*
++
+ # Single targets
+ # ---------------------------------------------------------------------------
+ # Single targets are compatible with:
+@@ -1489,17 +1590,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -518,7 +563,7 @@ index 2b58ffc..895bdb8 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1509,11 +1601,15 @@ endif
+@@ -1509,11 +1614,15 @@ endif
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
@@ -25149,7 +25194,7 @@ index 578b1c6..5a7039c 100644
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 2d7d0df..4476198 100644
+index 2d7d0df..1c1bd67 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1369,8 +1369,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -25228,6 +25273,15 @@ index 2d7d0df..4476198 100644
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
+@@ -4846,7 +4857,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+
+ ++vcpu->stat.insn_emulation_fail;
+ trace_kvm_emulate_insn_failed(vcpu);
+- if (!is_guest_mode(vcpu)) {
++ if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
@@ -5209,7 +5220,7 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask);
}
@@ -37699,7 +37753,7 @@ index 85661b0..cdd4560 100644
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
-index b97d4f0..7578a4d 100644
+index b97d4f0..86be331 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
@@ -37712,6 +37766,16 @@ index b97d4f0..7578a4d 100644
return -EINVAL;
r = kmalloc(sizeof(*r), GFP_KERNEL);
+@@ -1605,8 +1604,7 @@ static int dispatch_ioctl(struct client *client,
+ _IOC_SIZE(cmd) > sizeof(buffer))
+ return -ENOTTY;
+
+- if (_IOC_DIR(cmd) == _IOC_READ)
+- memset(&buffer, 0, _IOC_SIZE(cmd));
++ memset(&buffer, 0, sizeof(buffer));
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 1f3dd51..1ad071c 100644
--- a/drivers/firewire/core-device.c
@@ -43444,6 +43508,20 @@ index 0564192..75b16f5 100644
NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
+diff --git a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
+index 21260aa..852870b 100644
+--- a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
++++ b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
+@@ -154,6 +154,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
++ if (cmd->msg_len > sizeof(b) - 4)
++ return -EINVAL;
++
+ memcpy(&b[4], cmd->msg, cmd->msg_len);
+
+ state->config->send_command(fe, 0x72,
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 16a089f..1661b11 100644
--- a/drivers/media/radio/radio-cadet.c
@@ -49818,6 +49896,58 @@ index 66a34ad..65f6aea 100644
/* A userspace program has probably made an error if it tries to
* read something that is not a whole number of bpds.
+diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
+index 851b762..9cdf4528 100644
+--- a/drivers/staging/line6/driver.c
++++ b/drivers/staging/line6/driver.c
+@@ -551,7 +551,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ {
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+- unsigned char len;
++ unsigned char *plen;
+
+ /* query the serial number: */
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+@@ -564,27 +564,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
+ return ret;
+ }
+
++ plen = kmalloc(1, GFP_KERNEL);
++ if (plen == NULL)
++ return -ENOMEM;
++
+ /* Wait for data length. We'll get a couple of 0xff until length arrives. */
+ do {
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+- 0x0012, 0x0000, &len, 1,
++ 0x0012, 0x0000, plen, 1,
+ LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receive length failed (error %d)\n", ret);
++ kfree(plen);
+ return ret;
+ }
+- } while (len == 0xff);
++ } while (*plen == 0xff);
+
+- if (len != datalen) {
++ if (*plen != datalen) {
+ /* should be equal or something went wrong */
+ dev_err(line6->ifcdev,
+ "length mismatch (expected %d, got %d)\n",
+- (int)datalen, (int)len);
++ (int)datalen, (int)*plen);
++ kfree(plen);
+ return -EINVAL;
+ }
++ kfree(plen);
+
+ /* receive the result: */
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
diff --git a/drivers/staging/media/solo6x10/g723.c b/drivers/staging/media/solo6x10/g723.c
index 2cd0de2..0169c04 100644
--- a/drivers/staging/media/solo6x10/g723.c
@@ -84966,7 +85096,7 @@ index c7c40f1..5c31482 100644
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
-index d61febf..f0094f6 100644
+index d61febfb..f0094f6 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
@@ -101583,6 +101713,323 @@ index ba873c3..3b00036 100644
if (!can_dir) {
printk(KERN_INFO "can: failed to create /proc/net/can . "
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index 85f3bc0..21e777b 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+
+ static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
+
++/*
++ * Should be used for buffers allocated with ceph_kvmalloc().
++ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
++ * in-buffer (msg front).
++ *
++ * Dispose of @sgt with teardown_sgtable().
++ *
++ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
++ * in cases where a single sg is sufficient. No attempt to reduce the
++ * number of sgs by squeezing physically contiguous pages together is
++ * made though, for simplicity.
++ */
++static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
++ const void *buf, unsigned int buf_len)
++{
++ struct scatterlist *sg;
++ const bool is_vmalloc = is_vmalloc_addr(buf);
++ unsigned int off = offset_in_page(buf);
++ unsigned int chunk_cnt = 1;
++ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
++ int i;
++ int ret;
++
++ if (buf_len == 0) {
++ memset(sgt, 0, sizeof(*sgt));
++ return -EINVAL;
++ }
++
++ if (is_vmalloc) {
++ chunk_cnt = chunk_len >> PAGE_SHIFT;
++ chunk_len = PAGE_SIZE;
++ }
++
++ if (chunk_cnt > 1) {
++ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
++ if (ret)
++ return ret;
++ } else {
++ WARN_ON(chunk_cnt != 1);
++ sg_init_table(prealloc_sg, 1);
++ sgt->sgl = prealloc_sg;
++ sgt->nents = sgt->orig_nents = 1;
++ }
++
++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
++ struct page *page;
++ unsigned int len = min(chunk_len - off, buf_len);
++
++ if (is_vmalloc)
++ page = vmalloc_to_page(buf);
++ else
++ page = virt_to_page(buf);
++
++ sg_set_page(sg, page, len, off);
++
++ off = 0;
++ buf += len;
++ buf_len -= len;
++ }
++ WARN_ON(buf_len != 0);
++
++ return 0;
++}
++
++static void teardown_sgtable(struct sg_table *sgt)
++{
++ if (sgt->orig_nents > 1)
++ sg_free_table(sgt);
++}
++
+ static int ceph_aes_encrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[2], sg_out[1];
++ struct scatterlist sg_in[2], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+
+ *dst_len = src_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], src, src_len);
+ sg_set_buf(&sg_in[1], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len)
+ {
+- struct scatterlist sg_in[3], sg_out[1];
++ struct scatterlist sg_in[3], prealloc_sg;
++ struct sg_table sg_out;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+ int ret;
+@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+
+ *dst_len = src1_len + src2_len + zero_padding;
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ sg_init_table(sg_in, 3);
+ sg_set_buf(&sg_in[0], src1, src1_len);
+ sg_set_buf(&sg_in[1], src2, src2_len);
+ sg_set_buf(&sg_in[2], pad, zero_padding);
+- sg_init_table(sg_out, 1);
+- sg_set_buf(sg_out, dst, *dst_len);
++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
++
+ /*
+ print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+ key, key_len, 1);
+@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+ pad, zero_padding, 1);
+ */
+- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
+ src1_len + src2_len + zero_padding);
+- crypto_free_blkcipher(tfm);
+- if (ret < 0)
++ if (ret < 0) {
+ pr_err("ceph_aes_crypt2 failed %d\n", ret);
++ goto out_sg;
++ }
+ /*
+ print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_out);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[2];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[2], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- crypto_blkcipher_setkey((void *)tfm, key, key_len);
+- sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+- sg_set_buf(sg_in, src, src_len);
+ sg_set_buf(&sg_out[0], dst, *dst_len);
+ sg_set_buf(&sg_out[1], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
++ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst_len)
+@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
+ dst, *dst_len, 1);
+ */
+- return 0;
++
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+ static int ceph_aes_decrypt2(const void *key, int key_len,
+@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ void *dst2, size_t *dst2_len,
+ const void *src, size_t src_len)
+ {
+- struct scatterlist sg_in[1], sg_out[3];
++ struct sg_table sg_in;
++ struct scatterlist sg_out[3], prealloc_sg;
+ struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
+ struct blkcipher_desc desc = { .tfm = tfm };
+ char pad[16];
+@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+- sg_init_table(sg_in, 1);
+- sg_set_buf(sg_in, src, src_len);
+ sg_init_table(sg_out, 3);
+ sg_set_buf(&sg_out[0], dst1, *dst1_len);
+ sg_set_buf(&sg_out[1], dst2, *dst2_len);
+ sg_set_buf(&sg_out[2], pad, sizeof(pad));
++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++ if (ret)
++ goto out_tfm;
+
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
+ iv = crypto_blkcipher_crt(tfm)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm);
+-
+ memcpy(iv, aes_iv, ivsize);
+
+ /*
+@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
+ src, src_len, 1);
+ */
+-
+- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
+- crypto_free_blkcipher(tfm);
++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+ if (ret < 0) {
+ pr_err("ceph_aes_decrypt failed %d\n", ret);
+- return ret;
++ goto out_sg;
+ }
+
+ if (src_len <= *dst1_len)
+@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
+ dst2, *dst2_len, 1);
+ */
+
+- return 0;
++out_sg:
++ teardown_sgtable(&sg_in);
++out_tfm:
++ crypto_free_blkcipher(tfm);
++ return ret;
+ }
+
+
diff --git a/net/compat.c b/net/compat.c
index 759e542..7cf6606 100644
--- a/net/compat.c
@@ -106843,6 +107290,19 @@ index 7635107..4670276 100644
_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 333926d..53d455c 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -866,8 +866,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
+ list_add(&cur_key->key_list, sh_keys);
+
+ cur_key->key = key;
+- sctp_auth_key_hold(key);
+-
+ return 0;
+ nomem:
+ if (!replace)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0b6a391..febcef2 100644
--- a/net/sctp/ipv6.c
@@ -106953,6 +107413,20 @@ index de35e01..ef925b0 100644
}
static int sctp_v4_protosw_init(void)
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index d8d4704..c40952c 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2570,6 +2570,9 @@ do_addr_param:
+ addr_param = param.v + sizeof(sctp_addip_param_t);
+
+ af = sctp_get_af_specific(param_type2af(param.p->type));
++ if (af == NULL)
++ break;
++
+ af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0);
+
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 76388b0..a967f68 100644
--- a/net/sctp/sm_sideeffect.c
@@ -108846,6 +109320,19 @@ index 1ac414f..38575f7 100644
- $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
+ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 5d986d9..7c8f0e8 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -63,7 +63,7 @@ multi-objs := $(multi-objs-y) $(multi-objs-m)
+ subdir-obj-y := $(filter %/built-in.o, $(obj-y))
+
+ # $(obj-dirs) is a list of directories that contain object files
+-obj-dirs := $(dir $(multi-objs) $(subdir-obj-y))
++obj-dirs := $(dir $(multi-objs) $(obj-y))
+
+ # Replace multi-part objects by their individual parts, look at local dir only
+ real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index cb1f50c..cef2a7c 100644
--- a/scripts/basic/fixdep.c
@@ -109172,6 +109659,121 @@ index bee55f6..4108c4b 100644
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 4bf17dd..e4f4ac4 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -1,7 +1,7 @@
+ #!/bin/sh
+ #
+-# Output a simple RPM spec file that uses no fancy features requiring
+-# RPM v4. This is intended to work with any RPM distro.
++# Output a simple RPM spec file.
++# This version assumes a minimum of RPM 4.0.3.
+ #
+ # The only gothic bit here is redefining install_post to avoid
+ # stripping the symbols from files in the kernel which we want
+@@ -59,6 +59,14 @@ echo "header files define structures and constants that are needed for"
+ echo "building most standard programs and are also needed for rebuilding the"
+ echo "glibc package."
+ echo ""
++echo "%package devel"
++echo "Summary: Development package for building kernel modules to match the $__KERNELRELEASE kernel"
++echo "Group: System Environment/Kernel"
++echo "AutoReqProv: no"
++echo "%description -n kernel-devel"
++echo "This package provides kernel headers and makefiles sufficient to build modules"
++echo "against the $__KERNELRELEASE kernel package."
++echo ""
+
+ if ! $PREBUILT; then
+ echo "%prep"
+@@ -74,15 +82,27 @@ echo ""
+ fi
+
+ echo "%install"
++echo 'chmod -f 0500 /boot'
++echo 'if [ -d /lib/modules ]; then'
++echo 'chmod -f 0500 /lib/modules'
++echo 'fi'
++echo 'if [ -d /lib32/modules ]; then'
++echo 'chmod -f 0500 /lib32/modules'
++echo 'fi'
++echo 'if [ -d /lib64/modules ]; then'
++echo 'chmod -f 0500 /lib64/modules'
++echo 'fi'
++echo 'KBUILD_IMAGE=$(make image_name)'
+ echo "%ifarch ia64"
+ echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
+-echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
+ echo "%else"
+ echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
+-echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
+ echo "%endif"
++echo 'mkdir -p $RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
+
+-echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
++echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= mod-fw= modules_install'
++echo 'INSTALL_FW_PATH=$RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
++echo 'make INSTALL_FW_PATH=$INSTALL_FW_PATH' firmware_install
+ echo "%ifarch ia64"
+ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
+ echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
+@@ -95,7 +115,7 @@ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
+ echo "%endif"
+ echo "%endif"
+
+-echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install'
++echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr KBUILD_SRC= headers_install'
+ echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
+
+ echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
+@@ -107,18 +127,43 @@ echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
+ echo 'mv vmlinux.orig vmlinux'
+ echo "%endif"
+
++echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
++echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
++echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
++echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
++echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
++echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
++echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
++
+ echo ""
+ echo "%clean"
+ echo 'rm -rf $RPM_BUILD_ROOT'
+ echo ""
++echo "%post"
++echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
++echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
++echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
++echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
++echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
++echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
++echo "fi"
++echo ""
+ echo "%files"
+-echo '%defattr (-, root, root)'
++echo '%defattr (400, root, root, 500)'
+ echo "%dir /lib/modules"
+ echo "/lib/modules/$KERNELRELEASE"
+-echo "/lib/firmware"
++echo "%exclude /lib/modules/$KERNELRELEASE/build"
++echo "%exclude /lib/modules/$KERNELRELEASE/source"
++echo "/lib/firmware/$KERNELRELEASE"
+ echo "/boot/*"
+ echo ""
+ echo "%files headers"
+ echo '%defattr (-, root, root)'
+ echo "/usr/include"
+ echo ""
++echo "%files devel"
++echo '%defattr (400, root, root, 500)'
++echo "/usr/src/kernels/$KERNELRELEASE"
++echo "/lib/modules/$KERNELRELEASE/build"
++echo "/lib/modules/$KERNELRELEASE/source"
++echo ""
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 5c11312..72742b5 100644
--- a/scripts/pnmtologo.c