-rw-r--r-- 4.8.7/0000_README (renamed from 4.8.6/0000_README) 6
-rw-r--r-- 4.8.7/1001_linux-4.8.2.patch (renamed from 4.8.6/1001_linux-4.8.2.patch) 0
-rw-r--r-- 4.8.7/1002_linux-4.8.3.patch (renamed from 4.8.6/1002_linux-4.8.3.patch) 0
-rw-r--r-- 4.8.7/1003_linux-4.8.4.patch (renamed from 4.8.6/1003_linux-4.8.4.patch) 0
-rw-r--r-- 4.8.7/1004_linux-4.8.5.patch (renamed from 4.8.6/1004_linux-4.8.5.patch) 0
-rw-r--r-- 4.8.7/1005_linux-4.8.6.patch (renamed from 4.8.6/1005_linux-4.8.6.patch) 0
-rw-r--r-- 4.8.7/1006_linux-4.8.7.patch 4331
-rw-r--r-- 4.8.7/4420_grsecurity-3.1-4.8.7-201611102210.patch (renamed from 4.8.6/4420_grsecurity-3.1-4.8.6-201611091800.patch) 264
-rw-r--r-- 4.8.7/4425_grsec_remove_EI_PAX.patch (renamed from 4.8.6/4425_grsec_remove_EI_PAX.patch) 0
-rw-r--r-- 4.8.7/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.8.6/4427_force_XATTR_PAX_tmpfs.patch) 0
-rw-r--r-- 4.8.7/4430_grsec-remove-localversion-grsec.patch (renamed from 4.8.6/4430_grsec-remove-localversion-grsec.patch) 0
-rw-r--r-- 4.8.7/4435_grsec-mute-warnings.patch (renamed from 4.8.6/4435_grsec-mute-warnings.patch) 0
-rw-r--r-- 4.8.7/4440_grsec-remove-protected-paths.patch (renamed from 4.8.6/4440_grsec-remove-protected-paths.patch) 0
-rw-r--r-- 4.8.7/4450_grsec-kconfig-default-gids.patch (renamed from 4.8.6/4450_grsec-kconfig-default-gids.patch) 0
-rw-r--r-- 4.8.7/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.8.6/4465_selinux-avc_audit-log-curr_ip.patch) 0
-rw-r--r-- 4.8.7/4470_disable-compat_vdso.patch (renamed from 4.8.6/4470_disable-compat_vdso.patch) 0
-rw-r--r-- 4.8.7/4475_emutramp_default_on.patch (renamed from 4.8.6/4475_emutramp_default_on.patch) 0
17 files changed, 4501 insertions(+), 100 deletions(-)
diff --git a/4.8.6/0000_README b/4.8.7/0000_README
index 11337e2..81b2200 100644
--- a/4.8.6/0000_README
+++ b/4.8.7/0000_README
@@ -22,7 +22,11 @@ Patch: 1005_linux-4.8.6.patch
From: http://www.kernel.org
Desc: Linux 4.8.6
-Patch: 4420_grsecurity-3.1-4.8.6-201611091800.patch
+Patch: 1006_linux-4.8.7.patch
+From: http://www.kernel.org
+Desc: Linux 4.8.7
+
+Patch: 4420_grsecurity-3.1-4.8.7-201611102210.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.8.6/1001_linux-4.8.2.patch b/4.8.7/1001_linux-4.8.2.patch
index 5e354b2..5e354b2 100644
--- a/4.8.6/1001_linux-4.8.2.patch
+++ b/4.8.7/1001_linux-4.8.2.patch
diff --git a/4.8.6/1002_linux-4.8.3.patch b/4.8.7/1002_linux-4.8.3.patch
index 4b4c68a..4b4c68a 100644
--- a/4.8.6/1002_linux-4.8.3.patch
+++ b/4.8.7/1002_linux-4.8.3.patch
diff --git a/4.8.6/1003_linux-4.8.4.patch b/4.8.7/1003_linux-4.8.4.patch
index b326925..b326925 100644
--- a/4.8.6/1003_linux-4.8.4.patch
+++ b/4.8.7/1003_linux-4.8.4.patch
diff --git a/4.8.6/1004_linux-4.8.5.patch b/4.8.7/1004_linux-4.8.5.patch
index b4a1ae0..b4a1ae0 100644
--- a/4.8.6/1004_linux-4.8.5.patch
+++ b/4.8.7/1004_linux-4.8.5.patch
diff --git a/4.8.6/1005_linux-4.8.6.patch b/4.8.7/1005_linux-4.8.6.patch
index 641ba27..641ba27 100644
--- a/4.8.6/1005_linux-4.8.6.patch
+++ b/4.8.7/1005_linux-4.8.6.patch
diff --git a/4.8.7/1006_linux-4.8.7.patch b/4.8.7/1006_linux-4.8.7.patch
new file mode 100644
index 0000000..d96f1e2
--- /dev/null
+++ b/4.8.7/1006_linux-4.8.7.patch
@@ -0,0 +1,4331 @@
+diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
+index e5b6497..c75b64a 100644
+--- a/Documentation/device-mapper/dm-raid.txt
++++ b/Documentation/device-mapper/dm-raid.txt
+@@ -309,3 +309,4 @@ Version History
+ with a reshape in progress.
+ 1.9.0 Add support for RAID level takeover/reshape/region size
+ and set size reduction.
++1.9.1 Fix activation of existing RAID 4/10 mapped devices
+diff --git a/Makefile b/Makefile
+index b249529..4d0f28c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
+index b3df1c6..386eee6 100644
+--- a/arch/arm/boot/dts/ste-snowball.dts
++++ b/arch/arm/boot/dts/ste-snowball.dts
+@@ -239,14 +239,25 @@
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <100000000>;
+ bus-width = <4>;
++ cap-sd-highspeed;
+ cap-mmc-highspeed;
++ sd-uhs-sdr12;
++ sd-uhs-sdr25;
++ /* All direction control is used */
++ st,sig-dir-cmd;
++ st,sig-dir-dat0;
++ st,sig-dir-dat2;
++ st,sig-dir-dat31;
++ st,sig-pin-fbclk;
++ full-pwr-cycle;
+ vmmc-supply = <&ab8500_ldo_aux3_reg>;
+ vqmmc-supply = <&vmmci>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdi0_default_mode>;
+ pinctrl-1 = <&sdi0_sleep_mode>;
+
+- cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
++ /* GPIO218 MMC_CD */
++ cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
+
+ status = "okay";
+ };
+@@ -549,7 +560,7 @@
+ /* VMMCI level-shifter enable */
+ snowball_cfg3 {
+ pins = "GPIO217_AH12";
+- ste,config = <&gpio_out_lo>;
++ ste,config = <&gpio_out_hi>;
+ };
+ /* VMMCI level-shifter voltage select */
+ snowball_cfg4 {
+diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
+index f9b6bd3..541647f 100644
+--- a/arch/arm/mach-mvebu/Kconfig
++++ b/arch/arm/mach-mvebu/Kconfig
+@@ -23,6 +23,7 @@ config MACH_MVEBU_V7
+ select CACHE_L2X0
+ select ARM_CPU_SUSPEND
+ select MACH_MVEBU_ANY
++ select MVEBU_CLK_COREDIV
+
+ config MACH_ARMADA_370
+ bool "Marvell Armada 370 boards"
+@@ -32,7 +33,6 @@ config MACH_ARMADA_370
+ select CPU_PJ4B
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_370
+- select MVEBU_CLK_COREDIV
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 370 SoC with device tree.
+@@ -50,7 +50,6 @@ config MACH_ARMADA_375
+ select HAVE_SMP
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_375
+- select MVEBU_CLK_COREDIV
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 375 SoC with device tree.
+@@ -68,7 +67,6 @@ config MACH_ARMADA_38X
+ select HAVE_SMP
+ select MACH_MVEBU_V7
+ select PINCTRL_ARMADA_38X
+- select MVEBU_CLK_COREDIV
+ help
+ Say 'Y' here if you want your kernel to support boards based
+ on the Marvell Armada 380/385 SoC with device tree.
+diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
+index 6d8e8e3..4cdfab3 100644
+--- a/arch/arm/mm/abort-lv4t.S
++++ b/arch/arm/mm/abort-lv4t.S
+@@ -7,7 +7,7 @@
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
+ *
+- * Returns : r4-r5, r10-r11, r13 preserved
++ * Returns : r4-r5, r9-r11, r13 preserved
+ *
+ * Purpose : obtain information about current aborted instruction.
+ * Note: we read user space. This means we might cause a data
+@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
+ /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+ /* d */ b do_DataAbort @ ldc rd, [rn, #m]
+ /* e */ b .data_unknown
+-/* f */
++/* f */ b .data_unknown
++
++.data_unknown_r9:
++ ldr r9, [sp], #4
+ .data_unknown: @ Part of jumptable
+ mov r0, r4
+ mov r1, r8
+@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
+ .data_arm_ldmstm:
+ tst r8, #1 << 21 @ check writeback bit
+ beq do_DataAbort @ no writeback -> no fixup
++ str r9, [sp, #-4]!
+ mov r7, #0x11
+ orr r7, r7, #0x1100
+ and r6, r8, r7
+@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
+ subne r7, r7, r6, lsl #2 @ Undo increment
+ addeq r7, r7, r6, lsl #2 @ Undo decrement
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
++ ldr r9, [sp], #4
+ b do_DataAbort
+
+ .data_arm_lateldrhpre:
+ tst r8, #1 << 21 @ Check writeback bit
+ beq do_DataAbort @ No writeback -> no fixup
+ .data_arm_lateldrhpost:
++ str r9, [sp, #-4]!
+ and r9, r8, #0x00f @ get Rm / low nibble of immediate value
+ tst r8, #1 << 22 @ if (immediate offset)
+ andne r6, r8, #0xf00 @ { immediate high nibble
+@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
+ subne r7, r7, r6 @ Undo incrmenet
+ addeq r7, r7, r6 @ Undo decrement
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
++ ldr r9, [sp], #4
+ b do_DataAbort
+
+ .data_arm_lateldrpreconst:
+@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
+ .data_arm_lateldrpostconst:
+ movs r6, r8, lsl #20 @ Get offset
+ beq do_DataAbort @ zero -> no fixup
++ str r9, [sp, #-4]!
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
+ tst r8, #1 << 23 @ Check U bit
+ subne r7, r7, r6, lsr #20 @ Undo increment
+ addeq r7, r7, r6, lsr #20 @ Undo decrement
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
++ ldr r9, [sp], #4
+ b do_DataAbort
+
+ .data_arm_lateldrprereg:
+@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
+ .data_arm_lateldrpostreg:
+ and r7, r8, #15 @ Extract 'm' from instruction
+ ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
++ str r9, [sp, #-4]!
+ mov r9, r8, lsr #7 @ get shift count
+ ands r9, r9, #31
+ and r7, r8, #0x70 @ get shift type
+@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
+ b .data_arm_apply_r6_and_rn
+ b .data_arm_apply_r6_and_rn @ 1: LSL #0
+ nop
+- b .data_unknown @ 2: MUL?
++ b .data_unknown_r9 @ 2: MUL?
+ nop
+- b .data_unknown @ 3: MUL?
++ b .data_unknown_r9 @ 3: MUL?
+ nop
+ mov r6, r6, lsr r9 @ 4: LSR #!0
+ b .data_arm_apply_r6_and_rn
+ mov r6, r6, lsr #32 @ 5: LSR #32
+ b .data_arm_apply_r6_and_rn
+- b .data_unknown @ 6: MUL?
++ b .data_unknown_r9 @ 6: MUL?
+ nop
+- b .data_unknown @ 7: MUL?
++ b .data_unknown_r9 @ 7: MUL?
+ nop
+ mov r6, r6, asr r9 @ 8: ASR #!0
+ b .data_arm_apply_r6_and_rn
+ mov r6, r6, asr #32 @ 9: ASR #32
+ b .data_arm_apply_r6_and_rn
+- b .data_unknown @ A: MUL?
++ b .data_unknown_r9 @ A: MUL?
+ nop
+- b .data_unknown @ B: MUL?
++ b .data_unknown_r9 @ B: MUL?
+ nop
+ mov r6, r6, ror r9 @ C: ROR #!0
+ b .data_arm_apply_r6_and_rn
+ mov r6, r6, rrx @ D: RRX
+ b .data_arm_apply_r6_and_rn
+- b .data_unknown @ E: MUL?
++ b .data_unknown_r9 @ E: MUL?
+ nop
+- b .data_unknown @ F: MUL?
++ b .data_unknown_r9 @ F: MUL?
+
+ .data_thumb_abort:
+ ldrh r8, [r4] @ read instruction
+@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
+ .data_thumb_pushpop:
+ tst r8, #1 << 10
+ beq .data_unknown
++ str r9, [sp, #-4]!
+ and r6, r8, #0x55 @ hweight8(r8) + R bit
+ and r9, r8, #0xaa
+ add r6, r6, r9, lsr #1
+@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
+ addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
+ subne r7, r7, r6, lsl #2 @ decrement SP if POP
+ str r7, [r2, #13 << 2]
++ ldr r9, [sp], #4
+ b do_DataAbort
+
+ .data_thumb_ldmstm:
++ str r9, [sp, #-4]!
+ and r6, r8, #0x55 @ hweight8(r8)
+ and r9, r8, #0xaa
+ add r6, r6, r9, lsr #1
+@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
+ and r6, r6, #15 @ number of regs to transfer
+ sub r7, r7, r6, lsl #2 @ always decrement
+ str r7, [r2, r9, lsr #6]
++ ldr r9, [sp], #4
+ b do_DataAbort
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+index da31bbb..3992718 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+@@ -131,7 +131,7 @@
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ cell-index = <1>;
+- clocks = <&cpm_syscon0 0 3>;
++ clocks = <&cpm_syscon0 1 21>;
+ status = "disabled";
+ };
+
+diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
+index b408fe6..3cef068 100644
+--- a/arch/h8300/include/asm/thread_info.h
++++ b/arch/h8300/include/asm/thread_info.h
+@@ -31,7 +31,6 @@ struct thread_info {
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ mm_segment_t addr_limit;
+- struct restart_block restart_block;
+ };
+
+ /*
+@@ -44,9 +43,6 @@ struct thread_info {
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+- .restart_block = { \
+- .fn = do_no_restart_syscall, \
+- }, \
+ }
+
+ #define init_thread_info (init_thread_union.thread_info)
+diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
+index ad1f81f..7138303 100644
+--- a/arch/h8300/kernel/signal.c
++++ b/arch/h8300/kernel/signal.c
+@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
+ unsigned int er0;
+
+ /* Always make any pending restarted system calls return -EINTR */
+- current_thread_info()->restart_block.fn = do_no_restart_syscall;
++ current->restart_block.fn = do_no_restart_syscall;
+
+ /* restore passed registers */
+ #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index b54bcad..45799ef 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -279,7 +279,10 @@ struct kvm_vcpu_arch {
+ /* Host KSEG0 address of the EI/DI offset */
+ void *kseg0_commpage;
+
+- u32 io_gpr; /* GPR used as IO source/target */
++ /* Resume PC after MMIO completion */
++ unsigned long io_pc;
++ /* GPR used as IO source/target */
++ u32 io_gpr;
+
+ struct hrtimer comparecount_timer;
+ /* Count timer control KVM register */
+@@ -301,8 +304,6 @@ struct kvm_vcpu_arch {
+ /* Bitmask of pending exceptions to be cleared */
+ unsigned long pending_exceptions_clr;
+
+- u32 pending_load_cause;
+-
+ /* Save/Restore the entryhi register when are are preempted/scheduled back in */
+ unsigned long preempt_entryhi;
+
+diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
+index ca1cc30..1958910 100644
+--- a/arch/mips/kernel/relocate.c
++++ b/arch/mips/kernel/relocate.c
+@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
+
+ #if defined(CONFIG_USE_OF)
+ /* Get any additional entropy passed in device tree */
+- {
++ if (initial_boot_params) {
+ int node, len;
+ u64 *prop;
+
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index 43853ec..4d65285 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -791,15 +791,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+
+- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
++ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
++ kvm_clear_c0_guest_status(cop0, ST0_ERL);
++ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
++ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ kvm_read_c0_guest_epc(cop0));
+ kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+- kvm_clear_c0_guest_status(cop0, ST0_ERL);
+- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else {
+ kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ vcpu->arch.pc);
+@@ -1522,13 +1522,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ struct kvm_vcpu *vcpu)
+ {
+ enum emulation_result er = EMULATE_DO_MMIO;
++ unsigned long curr_pc;
+ u32 op, rt;
+ u32 bytes;
+
+ rt = inst.i_format.rt;
+ op = inst.i_format.opcode;
+
+- vcpu->arch.pending_load_cause = cause;
++ /*
++ * Find the resume PC now while we have safe and easy access to the
++ * prior branch instruction, and save it for
++ * kvm_mips_complete_mmio_load() to restore later.
++ */
++ curr_pc = vcpu->arch.pc;
++ er = update_pc(vcpu, cause);
++ if (er == EMULATE_FAIL)
++ return er;
++ vcpu->arch.io_pc = vcpu->arch.pc;
++ vcpu->arch.pc = curr_pc;
++
+ vcpu->arch.io_gpr = rt;
+
+ switch (op) {
+@@ -2488,9 +2500,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ goto done;
+ }
+
+- er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+- if (er == EMULATE_FAIL)
+- return er;
++ /* Restore saved resume PC */
++ vcpu->arch.pc = vcpu->arch.io_pc;
+
+ switch (run->mmio.len) {
+ case 4:
+@@ -2512,11 +2523,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ break;
+ }
+
+- if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+- kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+- vcpu->mmio_needed);
+-
+ done:
+ return er;
+ }
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index d03422e..7ed036c 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -106,8 +106,6 @@ linux_gateway_entry:
+ mtsp %r0,%sr4 /* get kernel space into sr4 */
+ mtsp %r0,%sr5 /* get kernel space into sr5 */
+ mtsp %r0,%sr6 /* get kernel space into sr6 */
+- mfsp %sr7,%r1 /* save user sr7 */
+- mtsp %r1,%sr3 /* and store it in sr3 */
+
+ #ifdef CONFIG_64BIT
+ /* for now we can *always* set the W bit on entry to the syscall
+@@ -133,6 +131,14 @@ linux_gateway_entry:
+ depdi 0, 31, 32, %r21
+ 1:
+ #endif
++
++ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
++ * by external interrupts.
++ */
++ mfsp %sr7,%r1 /* save user sr7 */
++ rsm PSW_SM_I, %r0 /* disable interrupts */
++ mtsp %r1,%sr3 /* and store it in sr3 */
++
+ mfctl %cr30,%r1
+ xor %r1,%r30,%r30 /* ye olde xor trick */
+ xor %r1,%r30,%r1
+@@ -147,6 +153,7 @@ linux_gateway_entry:
+ */
+
+ mtsp %r0,%sr7 /* get kernel space into sr7 */
++ ssm PSW_SM_I, %r0 /* enable interrupts */
+ STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
+ mfctl %cr30,%r1 /* get task ptr in %r1 */
+ LDREG TI_TASK(%r1),%r1
+diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
+index 01b8a13..3919332 100644
+--- a/arch/powerpc/include/asm/cpuidle.h
++++ b/arch/powerpc/include/asm/cpuidle.h
+@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
+ std r0,0(r1); \
+ ptesync; \
+ ld r0,0(r1); \
+-1: cmp cr0,r0,r0; \
++1: cmpd cr0,r0,r0; \
+ bne 1b; \
+ IDLE_INST; \
+ b .
+diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
+index f6f68f7..99e1397 100644
+--- a/arch/powerpc/include/asm/tlb.h
++++ b/arch/powerpc/include/asm/tlb.h
+@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
+ return cpumask_subset(mm_cpumask(mm),
+ topology_sibling_cpumask(smp_processor_id()));
+ }
++
++static inline int mm_is_thread_local(struct mm_struct *mm)
++{
++ return cpumask_equal(mm_cpumask(mm),
++ cpumask_of(smp_processor_id()));
++}
++
+ #else
+ static inline int mm_is_core_local(struct mm_struct *mm)
+ {
+ return 1;
+ }
++
++static inline int mm_is_thread_local(struct mm_struct *mm)
++{
++ return 1;
++}
+ #endif
+
+ #endif /* __KERNEL__ */
+diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
+index bd739fe..72dac0b 100644
+--- a/arch/powerpc/kernel/idle_book3s.S
++++ b/arch/powerpc/kernel/idle_book3s.S
+@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
++ * r9 - used as a temporary variable
+ */
+
+ core_idle_lock_held:
+@@ -99,6 +100,8 @@ core_idle_lock_held:
+ bne 3b
+ HMT_MEDIUM
+ lwarx r15,0,r14
++ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
++ bne core_idle_lock_held
+ blr
+
+ /*
+@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
+ std r9,_MSR(r1)
+ std r1,PACAR1(r13)
+
+-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+- /* Tell KVM we're entering idle */
+- li r4,KVM_HWTHREAD_IN_IDLE
+- stb r4,HSTATE_HWTHREAD_STATE(r13)
+-#endif
+-
+ /*
+ * Go to real mode to do the nap, as required by the architecture.
+ * Also, we need to be in real mode before setting hwthread_state,
+@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
+
+ .globl pnv_enter_arch207_idle_mode
+ pnv_enter_arch207_idle_mode:
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++ /* Tell KVM we're entering idle */
++ li r4,KVM_HWTHREAD_IN_IDLE
++ /******************************************************/
++ /* N O T E W E L L ! ! ! N O T E W E L L */
++ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
++ /* MUST occur in real mode, i.e. with the MMU off, */
++ /* and the MMU must stay off until we clear this flag */
++ /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
++ /* reset interrupt vector in exceptions-64s.S. */
++ /* The reason is that another thread can switch the */
++ /* MMU to a guest context whenever this flag is set */
++ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
++ /* that would potentially cause this thread to start */
++ /* executing instructions from guest memory in */
++ /* hypervisor mode, leading to a host crash or data */
++ /* corruption, or worse. */
++ /******************************************************/
++ stb r4,HSTATE_HWTHREAD_STATE(r13)
++#endif
+ stb r3,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr3,r3,PNV_THREAD_SLEEP
+ bge cr3,2f
+@@ -250,6 +267,12 @@ enter_winkle:
+ * r3 - requested stop state
+ */
+ power_enter_stop:
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++ /* Tell KVM we're entering idle */
++ li r4,KVM_HWTHREAD_IN_IDLE
++ /* DO THIS IN REAL MODE! See comment above. */
++ stb r4,HSTATE_HWTHREAD_STATE(r13)
++#endif
+ /*
+ * Check if the requested state is a deep idle state.
+ */
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index 48df05e..d696068 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+
+- if (!mm_is_core_local(mm)) {
++ if (!mm_is_thread_local(mm)) {
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+
+- if (!mm_is_core_local(mm)) {
++ if (!mm_is_thread_local(mm)) {
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ pid = mm ? mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto bail;
+- if (!mm_is_core_local(mm)) {
++ if (!mm_is_thread_local(mm)) {
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+ {
+ unsigned long pid;
+ unsigned long addr;
+- int local = mm_is_core_local(mm);
++ int local = mm_is_thread_local(mm);
+ unsigned long ap = mmu_get_ap(psize);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+ unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
+index bd98b7d..05c98bb 100644
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
+ if (r < 0)
+ goto out;
+
+- diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
++ diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!diag224_buf || diag224(diag224_buf))
+ goto out;
+
+@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
+ sctns->par.infpval1 |= PAR_WGHT_VLD;
+
+ out:
+- kfree(diag224_buf);
++ free_page((unsigned long)diag224_buf);
+ vfree(diag204_buf);
+ }
+
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 620ab06..017bda1 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
+ * We need the physical address of the container for both bitness since
+ * boot_params.hdr.ramdisk_image is a physical address.
+ */
+- cont = __pa(container);
++ cont = __pa_nodebug(container);
+ cont_va = container;
+ #endif
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 98c9cd6..d5219b1 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1222,11 +1222,16 @@ void __init setup_arch(char **cmdline_p)
+ if (smp_found_config)
+ get_smp_config();
+
++ /*
++ * Systems w/o ACPI and mptables might not have it mapped the local
++ * APIC yet, but prefill_possible_map() might need to access it.
++ */
++ init_apic_mappings();
++
+ prefill_possible_map();
+
+ init_cpu_to_node();
+
+- init_apic_mappings();
+ io_apic_init_mappings();
+
+ kvm_guest_init();
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 4e95d3e..cbd7b92 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5045,7 +5045,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ /* Decode and fetch the destination operand: register or memory. */
+ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
+
+- if (ctxt->rip_relative)
++ if (ctxt->rip_relative && likely(ctxt->memopp))
+ ctxt->memopp->addr.mem.ea = address_mask(ctxt,
+ ctxt->memopp->addr.mem.ea + ctxt->_eip);
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 699f872..46f74d4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7372,10 +7372,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+
+ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ {
++ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
++
+ kvmclock_reset(vcpu);
+
+- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+ kvm_x86_ops->vcpu_free(vcpu);
++ free_cpumask_var(wbinvd_dirty_mask);
+ }
+
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 16288e7..4b1e4ea 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
+
+
+ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+- uint32_t desc)
++ u32 desc, bool need_strong_ref)
+ {
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+@@ -1011,12 +1011,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+- if (desc < ref->desc)
++ if (desc < ref->desc) {
+ n = n->rb_left;
+- else if (desc > ref->desc)
++ } else if (desc > ref->desc) {
+ n = n->rb_right;
+- else
++ } else if (need_strong_ref && !ref->strong) {
++ binder_user_error("tried to use weak ref as strong ref\n");
++ return NULL;
++ } else {
+ return ref;
++ }
+ }
+ return NULL;
+ }
+@@ -1286,7 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
++ struct binder_ref *ref;
++
++ ref = binder_get_ref(proc, fp->handle,
++ fp->type == BINDER_TYPE_HANDLE);
+
+ if (ref == NULL) {
+ pr_err("transaction release %d bad handle %d\n",
+@@ -1381,7 +1388,7 @@ static void binder_transaction(struct binder_proc *proc,
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+
+- ref = binder_get_ref(proc, tr->target.handle);
++ ref = binder_get_ref(proc, tr->target.handle, true);
+ if (ref == NULL) {
+ binder_user_error("%d:%d got transaction to invalid handle\n",
+ proc->pid, thread->pid);
+@@ -1578,7 +1585,9 @@ static void binder_transaction(struct binder_proc *proc,
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
++ fp->binder = 0;
+ fp->handle = ref->desc;
++ fp->cookie = 0;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ &thread->todo);
+
+@@ -1590,7 +1599,10 @@ static void binder_transaction(struct binder_proc *proc,
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
++ struct binder_ref *ref;
++
++ ref = binder_get_ref(proc, fp->handle,
++ fp->type == BINDER_TYPE_HANDLE);
+
+ if (ref == NULL) {
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+@@ -1625,7 +1637,9 @@ static void binder_transaction(struct binder_proc *proc,
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
++ fp->binder = 0;
+ fp->handle = new_ref->desc;
++ fp->cookie = 0;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ trace_binder_transaction_ref_to_ref(t, ref,
+ new_ref);
+@@ -1679,6 +1693,7 @@ static void binder_transaction(struct binder_proc *proc,
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %d -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
++ fp->binder = 0;
+ fp->handle = target_fd;
+ } break;
+
+@@ -1801,7 +1816,9 @@ static int binder_thread_write(struct binder_proc *proc,
+ ref->desc);
+ }
+ } else
+- ref = binder_get_ref(proc, target);
++ ref = binder_get_ref(proc, target,
++ cmd == BC_ACQUIRE ||
++ cmd == BC_RELEASE);
+ if (ref == NULL) {
+ binder_user_error("%d:%d refcount change on invalid ref %d\n",
+ proc->pid, thread->pid, target);
+@@ -1997,7 +2014,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+- ref = binder_get_ref(proc, target);
++ ref = binder_get_ref(proc, target, false);
+ if (ref == NULL) {
+ binder_user_error("%d:%d %s invalid ref %d\n",
+ proc->pid, thread->pid,
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 5da47e26..4aae0d2 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1540,19 +1540,29 @@ static void remove_port_data(struct port *port)
+ spin_lock_irq(&port->inbuf_lock);
+ /* Remove unused data this port might have received. */
+ discard_port_data(port);
++ spin_unlock_irq(&port->inbuf_lock);
+
+ /* Remove buffers we queued up for the Host to send us data in. */
+- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+- free_buf(buf, true);
+- spin_unlock_irq(&port->inbuf_lock);
++ do {
++ spin_lock_irq(&port->inbuf_lock);
++ buf = virtqueue_detach_unused_buf(port->in_vq);
++ spin_unlock_irq(&port->inbuf_lock);
++ if (buf)
++ free_buf(buf, true);
++ } while (buf);
+
+ spin_lock_irq(&port->outvq_lock);
+ reclaim_consumed_buffers(port);
++ spin_unlock_irq(&port->outvq_lock);
+
+ /* Free pending buffers from the out-queue. */
+- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+- free_buf(buf, true);
+- spin_unlock_irq(&port->outvq_lock);
++ do {
++ spin_lock_irq(&port->outvq_lock);
++ buf = virtqueue_detach_unused_buf(port->out_vq);
++ spin_unlock_irq(&port->outvq_lock);
++ if (buf)
++ free_buf(buf, true);
++ } while (buf);
+ }
+
+ /*
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index b46547e..8c347f5 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1133,10 +1133,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+ *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+ }
+
+-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
++static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+ {
+- int pstate = cpu->pstate.min_pstate;
+-
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ cpu->pstate.current_pstate = pstate;
+ /*
+@@ -1148,6 +1146,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+ pstate_funcs.get_val(cpu, pstate));
+ }
+
++static void intel_pstate_set_min_pstate(struct cpudata *cpu)
++{
++ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
++}
++
++static void intel_pstate_max_within_limits(struct cpudata *cpu)
++{
++ int min_pstate, max_pstate;
++
++ update_turbo_state();
++ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
++ intel_pstate_set_pstate(cpu, max_pstate);
++}
++
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+@@ -1465,7 +1477,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+- cpu = all_cpu_data[0];
++ cpu = all_cpu_data[policy->cpu];
+ if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ policy->max < policy->cpuinfo.max_freq &&
+ policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+@@ -1509,6 +1521,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+ limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
+
+ out:
++ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
++ /*
++ * NOHZ_FULL CPUs need this as the governor callback may not
++ * be invoked on them.
++ */
++ intel_pstate_clear_update_util_hook(policy->cpu);
++ intel_pstate_max_within_limits(cpu);
++ }
++
+ intel_pstate_set_update_util_hook(policy->cpu);
+
+ intel_pstate_hwp_set_policy(policy);
+diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
+index 1f01e98..73ae849 100644
+--- a/drivers/dax/pmem.c
++++ b/drivers/dax/pmem.c
+@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
+
+ dev_dbg(dax_pmem->dev, "%s\n", __func__);
+ percpu_ref_exit(ref);
+- wait_for_completion(&dax_pmem->cmp);
+ }
+
+ static void dax_pmem_percpu_kill(void *data)
+@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
+
+ dev_dbg(dax_pmem->dev, "%s\n", __func__);
+ percpu_ref_kill(ref);
++ wait_for_completion(&dax_pmem->cmp);
+ }
+
+ static int dax_pmem_probe(struct device *dev)
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 309311b..1547589 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -73,13 +73,13 @@ struct rfc2734_header {
+
+ #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
+ #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
+-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
++#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
+ #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
+ #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
+
+-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
++#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+ #define fwnet_set_hdr_ether_type(et) (et)
+-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
++#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
+ #define fwnet_set_hdr_fg_off(fgo) (fgo)
+
+ #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
+@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ int retval;
+ u16 ether_type;
+
++ if (len <= RFC2374_UNFRAG_HDR_SIZE)
++ return 0;
++
+ hdr.w0 = be32_to_cpu(buf[0]);
+ lf = fwnet_get_hdr_lf(&hdr);
+ if (lf == RFC2374_HDR_UNFRAG) {
+@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ is_broadcast, ether_type);
+ }
++
+ /* A datagram fragment has been received, now the fun begins. */
++
++ if (len <= RFC2374_FRAG_HDR_SIZE)
++ return 0;
++
+ hdr.w1 = ntohl(buf[1]);
+ buf += 2;
+ len -= RFC2374_FRAG_HDR_SIZE;
+@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ fg_off = fwnet_get_hdr_fg_off(&hdr);
+ }
+ datagram_label = fwnet_get_hdr_dgl(&hdr);
+- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
++ dg_size = fwnet_get_hdr_dg_size(&hdr);
++
++ if (fg_off + len > dg_size)
++ return 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ fw_send_response(card, r, rcode);
+ }
+
++static int gasp_source_id(__be32 *p)
++{
++ return be32_to_cpu(p[0]) >> 16;
++}
++
++static u32 gasp_specifier_id(__be32 *p)
++{
++ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
++ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
++}
++
++static u32 gasp_version(__be32 *p)
++{
++ return be32_to_cpu(p[1]) & 0xffffff;
++}
++
+ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ u32 cycle, size_t header_length, void *header, void *data)
+ {
+@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ __be32 *buf_ptr;
+ int retval;
+ u32 length;
+- u16 source_node_id;
+- u32 specifier_id;
+- u32 ver;
+ unsigned long offset;
+ unsigned long flags;
+
+@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+-
+- if (specifier_id == IANA_SPECIFIER_ID &&
+- (ver == RFC2734_SW_VERSION
++ if (length > IEEE1394_GASP_HDR_SIZE &&
++ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
++ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
+ #if IS_ENABLED(CONFIG_IPV6)
+- || ver == RFC3146_SW_VERSION
++ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
+ #endif
+- )) {
+- buf_ptr += 2;
+- length -= IEEE1394_GASP_HDR_SIZE;
+- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
++ ))
++ fwnet_incoming_packet(dev, buf_ptr + 2,
++ length - IEEE1394_GASP_HDR_SIZE,
++ gasp_source_id(buf_ptr),
+ context->card->generation, true);
+- }
+
+ packet.payload_length = dev->rcv_buffer_size;
+ packet.interrupt = 1;
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index af51461..14f2d98 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -602,14 +602,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+ {
+ int idx, i;
+ unsigned int irq_flags;
++ int ret = -ENOENT;
+
+ for (i = 0, idx = 0; idx <= index; i++) {
+ struct acpi_gpio_info info;
+ struct gpio_desc *desc;
+
+ desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
+- if (IS_ERR(desc))
++ if (IS_ERR(desc)) {
++ ret = PTR_ERR(desc);
+ break;
++ }
+ if (info.gpioint && idx++ == index) {
+ int irq = gpiod_to_irq(desc);
+
+@@ -628,7 +631,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+ }
+
+ }
+- return -ENOENT;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 53ff25a..b2dee10 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -21,6 +21,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/compat.h>
+ #include <linux/anon_inodes.h>
++#include <linux/file.h>
+ #include <linux/kfifo.h>
+ #include <linux/poll.h>
+ #include <linux/timekeeping.h>
+@@ -331,6 +332,13 @@ struct linehandle_state {
+ u32 numdescs;
+ };
+
++#define GPIOHANDLE_REQUEST_VALID_FLAGS \
++ (GPIOHANDLE_REQUEST_INPUT | \
++ GPIOHANDLE_REQUEST_OUTPUT | \
++ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
++ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
++ GPIOHANDLE_REQUEST_OPEN_SOURCE)
++
+ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+ {
+@@ -342,6 +350,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
+ if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
+ int val;
+
++ memset(&ghd, 0, sizeof(ghd));
++
+ /* TODO: check if descriptors are really input */
+ for (i = 0; i < lh->numdescs; i++) {
+ val = gpiod_get_value_cansleep(lh->descs[i]);
+@@ -412,6 +422,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ {
+ struct gpiohandle_request handlereq;
+ struct linehandle_state *lh;
++ struct file *file;
+ int fd, i, ret;
+
+ if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
+@@ -442,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ u32 lflags = handlereq.flags;
+ struct gpio_desc *desc;
+
++ if (offset >= gdev->ngpio) {
++ ret = -EINVAL;
++ goto out_free_descs;
++ }
++
++ /* Return an error if a unknown flag is set */
++ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
++ ret = -EINVAL;
++ goto out_free_descs;
++ }
++
+ desc = &gdev->descs[offset];
+ ret = gpiod_request(desc, lh->label);
+ if (ret)
+@@ -477,26 +499,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ i--;
+ lh->numdescs = handlereq.lines;
+
+- fd = anon_inode_getfd("gpio-linehandle",
+- &linehandle_fileops,
+- lh,
+- O_RDONLY | O_CLOEXEC);
++ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto out_free_descs;
+ }
+
++ file = anon_inode_getfile("gpio-linehandle",
++ &linehandle_fileops,
++ lh,
++ O_RDONLY | O_CLOEXEC);
++ if (IS_ERR(file)) {
++ ret = PTR_ERR(file);
++ goto out_put_unused_fd;
++ }
++
+ handlereq.fd = fd;
+ if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
+- ret = -EFAULT;
+- goto out_free_descs;
++ /*
++ * fput() will trigger the release() callback, so do not go onto
++ * the regular error cleanup path here.
++ */
++ fput(file);
++ put_unused_fd(fd);
++ return -EFAULT;
+ }
+
++ fd_install(fd, file);
++
+ dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
+ lh->numdescs);
+
+ return 0;
+
++out_put_unused_fd:
++ put_unused_fd(fd);
+ out_free_descs:
+ for (; i >= 0; i--)
+ gpiod_free(lh->descs[i]);
+@@ -534,6 +571,10 @@ struct lineevent_state {
+ struct mutex read_lock;
+ };
+
++#define GPIOEVENT_REQUEST_VALID_FLAGS \
++ (GPIOEVENT_REQUEST_RISING_EDGE | \
++ GPIOEVENT_REQUEST_FALLING_EDGE)
++
+ static unsigned int lineevent_poll(struct file *filep,
+ struct poll_table_struct *wait)
+ {
+@@ -621,6 +662,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
+ if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
+ int val;
+
++ memset(&ghd, 0, sizeof(ghd));
++
+ val = gpiod_get_value_cansleep(le->desc);
+ if (val < 0)
+ return val;
+@@ -693,6 +736,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ struct gpioevent_request eventreq;
+ struct lineevent_state *le;
+ struct gpio_desc *desc;
++ struct file *file;
+ u32 offset;
+ u32 lflags;
+ u32 eflags;
+@@ -724,6 +768,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ lflags = eventreq.handleflags;
+ eflags = eventreq.eventflags;
+
++ if (offset >= gdev->ngpio) {
++ ret = -EINVAL;
++ goto out_free_label;
++ }
++
++ /* Return an error if a unknown flag is set */
++ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
++ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
++ ret = -EINVAL;
++ goto out_free_label;
++ }
++
+ /* This is just wrong: we don't look for events on output lines */
+ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ ret = -EINVAL;
+@@ -775,23 +831,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ if (ret)
+ goto out_free_desc;
+
+- fd = anon_inode_getfd("gpio-event",
+- &lineevent_fileops,
+- le,
+- O_RDONLY | O_CLOEXEC);
++ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto out_free_irq;
+ }
+
++ file = anon_inode_getfile("gpio-event",
++ &lineevent_fileops,
++ le,
++ O_RDONLY | O_CLOEXEC);
++ if (IS_ERR(file)) {
++ ret = PTR_ERR(file);
++ goto out_put_unused_fd;
++ }
++
+ eventreq.fd = fd;
+ if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
+- ret = -EFAULT;
+- goto out_free_irq;
++ /*
++ * fput() will trigger the release() callback, so do not go onto
++ * the regular error cleanup path here.
++ */
++ fput(file);
++ put_unused_fd(fd);
++ return -EFAULT;
+ }
+
++ fd_install(fd, file);
++
+ return 0;
+
++out_put_unused_fd:
++ put_unused_fd(fd);
+ out_free_irq:
+ free_irq(le->irq, le);
+ out_free_desc:
+@@ -821,6 +892,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
+ struct gpiochip_info chipinfo;
+
++ memset(&chipinfo, 0, sizeof(chipinfo));
++
+ strncpy(chipinfo.name, dev_name(&gdev->dev),
+ sizeof(chipinfo.name));
+ chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
+@@ -837,7 +910,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+- if (lineinfo.line_offset > gdev->ngpio)
++ if (lineinfo.line_offset >= gdev->ngpio)
+ return -EINVAL;
+
+ desc = &gdev->descs[lineinfo.line_offset];
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index 2a3ded4..7c8c185 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
+ ssize_t expected_size,
+ bool *replaced)
+ {
+- struct drm_device *dev = crtc->dev;
+ struct drm_property_blob *new_blob = NULL;
+
+ if (blob_id != 0) {
+- new_blob = drm_property_lookup_blob(dev, blob_id);
++ new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
+ if (new_blob == NULL)
+ return -EINVAL;
+- if (expected_size > 0 && expected_size != new_blob->length)
++
++ if (expected_size > 0 && expected_size != new_blob->length) {
++ drm_property_unreference_blob(new_blob);
+ return -EINVAL;
++ }
+ }
+
+ drm_atomic_replace_property_blob(blob, new_blob, replaced);
++ drm_property_unreference_blob(new_blob);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 04e4571..aa64448 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
+ /* no need to clean up vcpi
+ * as if we have no connector we never setup a vcpi */
+ drm_dp_port_teardown_pdt(port, port->pdt);
++ port->pdt = DP_PEER_DEVICE_NONE;
+ }
+ kfree(port);
+ }
+@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ drm_dp_put_port(port);
+ goto out;
+ }
+- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
++ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
++ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
++ port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ drm_mode_connector_set_tile_property(port->connector);
+ }
+@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
++ port->pdt = DP_PEER_DEVICE_NONE;
+
+ if (!port->input && port->vcpi.vcpi > 0) {
+ drm_dp_mst_reset_vcpi_slots(mgr, port);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 0a06f91..337c555 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -129,7 +129,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+ return 0;
+ fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+- kfree(fb_helper->connector_info[i]);
++ struct drm_fb_helper_connector *fb_helper_connector =
++ fb_helper->connector_info[i];
++
++ drm_connector_unreference(fb_helper_connector->connector);
++
++ kfree(fb_helper_connector);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+@@ -601,6 +606,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
+ }
+ EXPORT_SYMBOL(drm_fb_helper_blank);
+
++static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
++ struct drm_mode_set *modeset)
++{
++ int i;
++
++ for (i = 0; i < modeset->num_connectors; i++) {
++ drm_connector_unreference(modeset->connectors[i]);
++ modeset->connectors[i] = NULL;
++ }
++ modeset->num_connectors = 0;
++
++ drm_mode_destroy(helper->dev, modeset->mode);
++ modeset->mode = NULL;
++
++ /* FIXME should hold a ref? */
++ modeset->fb = NULL;
++}
++
+ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+ {
+ int i;
+@@ -610,10 +633,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+ kfree(helper->connector_info[i]);
+ }
+ kfree(helper->connector_info);
++
+ for (i = 0; i < helper->crtc_count; i++) {
+- kfree(helper->crtc_info[i].mode_set.connectors);
+- if (helper->crtc_info[i].mode_set.mode)
+- drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
++ struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
++
++ drm_fb_helper_modeset_release(helper, modeset);
++ kfree(modeset->connectors);
+ }
+ kfree(helper->crtc_info);
+ }
+@@ -632,7 +657,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
+ clip->x2 = clip->y2 = 0;
+ spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
++ /* call dirty callback only when it has been really touched */
++ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
++ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
+
+ /**
+@@ -2027,7 +2054,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_fb_offset *offsets;
+- struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i;
+@@ -2075,45 +2101,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+- for (i = 0; i < fb_helper->crtc_count; i++) {
+- modeset = &fb_helper->crtc_info[i].mode_set;
+- modeset->num_connectors = 0;
+- modeset->fb = NULL;
+- }
++ for (i = 0; i < fb_helper->crtc_count; i++)
++ drm_fb_helper_modeset_release(fb_helper,
++ &fb_helper->crtc_info[i].mode_set);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ struct drm_fb_offset *offset = &offsets[i];
+- modeset = &fb_crtc->mode_set;
++ struct drm_mode_set *modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
++ struct drm_connector *connector =
++ fb_helper->connector_info[i]->connector;
++
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
++
+ fb_crtc->desired_mode = mode;
+ fb_crtc->x = offset->x;
+ fb_crtc->y = offset->y;
+- if (modeset->mode)
+- drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
++ drm_connector_reference(connector);
++ modeset->connectors[modeset->num_connectors++] = connector;
+ modeset->fb = fb_helper->fb;
+ modeset->x = offset->x;
+ modeset->y = offset->y;
+ }
+ }
+-
+- /* Clear out any old modes if there are no more connected outputs. */
+- for (i = 0; i < fb_helper->crtc_count; i++) {
+- modeset = &fb_helper->crtc_info[i].mode_set;
+- if (modeset->num_connectors == 0) {
+- BUG_ON(modeset->fb);
+- if (modeset->mode)
+- drm_mode_destroy(dev, modeset->mode);
+- modeset->mode = NULL;
+- }
+- }
+ out:
+ kfree(crtcs);
+ kfree(modes);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index c6e69e4..1f8af87 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val)
+ return mapping[val];
+ }
+
++static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
++ enum port port)
++{
++ const struct ddi_vbt_port_info *info =
++ &dev_priv->vbt.ddi_port_info[port];
++ enum port p;
++
++ if (!info->alternate_ddc_pin)
++ return;
++
++ for_each_port_masked(p, (1 << port) - 1) {
++ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
++
++ if (info->alternate_ddc_pin != i->alternate_ddc_pin)
++ continue;
++
++ DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
++ "disabling port %c DVI/HDMI support\n",
++ port_name(p), i->alternate_ddc_pin,
++ port_name(port), port_name(p));
++
++ /*
++ * If we have multiple ports supposedly sharing the
++ * pin, then dvi/hdmi couldn't exist on the shared
++ * port. Otherwise they share the same ddc bin and
++ * system couldn't communicate with them separately.
++ *
++ * Due to parsing the ports in alphabetical order,
++ * a higher port will always clobber a lower one.
++ */
++ i->supports_dvi = false;
++ i->supports_hdmi = false;
++ i->alternate_ddc_pin = 0;
++ }
++}
++
++static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
++ enum port port)
++{
++ const struct ddi_vbt_port_info *info =
++ &dev_priv->vbt.ddi_port_info[port];
++ enum port p;
++
++ if (!info->alternate_aux_channel)
++ return;
++
++ for_each_port_masked(p, (1 << port) - 1) {
++ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
++
++ if (info->alternate_aux_channel != i->alternate_aux_channel)
++ continue;
++
++ DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
++ "disabling port %c DP support\n",
++ port_name(p), i->alternate_aux_channel,
++ port_name(port), port_name(p));
++
++ /*
++ * If we have multiple ports supposedlt sharing the
++ * aux channel, then DP couldn't exist on the shared
++ * port. Otherwise they share the same aux channel
++ * and system couldn't communicate with them separately.
++ *
++ * Due to parsing the ports in alphabetical order,
++ * a higher port will always clobber a lower one.
++ */
++ i->supports_dp = false;
++ i->alternate_aux_channel = 0;
++ }
++}
++
+ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ const struct bdb_header *bdb)
+ {
+@@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+ if (is_dvi) {
+- if (port == PORT_E) {
+- info->alternate_ddc_pin = ddc_pin;
+- /* if DDIE share ddc pin with other port, then
+- * dvi/hdmi couldn't exist on the shared port.
+- * Otherwise they share the same ddc bin and system
+- * couldn't communicate with them seperately. */
+- if (ddc_pin == DDC_PIN_B) {
+- dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+- dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+- } else if (ddc_pin == DDC_PIN_C) {
+- dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+- dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+- } else if (ddc_pin == DDC_PIN_D) {
+- dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+- dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+- }
+- } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
+- DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+- else if (ddc_pin == DDC_PIN_C && port != PORT_C)
+- DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+- else if (ddc_pin == DDC_PIN_D && port != PORT_D)
+- DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
++ info->alternate_ddc_pin = ddc_pin;
++
++ sanitize_ddc_pin(dev_priv, port);
+ }
+
+ if (is_dp) {
+- if (port == PORT_E) {
+- info->alternate_aux_channel = aux_channel;
+- /* if DDIE share aux channel with other port, then
+- * DP couldn't exist on the shared port. Otherwise
+- * they share the same aux channel and system
+- * couldn't communicate with them seperately. */
+- if (aux_channel == DP_AUX_A)
+- dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+- else if (aux_channel == DP_AUX_B)
+- dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+- else if (aux_channel == DP_AUX_C)
+- dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+- else if (aux_channel == DP_AUX_D)
+- dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+- }
+- else if (aux_channel == DP_AUX_A && port != PORT_A)
+- DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+- else if (aux_channel == DP_AUX_B && port != PORT_B)
+- DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+- else if (aux_channel == DP_AUX_C && port != PORT_C)
+- DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+- else if (aux_channel == DP_AUX_D && port != PORT_D)
+- DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
++ info->alternate_aux_channel = aux_channel;
++
++ sanitize_aux_ch(dev_priv, port);
+ }
+
+ if (bdb->version >= 158) {
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e9a64fb..63462f2 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -13834,7 +13834,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *intel_plane_state =
+- to_intel_plane_state(plane_state);
++ to_intel_plane_state(plane->state);
+
+ if (!intel_plane_state->wait_req)
+ continue;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 1ca155f..3051182 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1090,6 +1090,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ return ret;
+ }
+
++static enum port intel_aux_port(struct drm_i915_private *dev_priv,
++ enum port port)
++{
++ const struct ddi_vbt_port_info *info =
++ &dev_priv->vbt.ddi_port_info[port];
++ enum port aux_port;
++
++ if (!info->alternate_aux_channel) {
++ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
++ port_name(port), port_name(port));
++ return port;
++ }
++
++ switch (info->alternate_aux_channel) {
++ case DP_AUX_A:
++ aux_port = PORT_A;
++ break;
++ case DP_AUX_B:
++ aux_port = PORT_B;
++ break;
++ case DP_AUX_C:
++ aux_port = PORT_C;
++ break;
++ case DP_AUX_D:
++ aux_port = PORT_D;
++ break;
++ default:
++ MISSING_CASE(info->alternate_aux_channel);
++ aux_port = PORT_A;
++ break;
++ }
++
++ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
++ port_name(aux_port), port_name(port));
++
++ return aux_port;
++}
++
+ static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
+ enum port port)
+ {
+@@ -1150,36 +1188,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
+ }
+ }
+
+-/*
+- * On SKL we don't have Aux for port E so we rely
+- * on VBT to set a proper alternate aux channel.
+- */
+-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
+-{
+- const struct ddi_vbt_port_info *info =
+- &dev_priv->vbt.ddi_port_info[PORT_E];
+-
+- switch (info->alternate_aux_channel) {
+- case DP_AUX_A:
+- return PORT_A;
+- case DP_AUX_B:
+- return PORT_B;
+- case DP_AUX_C:
+- return PORT_C;
+- case DP_AUX_D:
+- return PORT_D;
+- default:
+- MISSING_CASE(info->alternate_aux_channel);
+- return PORT_A;
+- }
+-}
+-
+ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
+ enum port port)
+ {
+- if (port == PORT_E)
+- port = skl_porte_aux_port(dev_priv);
+-
+ switch (port) {
+ case PORT_A:
+ case PORT_B:
+@@ -1195,9 +1206,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
+ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
+ enum port port, int index)
+ {
+- if (port == PORT_E)
+- port = skl_porte_aux_port(dev_priv);
+-
+ switch (port) {
+ case PORT_A:
+ case PORT_B:
+@@ -1235,7 +1243,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
+ static void intel_aux_reg_init(struct intel_dp *intel_dp)
+ {
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+- enum port port = dp_to_dig_port(intel_dp)->port;
++ enum port port = intel_aux_port(dev_priv,
++ dp_to_dig_port(intel_dp)->port);
+ int i;
+
+ intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
+diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
+index 3836a1c..ad483376 100644
+--- a/drivers/gpu/drm/i915/intel_fbc.c
++++ b/drivers/gpu/drm/i915/intel_fbc.c
+@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+ int lines;
+
+ intel_fbc_get_plane_source_size(cache, NULL, &lines);
+- if (INTEL_INFO(dev_priv)->gen >= 7)
++ if (INTEL_GEN(dev_priv) == 7)
+ lines = min(lines, 2048);
++ else if (INTEL_GEN(dev_priv) >= 8)
++ lines = min(lines, 2560);
+
+ /* Hardware needs the full buffer stride, not just the active area. */
+ return lines * cache->fb.stride;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index e59a28c..a691605 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3363,13 +3363,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+ int num_active;
+ int id, i;
+
++ /* Clear the partitioning for disabled planes. */
++ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
++ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
++
+ if (WARN_ON(!state))
+ return 0;
+
+ if (!cstate->base.active) {
+ ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+- memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ return 0;
+ }
+
+@@ -3469,12 +3471,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+ return 0;
+ }
+
+-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
+-{
+- /* TODO: Take into account the scalers once we support them */
+- return config->base.adjusted_mode.crtc_clock;
+-}
+-
+ /*
+ * The max latency should be 257 (max the punit can code is 255 and we add 2us
+ * for the read latency) and cpp should always be <= 8, so that
+@@ -3525,7 +3521,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
+ * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
+ * with additional adjustments for plane-specific scaling.
+ */
+- adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
++ adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
+ downscale_amount = skl_plane_downscale_amount(pstate);
+
+ pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
+@@ -3737,11 +3733,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+ if (!cstate->base.active)
+ return 0;
+
+- if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
++ if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
+ return 0;
+
+ return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
+- skl_pipe_pixel_rate(cstate));
++ ilk_pipe_pixel_rate(cstate));
+ }
+
+ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
+@@ -4051,6 +4047,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
+ intel_state->wm_results.dirty_pipes = ~0;
+ }
+
++ /*
++ * We're not recomputing for the pipes not included in the commit, so
++ * make sure we start with the current state.
++ */
++ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
++
+ for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+ struct intel_crtc_state *cstate;
+
+diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
+index 29423e75..927c51e 100644
+--- a/drivers/gpu/drm/imx/ipuv3-plane.c
++++ b/drivers/gpu/drm/imx/ipuv3-plane.c
+@@ -108,6 +108,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+ {
+ struct drm_plane *plane = &ipu_plane->base;
+ struct drm_plane_state *state = plane->state;
++ struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
+ int active;
+@@ -149,7 +150,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+ break;
+ }
+
+- if (old_state->fb) {
++ if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
+ ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
+ ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+@@ -359,7 +360,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+ return -EINVAL;
+
+- if (old_fb) {
++ if (old_fb &&
++ (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
++ old_fb->pixel_format == DRM_FORMAT_YVU420)) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ old_vbo = drm_plane_state_to_vbo(old_state);
+ if (ubo != old_ubo || vbo != old_vbo)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index dc57b62..193573d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
+ if (!parent_adev)
+ return false;
+
+- return acpi_has_method(parent_adev->handle, "_PR3");
++ return parent_adev->power.flags.power_resources &&
++ acpi_has_method(parent_adev->handle, "_PR3");
+ }
+
+ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 4a3d7ca..4b9c2d5 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+ void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl)
+ {
+- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+-
+- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
++ WREG32(SRBM_GFX_CNTL, RINGID(ring));
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+index db64e00..3b0c229 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
+
+ tmp &= AUX_HPD_SEL(0x7);
+ tmp |= AUX_HPD_SEL(chan->rec.hpd);
+- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
++ tmp |= AUX_EN | AUX_LS_READ_EN;
+
+ WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 89bdf20..c4993452 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
++ /* limit all SI kickers */
++ if (rdev->family == CHIP_PITCAIRN) {
++ if ((rdev->pdev->revision == 0x81) ||
++ (rdev->pdev->device == 0x6810) ||
++ (rdev->pdev->device == 0x6811) ||
++ (rdev->pdev->device == 0x6816) ||
++ (rdev->pdev->device == 0x6817) ||
++ (rdev->pdev->device == 0x6806))
++ max_mclk = 120000;
++ } else if (rdev->family == CHIP_VERDE) {
++ if ((rdev->pdev->revision == 0x81) ||
++ (rdev->pdev->revision == 0x83) ||
++ (rdev->pdev->revision == 0x87) ||
++ (rdev->pdev->device == 0x6820) ||
++ (rdev->pdev->device == 0x6821) ||
++ (rdev->pdev->device == 0x6822) ||
++ (rdev->pdev->device == 0x6823) ||
++ (rdev->pdev->device == 0x682A) ||
++ (rdev->pdev->device == 0x682B)) {
++ max_sclk = 75000;
++ max_mclk = 80000;
++ }
++ } else if (rdev->family == CHIP_OLAND) {
++ if ((rdev->pdev->revision == 0xC7) ||
++ (rdev->pdev->revision == 0x80) ||
++ (rdev->pdev->revision == 0x81) ||
++ (rdev->pdev->revision == 0x83) ||
++ (rdev->pdev->device == 0x6604) ||
++ (rdev->pdev->device == 0x6605)) {
++ max_sclk = 75000;
++ max_mclk = 80000;
++ }
++ } else if (rdev->family == CHIP_HAINAN) {
++ if ((rdev->pdev->revision == 0x81) ||
++ (rdev->pdev->revision == 0x83) ||
++ (rdev->pdev->revision == 0xC3) ||
++ (rdev->pdev->device == 0x6664) ||
++ (rdev->pdev->device == 0x6665) ||
++ (rdev->pdev->device == 0x6667)) {
++ max_sclk = 75000;
++ max_mclk = 80000;
++ }
++ }
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ }
+ ++p;
+ }
+- /* limit mclk on all R7 370 parts for stability */
+- if (rdev->pdev->device == 0x6811 &&
+- rdev->pdev->revision == 0x81)
+- max_mclk = 120000;
+- /* limit sclk/mclk on Jet parts for stability */
+- if (rdev->pdev->device == 0x6665 &&
+- rdev->pdev->revision == 0xc3) {
+- max_sclk = 75000;
+- max_mclk = 80000;
+- }
+
+ if (rps->vce_active) {
+ rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e92b09d..9ab703c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -179,6 +179,7 @@
+ #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
+ #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+ #define USB_DEVICE_ID_ATEN_CS682 0x2213
++#define USB_DEVICE_ID_ATEN_CS692 0x8021
+
+ #define USB_VENDOR_ID_ATMEL 0x03eb
+ #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index bb40008..85fcf60 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -63,6 +63,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index d5acaa2..9dc6372 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
+ u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+ struct icmsg_negotiate *negop = NULL;
+
+- vmbus_recvpacket(channel, hbeat_txf_buf,
+- PAGE_SIZE, &recvlen, &requestid);
++ while (1) {
++
++ vmbus_recvpacket(channel, hbeat_txf_buf,
++ PAGE_SIZE, &recvlen, &requestid);
++
++ if (!recvlen)
++ break;
+
+- if (recvlen > 0) {
+ icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
+ sizeof(struct vmbuspipe_hdr)];
+
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 5c5b7ca..dfae435 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
+ t_calc->div_low--;
+ t_calc->div_high--;
+
++	/* Set the tuning value to 0 so that the con register is not updated */
++ t_calc->tuning = 0;
+ /* Maximum divider supported by hw is 0xffff */
+ if (t_calc->div_low > 0xffff) {
+ t_calc->div_low = 0xffff;
+diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
+index 4233f56..3c38029 100644
+--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
++++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
+@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
+ struct mbox_chan *mbox_chan;
+ struct mbox_client mbox_client;
+ struct completion rd_complete;
+- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
++ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
+ u32 *resp_msg;
+ };
+
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index da3a02e..a9a9f66 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -1592,6 +1592,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
+ static void of_i2c_register_devices(struct i2c_adapter *adap)
+ {
+ struct device_node *node;
++ struct i2c_client *client;
+
+ /* Only register child devices if the adapter has a node pointer set */
+ if (!adap->dev.of_node)
+@@ -1602,7 +1603,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
+ for_each_available_child_of_node(adap->dev.of_node, node) {
+ if (of_node_test_and_set_flag(node, OF_POPULATED))
+ continue;
+- of_i2c_register_device(adap, node);
++
++ client = of_i2c_register_device(adap, node);
++ if (IS_ERR(client)) {
++ dev_warn(&adap->dev,
++ "Failed to create I2C device for %s\n",
++ node->full_name);
++ of_node_clear_flag(node, OF_POPULATED);
++ }
+ }
+ }
+
+@@ -2073,6 +2081,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
+ /* add the driver to the list of i2c drivers in the driver core */
+ driver->driver.owner = owner;
+ driver->driver.bus = &i2c_bus_type;
++ INIT_LIST_HEAD(&driver->clients);
+
+ /* When registration returns, the driver core
+ * will have called probe() for all matching-but-unbound devices.
+@@ -2083,7 +2092,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
+
+ pr_debug("driver [%s] registered\n", driver->driver.name);
+
+- INIT_LIST_HEAD(&driver->clients);
+ /* Walk the adapters that are already present */
+ i2c_for_each_dev(driver, __process_new_driver);
+
+@@ -2201,6 +2209,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
+ if (IS_ERR(client)) {
+ dev_err(&adap->dev, "failed to create client for '%s'\n",
+ rd->dn->full_name);
++ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(PTR_ERR(client));
+ }
+ break;
+diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
+index 407f141..a3fbdb7 100644
+--- a/drivers/iio/chemical/atlas-ph-sensor.c
++++ b/drivers/iio/chemical/atlas-ph-sensor.c
+@@ -207,13 +207,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
++ __be16 rval;
+
+- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
++ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
+ if (ret)
+ return ret;
+
+- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
+- be16_to_cpu(val) % 100);
++ val = be16_to_cpu(rval);
++ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
+
+ ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
+ if (ret)
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index f4bfb4b..073246c 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ },
++ {
++ /* Schenker XMG C504 - Elantech touchpad */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 8abde6b..6d53810 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -266,7 +266,7 @@ static struct raid_type {
+ {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
+ {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
+ {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
+- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
++ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
+ {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
+ {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
+ {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
+@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
+ /*
+ * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
+ */
+- if (le32_to_cpu(sb->level) != mddev->level) {
++ if (le32_to_cpu(sb->level) != mddev->new_level) {
+ DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
+ return -EINVAL;
+ }
+- if (le32_to_cpu(sb->layout) != mddev->layout) {
++ if (le32_to_cpu(sb->layout) != mddev->new_layout) {
+ DMERR("Reshaping raid sets not yet supported. (raid layout change)");
+ DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+ DMERR(" Old layout: %s w/ %d copies",
+@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
+ raid10_md_layout_to_copies(mddev->layout));
+ return -EINVAL;
+ }
+- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
++ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
+ DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
+ return -EINVAL;
+ }
+@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
+ return -EINVAL;
+ }
+
++ DMINFO("Discovered old metadata format; upgrading to extended metadata format");
++
+ /* Table line is checked vs. authoritative superblock */
+ rs_set_new(rs);
+ }
+@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
+ if (!mddev->events && super_init_validation(rs, rdev))
+ return -EINVAL;
+
+- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
++ if (le32_to_cpu(sb->compat_features) &&
++ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
+ return -EINVAL;
+ }
+@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
+
+ static struct target_type raid_target = {
+ .name = "raid",
+- .version = {1, 9, 0},
++ .version = {1, 9, 1},
+ .module = THIS_MODULE,
+ .ctr = raid_ctr,
+ .dtr = raid_dtr,
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index bdf1606..7a6254d 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1292,6 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
+
+ dm_bio_restore(bd, bio);
+ bio_record->details.bi_bdev = NULL;
++ bio->bi_error = 0;
+
+ queue_bio(ms, bio, rw);
+ return DM_ENDIO_INCOMPLETE;
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 5da86c8..2154596 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -835,8 +835,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
+ init_kthread_worker(&md->kworker);
+ md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+ "kdmwork-%s", dm_device_name(md));
+- if (IS_ERR(md->kworker_task))
+- return PTR_ERR(md->kworker_task);
++ if (IS_ERR(md->kworker_task)) {
++ int error = PTR_ERR(md->kworker_task);
++ md->kworker_task = NULL;
++ return error;
++ }
+
+ elv_register_queue(md->queue);
+
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 3e407a9..c4b53b3 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+
+ tgt->type = dm_get_target_type(type);
+ if (!tgt->type) {
+- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
+- type);
++ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
+ return -EINVAL;
+ }
+
+ if (dm_target_needs_singleton(tgt->type)) {
+ if (t->num_targets) {
+- DMERR("%s: target type %s must appear alone in table",
+- dm_device_name(t->md), type);
+- return -EINVAL;
++ tgt->error = "singleton target type must appear alone in table";
++ goto bad;
+ }
+ t->singleton = true;
+ }
+
+ if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
+- DMERR("%s: target type %s may not be included in read-only tables",
+- dm_device_name(t->md), type);
+- return -EINVAL;
++ tgt->error = "target type may not be included in a read-only table";
++ goto bad;
+ }
+
+ if (t->immutable_target_type) {
+ if (t->immutable_target_type != tgt->type) {
+- DMERR("%s: immutable target type %s cannot be mixed with other target types",
+- dm_device_name(t->md), t->immutable_target_type->name);
+- return -EINVAL;
++ tgt->error = "immutable target type cannot be mixed with other target types";
++ goto bad;
+ }
+ } else if (dm_target_is_immutable(tgt->type)) {
+ if (t->num_targets) {
+- DMERR("%s: immutable target type %s cannot be mixed with other target types",
+- dm_device_name(t->md), tgt->type->name);
+- return -EINVAL;
++ tgt->error = "immutable target type cannot be mixed with other target types";
++ goto bad;
+ }
+ t->immutable_target_type = tgt->type;
+ }
+@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+ */
+ if (!adjoin(t, tgt)) {
+ tgt->error = "Gap in table";
+- r = -EINVAL;
+ goto bad;
+ }
+
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 0f2928b..eeef575 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ if (md->bs)
+ bioset_free(md->bs);
+
+- cleanup_srcu_struct(&md->io_barrier);
+-
+ if (md->disk) {
+ spin_lock(&_minor_lock);
+ md->disk->private_data = NULL;
+@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ if (md->queue)
+ blk_cleanup_queue(md->queue);
+
++ cleanup_srcu_struct(&md->io_barrier);
++
+ if (md->bdev) {
+ bdput(md->bdev);
+ md->bdev = NULL;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 915e84d..db0aa6c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8120,14 +8120,14 @@ void md_do_sync(struct md_thread *thread)
+
+ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+- mddev->curr_resync > 2) {
++ mddev->curr_resync > 3) {
+ mddev->curr_resync_completed = mddev->curr_resync;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ }
+ mddev->pers->sync_request(mddev, max_sectors, &skipped);
+
+ if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+- mddev->curr_resync > 2) {
++ mddev->curr_resync > 3) {
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ if (mddev->curr_resync >= mddev->recovery_cp) {
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 21dc00e..95bf4cd 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -407,11 +407,14 @@ static void raid1_end_write_request(struct bio *bio)
+ struct bio *to_put = NULL;
+ int mirror = find_bio_disk(r1_bio, bio);
+ struct md_rdev *rdev = conf->mirrors[mirror].rdev;
++ bool discard_error;
++
++ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+
+ /*
+ * 'one mirror IO has finished' event handler:
+ */
+- if (bio->bi_error) {
++ if (bio->bi_error && !discard_error) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+@@ -448,7 +451,7 @@ static void raid1_end_write_request(struct bio *bio)
+
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
+- &first_bad, &bad_sectors)) {
++ &first_bad, &bad_sectors) && !discard_error) {
+ r1_bio->bios[mirror] = IO_MADE_GOOD;
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
+ }
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index be1a9fc..39fddda 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
+ struct r10conf *conf = r10_bio->mddev->private;
+ int slot, repl;
+ struct md_rdev *rdev = NULL;
++ bool discard_error;
++
++ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+
+ dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+
+@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
+ /*
+ * this branch is our 'one mirror IO has finished' event handler:
+ */
+- if (bio->bi_error) {
++ if (bio->bi_error && !discard_error) {
+ if (repl)
+ /* Never record new bad blocks to replacement,
+ * just fail it.
+@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
+ if (is_badblock(rdev,
+ r10_bio->devs[slot].addr,
+ r10_bio->sectors,
+- &first_bad, &bad_sectors)) {
++ &first_bad, &bad_sectors) && !discard_error) {
+ bio_put(bio);
+ if (repl)
+ r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
+index 9fb4fc2..ed9759e 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.c
++++ b/drivers/media/platform/vsp1/vsp1_video.c
+@@ -675,6 +675,13 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
+ unsigned long flags;
+ int ret;
+
++ /* Clear the buffers ready flag to make sure the device won't be started
++ * by a QBUF on the video node on the other side of the pipeline.
++ */
++ spin_lock_irqsave(&video->irqlock, flags);
++ pipe->buffers_ready &= ~(1 << video->pipe_index);
++ spin_unlock_irqrestore(&video->irqlock, flags);
++
+ mutex_lock(&pipe->lock);
+ if (--pipe->stream_count == pipe->num_inputs) {
+ /* Stop the pipeline. */
+diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
+index af23d7d..2e5233b 100644
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -247,7 +247,9 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
+ cxl_ctx_get();
+
+ if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
++ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
++ ctx->glpid = ctx->pid = NULL;
+ cxl_adapter_context_put(ctx->afu->adapter);
+ cxl_ctx_put();
+ goto out;
+diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
+index d0b421f..77080cc 100644
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+ ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
+
+ /*
++ * Increment the mapped context count for adapter. This also checks
++ * if adapter_context_lock is taken.
++ */
++ rc = cxl_adapter_context_get(ctx->afu->adapter);
++ if (rc) {
++ afu_release_irqs(ctx, ctx);
++ goto out;
++ }
++
++ /*
+ * We grab the PID here and not in the file open to allow for the case
+ * where a process (master, some daemon, etc) has opened the chardev on
+ * behalf of another process, so the AFU's mm gets bound to the process
+@@ -205,15 +215,6 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+
+- /*
+- * Increment the mapped context count for adapter. This also checks
+- * if adapter_context_lock is taken.
+- */
+- rc = cxl_adapter_context_get(ctx->afu->adapter);
+- if (rc) {
+- afu_release_irqs(ctx, ctx);
+- goto out;
+- }
+
+ trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+
+@@ -221,6 +222,9 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+ amr))) {
+ afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
++ put_pid(ctx->glpid);
++ put_pid(ctx->pid);
++ ctx->glpid = ctx->pid = NULL;
+ goto out;
+ }
+
+diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
+index 222367c..524660510 100644
+--- a/drivers/misc/genwqe/card_utils.c
++++ b/drivers/misc/genwqe/card_utils.c
+@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ if (copy_from_user(sgl->lpage, user_addr + user_size -
+ sgl->lpage_size, sgl->lpage_size)) {
+ rc = -EFAULT;
+- goto err_out1;
++ goto err_out2;
+ }
+ }
+ return 0;
+
++ err_out2:
++ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
++ sgl->lpage_dma_addr);
++ sgl->lpage = NULL;
++ sgl->lpage_dma_addr = 0;
+ err_out1:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+ sgl->fpage_dma_addr);
++ sgl->fpage = NULL;
++ sgl->fpage_dma_addr = 0;
+ err_out:
+ __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+ sgl->sgl_dma_addr);
++ sgl->sgl = NULL;
++ sgl->sgl_dma_addr = 0;
++ sgl->sgl_size = 0;
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
+index 4a6c1b8..2d23cdf 100644
+--- a/drivers/misc/mei/hw-txe.c
++++ b/drivers/misc/mei/hw-txe.c
+@@ -978,11 +978,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
+ hisr = mei_txe_br_reg_read(hw, HISR_REG);
+
+ aliveness = mei_txe_aliveness_get(dev);
+- if (hhisr & IPC_HHIER_SEC && aliveness)
++ if (hhisr & IPC_HHIER_SEC && aliveness) {
+ ipc_isr = mei_txe_sec_reg_read_silent(hw,
+ SEC_IPC_HOST_INT_STATUS_REG);
+- else
++ } else {
+ ipc_isr = 0;
++ hhisr &= ~IPC_HHIER_SEC;
++ }
+
+ generated = generated ||
+ (hisr & HISR_INT_STS_MSK) ||
+diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
+index c0bb0c7..dbbc430 100644
+--- a/drivers/mmc/host/dw_mmc-pltfm.c
++++ b/drivers/mmc/host/dw_mmc-pltfm.c
+@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
+ host->pdata = pdev->dev.platform_data;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- /* Get registers' physical base address */
+- host->phy_regs = regs->start;
+ host->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(host->regs))
+ return PTR_ERR(host->regs);
+
++ /* Get registers' physical base address */
++ host->phy_regs = regs->start;
++
+ platform_set_drvdata(pdev, host);
+ return dw_mci_probe(host);
+ }
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 48eb55f..a01a70a 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -515,10 +515,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
++
+ if (err == UBI_IO_FF_BITFLIPS)
+- add_aeb(ai, free, pnum, ec, 1);
+- else
+- add_aeb(ai, free, pnum, ec, 0);
++ scrub = 1;
++
++ add_aeb(ai, free, pnum, ec, scrub);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+@@ -750,11 +751,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+- if (!av)
+- goto fail_bad;
+- if (PTR_ERR(av) == -EINVAL) {
+- ubi_err(ubi, "volume (ID %i) already exists",
+- fmvhdr->vol_id);
++ if (IS_ERR(av)) {
++ if (PTR_ERR(av) == -EEXIST)
++ ubi_err(ubi, "volume (ID %i) already exists",
++ fmvhdr->vol_id);
++
+ goto fail_bad;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
+index 30ae5bf..76ad825 100644
+--- a/drivers/net/wireless/ath/ath10k/core.h
++++ b/drivers/net/wireless/ath/ath10k/core.h
+@@ -445,6 +445,7 @@ struct ath10k_debug {
+ u32 pktlog_filter;
+ u32 reg_addr;
+ u32 nf_cal_period;
++ void *cal_data;
+
+ struct ath10k_fw_crash_data *fw_crash_data;
+ };
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index 8f0fd41..8c6a5dd 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -30,6 +30,8 @@
+ /* ms */
+ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
++#define ATH10K_DEBUG_CAL_DATA_LEN 12064
++
+ #define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+ /**
+@@ -1450,56 +1452,51 @@ static const struct file_operations fops_fw_dbglog = {
+ .llseek = default_llseek,
+ };
+
+-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
++static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
+ {
+- struct ath10k *ar = inode->i_private;
+- void *buf;
+ u32 hi_addr;
+ __le32 addr;
+ int ret;
+
+- mutex_lock(&ar->conf_mutex);
+-
+- if (ar->state != ATH10K_STATE_ON &&
+- ar->state != ATH10K_STATE_UTF) {
+- ret = -ENETDOWN;
+- goto err;
+- }
++ lockdep_assert_held(&ar->conf_mutex);
+
+- buf = vmalloc(ar->hw_params.cal_data_len);
+- if (!buf) {
+- ret = -ENOMEM;
+- goto err;
+- }
++ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
++ return -EINVAL;
+
+ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+ if (ret) {
+- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
+- goto err_vfree;
++ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
++ ret);
++ return ret;
+ }
+
+- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
++ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+- goto err_vfree;
++ return ret;
+ }
+
+- file->private_data = buf;
++ return 0;
++}
+
+- mutex_unlock(&ar->conf_mutex);
++static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
++{
++ struct ath10k *ar = inode->i_private;
+
+- return 0;
++ mutex_lock(&ar->conf_mutex);
+
+-err_vfree:
+- vfree(buf);
++ if (ar->state == ATH10K_STATE_ON ||
++ ar->state == ATH10K_STATE_UTF) {
++ ath10k_debug_cal_data_fetch(ar);
++ }
+
+-err:
++ file->private_data = ar;
+ mutex_unlock(&ar->conf_mutex);
+
+- return ret;
++ return 0;
+ }
+
+ static ssize_t ath10k_debug_cal_data_read(struct file *file,
+@@ -1507,18 +1504,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
+ size_t count, loff_t *ppos)
+ {
+ struct ath10k *ar = file->private_data;
+- void *buf = file->private_data;
+
+- return simple_read_from_buffer(user_buf, count, ppos,
+- buf, ar->hw_params.cal_data_len);
+-}
++ mutex_lock(&ar->conf_mutex);
+
+-static int ath10k_debug_cal_data_release(struct inode *inode,
+- struct file *file)
+-{
+- vfree(file->private_data);
++ count = simple_read_from_buffer(user_buf, count, ppos,
++ ar->debug.cal_data,
++ ar->hw_params.cal_data_len);
+
+- return 0;
++ mutex_unlock(&ar->conf_mutex);
++
++ return count;
+ }
+
+ static ssize_t ath10k_write_ani_enable(struct file *file,
+@@ -1579,7 +1574,6 @@ static const struct file_operations fops_ani_enable = {
+ static const struct file_operations fops_cal_data = {
+ .open = ath10k_debug_cal_data_open,
+ .read = ath10k_debug_cal_data_read,
+- .release = ath10k_debug_cal_data_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+ };
+@@ -1931,6 +1925,8 @@ void ath10k_debug_stop(struct ath10k *ar)
+ {
+ lockdep_assert_held(&ar->conf_mutex);
+
++ ath10k_debug_cal_data_fetch(ar);
++
+ /* Must not use _sync to avoid deadlock, we do that in
+ * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
+ * warning from del_timer(). */
+@@ -2343,6 +2339,10 @@ int ath10k_debug_create(struct ath10k *ar)
+ if (!ar->debug.fw_crash_data)
+ return -ENOMEM;
+
++ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
++ if (!ar->debug.cal_data)
++ return -ENOMEM;
++
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+@@ -2356,6 +2356,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
+ vfree(ar->debug.fw_crash_data);
+ ar->debug.fw_crash_data = NULL;
+
++ vfree(ar->debug.cal_data);
++ ar->debug.cal_data = NULL;
++
+ ath10k_debug_fw_stats_reset(ar);
+
+ kfree(ar->debug.tpc_stats);
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+index b6f064a..7e27a06 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+@@ -33,7 +33,6 @@ struct coeff {
+
+ enum ar9003_cal_types {
+ IQ_MISMATCH_CAL = BIT(0),
+- TEMP_COMP_CAL = BIT(1),
+ };
+
+ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
+ /* Kick-off cal */
+ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
+ break;
+- case TEMP_COMP_CAL:
+- ath_dbg(common, CALIBRATE,
+- "starting Temperature Compensation Calibration\n");
+- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
+- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
+- break;
+ default:
+ ath_err(common, "Invalid calibration type\n");
+ break;
+@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ /*
+ * Accumulate cal measures for active chains
+ */
+- if (cur_caldata->calCollect)
+- cur_caldata->calCollect(ah);
++ cur_caldata->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >= cur_caldata->calNumSamples) {
+@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
+ /*
+ * Process accumulated data
+ */
+- if (cur_caldata->calPostProc)
+- cur_caldata->calPostProc(ah, numChains);
++ cur_caldata->calPostProc(ah, numChains);
+
+ /* Calibration has finished. */
+ caldata->CalValid |= cur_caldata->calType;
+@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
+ ar9003_hw_iqcalibrate
+ };
+
+-static const struct ath9k_percal_data temp_cal_single_sample = {
+- TEMP_COMP_CAL,
+- MIN_CAL_SAMPLES,
+- PER_MAX_LOG_COUNT,
+-};
+-
+ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+ {
+ ah->iq_caldata.calData = &iq_cal_single_sample;
+- ah->temp_caldata.calData = &temp_cal_single_sample;
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->enabled_cals |= TX_IQ_CAL;
+@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
+ ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
+ }
+
+- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
++ ah->supp_cals = IQ_MISMATCH_CAL;
+ }
+
+ #define OFF_UPPER_LT 24
+@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+
+- INIT_CAL(&ah->temp_caldata);
+- INSERT_CAL(ah, &ah->temp_caldata);
+-
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 2a5d3ad..9cbca12 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -830,7 +830,6 @@ struct ath_hw {
+ /* Calibration */
+ u32 supp_cals;
+ struct ath9k_cal_list iq_caldata;
+- struct ath9k_cal_list temp_caldata;
+ struct ath9k_cal_list adcgain_caldata;
+ struct ath9k_cal_list adcdc_caldata;
+ struct ath9k_cal_list *cal_list;
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+index 4341d56..a280932 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+@@ -231,7 +231,7 @@ struct rtl8xxxu_rxdesc16 {
+ u32 pattern1match:1;
+ u32 pattern0match:1;
+ #endif
+- __le32 tsfl;
++ u32 tsfl;
+ #if 0
+ u32 bassn:12;
+ u32 bavld:1;
+@@ -361,7 +361,7 @@ struct rtl8xxxu_rxdesc24 {
+ u32 ldcp:1;
+ u32 splcp:1;
+ #endif
+- __le32 tsfl;
++ u32 tsfl;
+ };
+
+ struct rtl8xxxu_txdesc32 {
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+index 9d45afb..c831a586 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
+ u32 val32;
+ u8 val8;
+
++ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
++ val32 |= (BIT(22) | BIT(23));
++ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
++
+ /*
+ * No indication anywhere as to what 0x0790 does. The 2 antenna
+ * vendor code preserves bits 6-7 here.
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 77048db..c6b246a 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -5201,7 +5201,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
+ sizeof(struct rtl8xxxu_rxdesc16), 128);
+
+- if (pkt_cnt > 1)
++ /*
++ * Only clone the skb if there's enough data at the end to
++ * at least cover the rx descriptor
++ */
++ if (pkt_cnt > 1 &&
++ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
+ next_skb = skb_clone(skb, GFP_ATOMIC);
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+@@ -5219,7 +5224,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
+ rx_desc->rxmcs);
+
+- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
++ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+@@ -5289,7 +5294,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+ rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
+ rx_desc->rxmcs);
+
+- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
++ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index 0dbd29e..172ef82 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -339,6 +339,8 @@ int pwmchip_remove(struct pwm_chip *chip)
+ unsigned int i;
+ int ret = 0;
+
++ pwmchip_sysfs_unexport_children(chip);
++
+ mutex_lock(&pwm_lock);
+
+ for (i = 0; i < chip->npwm; i++) {
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 18ed725..0296d81 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -409,6 +409,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+ }
+ }
+
++void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
++{
++ struct device *parent;
++ unsigned int i;
++
++ parent = class_find_device(&pwm_class, NULL, chip,
++ pwmchip_sysfs_match);
++ if (!parent)
++ return;
++
++ for (i = 0; i < chip->npwm; i++) {
++ struct pwm_device *pwm = &chip->pwms[i];
++
++ if (test_bit(PWMF_EXPORTED, &pwm->flags))
++ pwm_unexport_child(parent, pwm);
++ }
++}
++
+ static int __init pwm_sysfs_init(void)
+ {
+ return class_register(&pwm_class);
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 3d53d63..f0cfb04 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct CommandControlBlock *ccb;
+ int target = cmd->device->id;
+- int lun = cmd->device->lun;
+- uint8_t scsicmd = cmd->cmnd[0];
+ cmd->scsi_done = done;
+ cmd->host_scribble = NULL;
+ cmd->result = 0;
+- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
+- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
+- cmd->result = (DID_NO_CONNECT << 16);
+- }
+- cmd->scsi_done(cmd);
+- return 0;
+- }
+ if (target == 16) {
+ /* virtual device for iop message transfer */
+ arcmsr_handle_virtual_command(acb, cmd);
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 6a219a0..05e892a 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
+ bus_unregister(&pseudo_lld_bus);
+ root_device_unregister(pseudo_primary);
+
++ vfree(map_storep);
+ vfree(dif_storep);
+ vfree(fake_storep);
+ kfree(sdebug_q_arr);
+diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
+index 8d85a3c..3f35613 100644
+--- a/drivers/spi/spi-fsl-espi.c
++++ b/drivers/spi/spi-fsl-espi.c
+@@ -581,7 +581,7 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+
+ mspi->len -= rx_nr_bytes;
+
+- if (mspi->rx)
++ if (rx_nr_bytes && mspi->rx)
+ mspi->get_rx(rx_data, mspi);
+ }
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 200ca22..935f1a5 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1607,9 +1607,11 @@ static void of_register_spi_devices(struct spi_master *master)
+ if (of_node_test_and_set_flag(nc, OF_POPULATED))
+ continue;
+ spi = of_register_spi_device(master, nc);
+- if (IS_ERR(spi))
++ if (IS_ERR(spi)) {
+ dev_warn(&master->dev, "Failed to create SPI device for %s\n",
+ nc->full_name);
++ of_node_clear_flag(nc, OF_POPULATED);
++ }
+ }
+ }
+ #else
+@@ -3120,6 +3122,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
+ if (IS_ERR(spi)) {
+ pr_err("%s: failed to create for '%s'\n",
+ __func__, rd->dn->full_name);
++ of_node_clear_flag(rd->dn, OF_POPULATED);
+ return notifier_from_errno(PTR_ERR(spi));
+ }
+ break;
+diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
+index 78f524f..f4dbcb1 100644
+--- a/drivers/staging/wilc1000/host_interface.c
++++ b/drivers/staging/wilc1000/host_interface.c
+@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
+
+ clients_count++;
+
+- destroy_workqueue(hif_workqueue);
+ _fail_:
+ return result;
+ }
+diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
+index 0e4dc0a..7a22307 100644
+--- a/drivers/thermal/intel_powerclamp.c
++++ b/drivers/thermal/intel_powerclamp.c
+@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
+ .set_cur_state = powerclamp_set_cur_state,
+ };
+
+-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
+- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
+- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
+- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
+- {}
+-};
+-MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+-
+ static int __init powerclamp_probe(void)
+ {
+- if (!x86_match_cpu(intel_powerclamp_ids)) {
+- pr_err("Intel powerclamp does not run on family %d model %d\n",
+- boot_cpu_data.x86, boot_cpu_data.x86_model);
++ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
++ pr_err("CPU does not support MWAIT");
+ return -ENODEV;
+ }
+
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 2705ca9..fd375f1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
+ return 0;
+
++ if (new_screen_size > (4 << 20))
++ return -EINVAL;
+ newscreen = kmalloc(new_screen_size, GFP_USER);
+ if (!newscreen)
+ return -ENOMEM;
+
++ if (vc == sel_cons)
++ clear_selection();
++
+ old_rows = vc->vc_rows;
+ old_row_size = vc->vc_size_row;
+
+@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
+ break;
+ case 3: /* erase scroll-back buffer (and whole display) */
+ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
+- vc->vc_screenbuf_size >> 1);
++ vc->vc_screenbuf_size);
+ set_origin(vc);
+ if (con_is_visible(vc))
+ update_screen(vc);
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 053bac9..887be34 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -185,6 +185,8 @@ static void host_stop(struct ci_hdrc *ci)
+
+ if (hcd) {
+ usb_remove_hcd(hcd);
++ ci->role = CI_ROLE_END;
++ synchronize_irq(ci->irq);
+ usb_put_hcd(hcd);
+ if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
+ (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 6854461..6443cfb 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3055,7 +3055,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ kfree(dwc->setup_buf);
+
+ err2:
+- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
++ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
+
+ err1:
+@@ -3080,7 +3080,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
+ kfree(dwc->setup_buf);
+ kfree(dwc->zlp_buf);
+
+- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
++ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
+
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 5f562c1..9b9e71f 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -587,8 +587,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+
+ /* throttle high/super speed IRQ rate back slightly */
+ if (gadget_is_dualspeed(dev->gadget))
+- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+- dev->gadget->speed == USB_SPEED_SUPER)
++ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
++ dev->gadget->speed == USB_SPEED_SUPER)) &&
++ !list_empty(&dev->tx_reqs))
+ ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
+ : 0;
+
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index bb1f6c8..45bc997 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
+ dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
+ goto err;
+ }
+- ep->ep.name = name;
++ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+
+ ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
+ ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 1700908..86612ac 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -72,7 +72,7 @@
+ static const char hcd_name [] = "ohci_hcd";
+
+ #define STATECHANGE_DELAY msecs_to_jiffies(300)
+-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
++#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
+
+ #include "ohci.h"
+ #include "pci-quirks.h"
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 730b9fd..0ef1690 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_RESUME);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
+@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ return 0;
+ }
+
++/*
++ * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged in S3:
++ * warm-reset a USB3 device stuck in polling or compliance mode after resume.
++ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
++ */
++static bool xhci_port_missing_cas_quirk(int port_index,
++ __le32 __iomem **port_array)
++{
++ u32 portsc;
++
++ portsc = readl(port_array[port_index]);
++
++ /* if any of these are set we are not stuck */
++ if (portsc & (PORT_CONNECT | PORT_CAS))
++ return false;
++
++ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
++ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
++ return false;
++
++ /* clear wakeup/change bits, and do a warm port reset */
++ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
++ portsc |= PORT_WR;
++ writel(portsc, port_array[port_index]);
++ /* flush write */
++ readl(port_array[port_index]);
++ return true;
++}
++
+ int xhci_bus_resume(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ u32 temp;
+
+ temp = readl(port_array[port_index]);
++
++ /* warm reset CAS limited ports stuck in polling/compliance */
++ if ((xhci->quirks & XHCI_MISSING_CAS) &&
++ (hcd->speed >= HCD_USB3) &&
++ xhci_port_missing_cas_quirk(port_index, port_array)) {
++ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
++ continue;
++ }
+ if (DEV_SUPERSPEED_ANY(temp))
+ temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ else
+@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+
+ if (need_usb2_u3_exit) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
+
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index d7b0f97..e96ae80 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -45,11 +45,13 @@
+
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+ #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+ #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
++#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ }
+@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+ xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
++ xhci->quirks |= XHCI_MISSING_CAS;
++
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ168) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index b2c1dc5..f945380 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -314,6 +314,8 @@ struct xhci_op_regs {
+ #define XDEV_U2 (0x2 << 5)
+ #define XDEV_U3 (0x3 << 5)
+ #define XDEV_INACTIVE (0x6 << 5)
++#define XDEV_POLLING (0x7 << 5)
++#define XDEV_COMP_MODE (0xa << 5)
+ #define XDEV_RESUME (0xf << 5)
+ /* true: port has power (see HCC_PPC) */
+ #define PORT_POWER (1 << 9)
+@@ -1653,6 +1655,7 @@ struct xhci_hcd {
+ #define XHCI_MTK_HOST (1 << 21)
+ #define XHCI_SSIC_PORT_UNUSED (1 << 22)
+ #define XHCI_NO_64BIT_SUPPORT (1 << 23)
++#define XHCI_MISSING_CAS (1 << 24)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index 0b4cec9..dae92de 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -337,6 +337,7 @@ static int omap2430_musb_init(struct musb *musb)
+ }
+ musb->isr = omap2430_musb_interrupt;
+ phy_init(musb->phy);
++ phy_power_on(musb->phy);
+
+ l = musb_readl(musb->mregs, OTG_INTERFSEL);
+
+@@ -373,8 +374,6 @@ static void omap2430_musb_enable(struct musb *musb)
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
+ struct omap_musb_board_data *data = pdata->board_data;
+
+- if (!WARN_ON(!musb->phy))
+- phy_power_on(musb->phy);
+
+ omap2430_set_power(musb, true, glue->cable_connected);
+
+@@ -413,9 +412,6 @@ static void omap2430_musb_disable(struct musb *musb)
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+
+- if (!WARN_ON(!musb->phy))
+- phy_power_off(musb->phy);
+-
+ if (glue->status != MUSB_UNKNOWN)
+ omap_control_usb_set_mode(glue->control_otghs,
+ USB_MODE_DISCONNECT);
+@@ -429,6 +425,7 @@ static int omap2430_musb_exit(struct musb *musb)
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+
+ omap2430_low_level_exit(musb);
++ phy_power_off(musb->phy);
+ phy_exit(musb->phy);
+ musb->phy = NULL;
+ cancel_work_sync(&glue->omap_musb_mailbox_work);
+diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
+index 1d70add..d544b33 100644
+--- a/drivers/usb/renesas_usbhs/rcar3.c
++++ b/drivers/usb/renesas_usbhs/rcar3.c
+@@ -9,6 +9,7 @@
+ *
+ */
+
++#include <linux/delay.h>
+ #include <linux/io.h>
+ #include "common.h"
+ #include "rcar3.h"
+@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
+
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+
+- if (enable)
++ if (enable) {
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
+- else
++ /* The controller on R-Car Gen3 needs to wait up to 45 usec */
++ udelay(45);
++ } else {
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 54a4de0..f61477b 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
+ u8 control;
+ int result;
+
+- cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
++ result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
++ if (result)
++ return result;
+
+ result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
+ |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b2d767e..0ff7f38 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
+ /* ekey Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ /* Infineon Devices */
+- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
++ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
++ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
+ /* GE Healthcare devices */
+ { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
+ /* Active Research (Actisense) devices */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index f87a938..21011c0 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -626,8 +626,9 @@
+ /*
+ * Infineon Technologies
+ */
+-#define INFINEON_VID 0x058b
+-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++#define INFINEON_VID 0x058b
++#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
+
+ /*
+ * Acton Research Corp.
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index d213cf4..4a037b4 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
+
+ serial->disconnected = 0;
+
+- usb_serial_console_init(serial->port[0]->minor);
++ if (num_ports > 0)
++ usb_serial_console_init(serial->port[0]->minor);
+ exit:
+ module_put(type->driver.owner);
+ return 0;
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+index 9e4800a..951dd93 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+@@ -5348,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
+
+ dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+- if (!dsi->proto_base) {
++ if (!dsi->phy_base) {
+ DSSERR("can't ioremap DSI PHY\n");
+ return -ENOMEM;
+ }
+@@ -5368,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
+
+ dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
+ resource_size(res));
+- if (!dsi->proto_base) {
++ if (!dsi->pll_base) {
+ DSSERR("can't ioremap DSI PLL\n");
+ return -ENOMEM;
+ }
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index 2c0487f..ed41fdb 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -2125,7 +2125,7 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp,
+
+ timings = of_get_display_timings(disp);
+ if (!timings)
+- goto out;
++ return -EINVAL;
+
+ ret = -ENOMEM;
+ info->modes = kmalloc_array(timings->num_timings,
+diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
+index 8c4e617..6d9e517 100644
+--- a/drivers/virtio/virtio_pci_legacy.c
++++ b/drivers/virtio/virtio_pci_legacy.c
+@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
+ return -ENODEV;
+ }
+
+- rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+- if (rc)
+- rc = dma_set_mask_and_coherent(&pci_dev->dev,
+- DMA_BIT_MASK(32));
++ rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
++ if (rc) {
++ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
++ } else {
++ /*
++ * The virtio ring base address is expressed as a 32-bit PFN,
++ * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
++ */
++ dma_set_coherent_mask(&pci_dev->dev,
++ DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
++ }
++
+ if (rc)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index ed9c9ee..6b2cd92 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
+
+ if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+ vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
++ if (!vq->event)
++ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ }
+
+ }
+@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+ * entry. Always do both to keep code simple. */
+ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+ vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
++ if (!vq->event)
++ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ }
+ vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
+ END_USE(vq);
+@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+ * more to do. */
+ /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+- * entry. Always do both to keep code simple. */
++ * entry. Always update the event index to keep code simple. */
+ if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+ vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
++ if (!vq->event)
++ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ }
+ /* TODO: tune this threshold */
+ bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
+@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+- vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
++ if (!vq->event)
++ vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ }
+
+ /* Put everything in free lists. */
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index e6811c4..bc1a004 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8915,9 +8915,14 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+ * So even we call qgroup_free_data(), it won't decrease reserved
+ * space.
+ * 2) Not written to disk
+- * This means the reserved space should be freed here.
++ * This means the reserved space should be freed here. However,
++ * if a truncate invalidates the page (by clearing PageDirty)
++ * and the page is accounted for while allocating extent
++ * in btrfs_check_data_free_space() we let delayed_ref to
++ * free the entire extent.
+ */
+- btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
++ if (PageDirty(page))
++ btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
+ if (!inode_evicting) {
+ clear_extent_bit(tree, page_start, page_end,
+ EXTENT_LOCKED | EXTENT_DIRTY |
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index ef9c55b..90e1198 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
+ int index, int error)
+ {
+ struct btrfs_log_ctx *ctx;
++ struct btrfs_log_ctx *safe;
+
+- if (!error) {
+- INIT_LIST_HEAD(&root->log_ctxs[index]);
+- return;
+- }
+-
+- list_for_each_entry(ctx, &root->log_ctxs[index], list)
++ list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
++ list_del_init(&ctx->list);
+ ctx->log_ret = error;
++ }
+
+ INIT_LIST_HEAD(&root->log_ctxs[index]);
+ }
+@@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ mutex_unlock(&root->log_mutex);
+
+ out_wake_log_root:
+- /*
+- * We needn't get log_mutex here because we are sure all
+- * the other tasks are blocked.
+- */
++ mutex_lock(&log_root_tree->log_mutex);
+ btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
+
+- mutex_lock(&log_root_tree->log_mutex);
+ log_root_tree->log_transid_committed++;
+ atomic_set(&log_root_tree->log_commit[index2], 0);
+ mutex_unlock(&log_root_tree->log_mutex);
+@@ -2978,10 +2972,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
+ wake_up(&log_root_tree->log_commit_wait[index2]);
+ out:
+- /* See above. */
+- btrfs_remove_all_log_ctxs(root, index1, ret);
+-
+ mutex_lock(&root->log_mutex);
++ btrfs_remove_all_log_ctxs(root, index1, ret);
+ root->log_transid_committed++;
+ atomic_set(&root->log_commit[index1], 0);
+ mutex_unlock(&root->log_mutex);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index a204d7e..0fe31b4 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1147,9 +1147,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
+
+ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+-
+- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
++ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+
+ list_del_init(&stp->st_locks);
+ nfs4_unhash_stid(&stp->st_stid);
+@@ -1158,12 +1156,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+
+ static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
++ struct nfs4_client *clp = stp->st_stid.sc_client;
+ bool unhashed;
+
+- spin_lock(&oo->oo_owner.so_client->cl_lock);
++ spin_lock(&clp->cl_lock);
+ unhashed = unhash_lock_stateid(stp);
+- spin_unlock(&oo->oo_owner.so_client->cl_lock);
++ spin_unlock(&clp->cl_lock);
+ if (unhashed)
+ nfs4_put_stid(&stp->st_stid);
+ }
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index abadbc3..767377e 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -171,6 +171,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
+ len -= bytes;
+ }
+
++ if (!error)
++ error = vfs_fsync(new_file, 0);
+ fput(new_file);
+ out_fput:
+ fput(old_file);
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index c75625c..cf2bfeb 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -294,9 +294,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
+ if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
+ return NULL;
+
+- if (!realinode->i_op->get_acl)
+- return NULL;
+-
+ old_cred = ovl_override_creds(inode->i_sb);
+ acl = get_acl(realinode, type);
+ revert_creds(old_cred);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index e2a94a2..a78415d 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -1026,6 +1026,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+
+ posix_acl_release(acl);
+
++ /*
++ * Check if sgid bit needs to be cleared (actual setacl operation will
++ * be done with mounter's capabilities and so that won't do it for us).
++ */
++ if (unlikely(inode->i_mode & S_ISGID) &&
++ handler->flags == ACL_TYPE_ACCESS &&
++ !in_group_p(inode->i_gid) &&
++ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
++ struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
++
++ err = ovl_setattr(dentry, &iattr);
++ if (err)
++ return err;
++ }
++
+ err = ovl_xattr_set(dentry, handler->name, value, size, flags);
+ if (!err)
+ ovl_copyattr(ovl_inode_real(inode, NULL), inode);
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 4b86d3a..3b27145 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
+ */
+ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+ {
+- int err;
++ int err = 0;
+ struct qstr nm;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent;
+@@ -452,14 +452,20 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+ kfree(file->private_data);
+ file->private_data = NULL;
+
+- if (err != -ENOENT) {
++ if (err != -ENOENT)
+ ubifs_err(c, "cannot find next direntry, error %d", err);
+- return err;
+- }
++ else
++ /*
++ * -ENOENT is a non-fatal error in this context, the TNC uses
++ * it to indicate that the cursor moved past the current directory
++ * and readdir() has to stop.
++ */
++ err = 0;
++
+
+ /* 2 is a special value indicating that there are no more direntries */
+ ctx->pos = 2;
+- return 0;
++ return err;
+ }
+
+ /* Free saved readdir() state when the directory is closed */
+diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
+index 3cc3cf7..ac9a003 100644
+--- a/fs/xfs/libxfs/xfs_dquot_buf.c
++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
+@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
+ if (mp->m_quotainfo)
+ ndquots = mp->m_quotainfo->qi_dqperchunk;
+ else
+- ndquots = xfs_calc_dquots_per_chunk(
+- XFS_BB_TO_FSB(mp, bp->b_length));
++ ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
+
+ for (i = 0; i < ndquots; i++, d++) {
+ if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+diff --git a/include/linux/pwm.h b/include/linux/pwm.h
+index f1bbae0..2c6c511 100644
+--- a/include/linux/pwm.h
++++ b/include/linux/pwm.h
+@@ -641,6 +641,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
+ #ifdef CONFIG_PWM_SYSFS
+ void pwmchip_sysfs_export(struct pwm_chip *chip);
+ void pwmchip_sysfs_unexport(struct pwm_chip *chip);
++void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
+ #else
+ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+ {
+@@ -649,6 +650,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+ static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+ {
+ }
++
++static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
++{
++}
+ #endif /* CONFIG_PWM_SYSFS */
+
+ #endif /* __LINUX_PWM_H */
+diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
+index 185f8ea..407ca0d 100644
+--- a/include/uapi/linux/Kbuild
++++ b/include/uapi/linux/Kbuild
+@@ -396,6 +396,7 @@ header-y += string.h
+ header-y += suspend_ioctls.h
+ header-y += swab.h
+ header-y += synclink.h
++header-y += sync_file.h
+ header-y += sysctl.h
+ header-y += sysinfo.h
+ header-y += target_core_user.h
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 32bf6f7..96db64b 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ static inline struct timer_base *
+-__get_target_base(struct timer_base *base, unsigned tflags)
++get_target_base(struct timer_base *base, unsigned tflags)
+ {
+ #ifdef CONFIG_SMP
+ if ((tflags & TIMER_PINNED) || !base->migration_enabled)
+@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
+
+ static inline void forward_timer_base(struct timer_base *base)
+ {
++ unsigned long jnow = READ_ONCE(jiffies);
++
+ /*
+ * We only forward the base when it's idle and we have a delta between
+ * base clock and jiffies.
+ */
+- if (!base->is_idle || (long) (jiffies - base->clk) < 2)
++ if (!base->is_idle || (long) (jnow - base->clk) < 2)
+ return;
+
+ /*
+ * If the next expiry value is > jiffies, then we fast forward to
+ * jiffies otherwise we forward to the next expiry value.
+ */
+- if (time_after(base->next_expiry, jiffies))
+- base->clk = jiffies;
++ if (time_after(base->next_expiry, jnow))
++ base->clk = jnow;
+ else
+ base->clk = base->next_expiry;
+ }
+ #else
+ static inline struct timer_base *
+-__get_target_base(struct timer_base *base, unsigned tflags)
++get_target_base(struct timer_base *base, unsigned tflags)
+ {
+ return get_timer_this_cpu_base(tflags);
+ }
+@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
+ static inline void forward_timer_base(struct timer_base *base) { }
+ #endif
+
+-static inline struct timer_base *
+-get_target_base(struct timer_base *base, unsigned tflags)
+-{
+- struct timer_base *target = __get_target_base(base, tflags);
+-
+- forward_timer_base(target);
+- return target;
+-}
+
+ /*
+ * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
+@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
+ {
+ for (;;) {
+ struct timer_base *base;
+- u32 tf = timer->flags;
++ u32 tf;
++
++ /*
++ * We need to use READ_ONCE() here, otherwise the compiler
++ * might re-read @tf between the check for TIMER_MIGRATING
++ * and spin_lock().
++ */
++ tf = READ_ONCE(timer->flags);
+
+ if (!(tf & TIMER_MIGRATING)) {
+ base = get_timer_base(tf);
+@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ unsigned long clk = 0, flags;
+ int ret = 0;
+
++ BUG_ON(!timer->function);
++
+ /*
+ * This is a common optimization triggered by the networking code - if
+ * the timer is re-modified to have the same timeout or ends up in the
+@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ if (timer_pending(timer)) {
+ if (timer->expires == expires)
+ return 1;
++
+ /*
+- * Take the current timer_jiffies of base, but without holding
+- * the lock!
++ * We lock timer base and calculate the bucket index right
++ * here. If the timer ends up in the same bucket, then we
++ * just update the expiry time and avoid the whole
++ * dequeue/enqueue dance.
+ */
+- base = get_timer_base(timer->flags);
+- clk = base->clk;
++ base = lock_timer_base(timer, &flags);
+
++ clk = base->clk;
+ idx = calc_wheel_index(expires, clk);
+
+ /*
+@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ */
+ if (idx == timer_get_idx(timer)) {
+ timer->expires = expires;
+- return 1;
++ ret = 1;
++ goto out_unlock;
+ }
++ } else {
++ base = lock_timer_base(timer, &flags);
+ }
+
+ timer_stats_timer_set_start_info(timer);
+- BUG_ON(!timer->function);
+-
+- base = lock_timer_base(timer, &flags);
+
+ ret = detach_if_pending(timer, base, false);
+ if (!ret && pending_only)
+@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ }
+ }
+
++ /* Try to forward a stale timer base clock */
++ forward_timer_base(base);
++
+ timer->expires = expires;
+ /*
+ * If 'idx' was calculated above and the base time did not advance
+- * between calculating 'idx' and taking the lock, only enqueue_timer()
+- * and trigger_dyntick_cpu() is required. Otherwise we need to
+- * (re)calculate the wheel index via internal_add_timer().
++ * between calculating 'idx' and possibly switching the base, only
++ * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
++ * we need to (re)calculate the wheel index via
++ * internal_add_timer().
+ */
+ if (idx != UINT_MAX && clk == base->clk) {
+ enqueue_timer(base, timer, idx);
+@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+ is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
+ base->next_expiry = nextevt;
+ /*
+- * We have a fresh next event. Check whether we can forward the base:
++ * We have a fresh next event. Check whether we can forward the
++ * base. We can only do that when @basej is past base->clk
++ * otherwise we might rewind base->clk.
+ */
+- if (time_after(nextevt, jiffies))
+- base->clk = jiffies;
+- else if (time_after(nextevt, base->clk))
+- base->clk = nextevt;
++ if (time_after(basej, base->clk)) {
++ if (time_after(nextevt, basej))
++ base->clk = basej;
++ else if (time_after(nextevt, base->clk))
++ base->clk = nextevt;
++ }
+
+ if (time_before_eq(nextevt, basej)) {
+ expires = basem;
+diff --git a/mm/list_lru.c b/mm/list_lru.c
+index 1d05cb9..234676e 100644
+--- a/mm/list_lru.c
++++ b/mm/list_lru.c
+@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+ err = memcg_init_list_lru(lru, memcg_aware);
+ if (err) {
+ kfree(lru->node);
++ /* Do this so a list_lru_destroy() doesn't crash: */
++ lru->node = NULL;
+ goto out;
+ }
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 4be518d..dddead1 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1947,6 +1947,15 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ current->flags & PF_EXITING))
+ goto force;
+
++ /*
++ * Prevent unbounded recursion when reclaim operations need to
++ * allocate memory. This might exceed the limits temporarily,
++ * but we prefer facilitating memory reclaim and getting back
++ * under the limit over triggering OOM kills in these cases.
++ */
++ if (unlikely(current->flags & PF_MEMALLOC))
++ goto force;
++
+ if (unlikely(task_in_memcg_oom(current)))
+ goto nomem;
+
+diff --git a/mm/slab.c b/mm/slab.c
+index b672710..525a911 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -964,7 +964,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
+ * guaranteed to be valid until irq is re-enabled, because it will be
+ * freed after synchronize_sched().
+ */
+- if (force_change)
++ if (old_shared && force_change)
+ synchronize_sched();
+
+ fail:
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 0fe8b71..ba0fad7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3048,7 +3048,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
+ sc.gfp_mask,
+ sc.reclaim_idx);
+
++ current->flags |= PF_MEMALLOC;
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
++ current->flags &= ~PF_MEMALLOC;
+
+ trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 9dce3b1..59a9603 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2253,16 +2253,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
+ if (!(status->rx_flags & IEEE80211_RX_AMSDU))
+ return RX_CONTINUE;
+
+- if (ieee80211_has_a4(hdr->frame_control) &&
+- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+- !rx->sdata->u.vlan.sta)
+- return RX_DROP_UNUSABLE;
++ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
++ switch (rx->sdata->vif.type) {
++ case NL80211_IFTYPE_AP_VLAN:
++ if (!rx->sdata->u.vlan.sta)
++ return RX_DROP_UNUSABLE;
++ break;
++ case NL80211_IFTYPE_STATION:
++ if (!rx->sdata->u.mgd.use_4addr)
++ return RX_DROP_UNUSABLE;
++ break;
++ default:
++ return RX_DROP_UNUSABLE;
++ }
++ }
+
+- if (is_multicast_ether_addr(hdr->addr1) &&
+- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+- rx->sdata->u.vlan.sta) ||
+- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+- rx->sdata->u.mgd.use_4addr)))
++ if (is_multicast_ether_addr(hdr->addr1))
+ return RX_DROP_UNUSABLE;
+
+ skb->dev = dev;
+diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
+index 018eed7..8668a5c 100644
+--- a/net/netfilter/xt_NFLOG.c
++++ b/net/netfilter/xt_NFLOG.c
+@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
+ li.u.ulog.copy_len = info->len;
+ li.u.ulog.group = info->group;
+ li.u.ulog.qthreshold = info->threshold;
++ li.u.ulog.flags = 0;
+
+ if (info->flags & XT_NFLOG_F_COPY_LEN)
+ li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
+diff --git a/security/keys/Kconfig b/security/keys/Kconfig
+index f826e87..d942c7c 100644
+--- a/security/keys/Kconfig
++++ b/security/keys/Kconfig
+@@ -41,7 +41,7 @@ config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+- select CRYPTO
++ depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
+ select CRYPTO_AES
+ select CRYPTO_ECB
+ select CRYPTO_RNG
+diff --git a/security/keys/big_key.c b/security/keys/big_key.c
+index c0b3030..835c1ab 100644
+--- a/security/keys/big_key.c
++++ b/security/keys/big_key.c
+@@ -9,6 +9,7 @@
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
++#define pr_fmt(fmt) "big_key: "fmt
+ #include <linux/init.h>
+ #include <linux/seq_file.h>
+ #include <linux/file.h>
+@@ -341,44 +342,48 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+ */
+ static int __init big_key_init(void)
+ {
+- return register_key_type(&key_type_big_key);
+-}
+-
+-/*
+- * Initialize big_key crypto and RNG algorithms
+- */
+-static int __init big_key_crypto_init(void)
+-{
+- int ret = -EINVAL;
++ struct crypto_skcipher *cipher;
++ struct crypto_rng *rng;
++ int ret;
+
+- /* init RNG */
+- big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+- if (IS_ERR(big_key_rng)) {
+- big_key_rng = NULL;
+- return -EFAULT;
++ rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
++ if (IS_ERR(rng)) {
++ pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
++ return PTR_ERR(rng);
+ }
+
++ big_key_rng = rng;
++
+ /* seed RNG */
+- ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
+- if (ret)
+- goto error;
++ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
++ if (ret) {
++ pr_err("Can't reset rng: %d\n", ret);
++ goto error_rng;
++ }
+
+ /* init block cipher */
+- big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
+- 0, CRYPTO_ALG_ASYNC);
+- if (IS_ERR(big_key_skcipher)) {
+- big_key_skcipher = NULL;
+- ret = -EFAULT;
+- goto error;
++ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
++ if (IS_ERR(cipher)) {
++ ret = PTR_ERR(cipher);
++ pr_err("Can't alloc crypto: %d\n", ret);
++ goto error_rng;
++ }
++
++ big_key_skcipher = cipher;
++
++ ret = register_key_type(&key_type_big_key);
++ if (ret < 0) {
++ pr_err("Can't register type: %d\n", ret);
++ goto error_cipher;
+ }
+
+ return 0;
+
+-error:
++error_cipher:
++ crypto_free_skcipher(big_key_skcipher);
++error_rng:
+ crypto_free_rng(big_key_rng);
+- big_key_rng = NULL;
+ return ret;
+ }
+
+-device_initcall(big_key_init);
+-late_initcall(big_key_crypto_init);
++late_initcall(big_key_init);
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index f0611a6..b9f531c 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+ struct timespec now;
+ unsigned long timo;
+ key_ref_t key_ref, skey_ref;
+- char xbuf[12];
++ char xbuf[16];
+ int rc;
+
+ struct keyring_search_context ctx = {
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index dcc1028..37d9cfb 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+
+ ktime_get_ts64(&tm);
+ tm = timespec64_sub(tm, tmr->last_update);
+- cur_time.tv_nsec = tm.tv_nsec;
+- cur_time.tv_sec = tm.tv_sec;
++ cur_time.tv_nsec += tm.tv_nsec;
++ cur_time.tv_sec += tm.tv_sec;
+ snd_seq_sanity_real_time(&cur_time);
+ }
+ spin_unlock_irqrestore(&tmr->lock, flags);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 160c7f7..487fcbf 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -340,8 +340,7 @@ enum {
+
+ /* quirks for Nvidia */
+ #define AZX_DCAPS_PRESET_NVIDIA \
+- (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
+- AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
++ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+ AZX_DCAPS_SNOOP_TYPE(NVIDIA))
+
+ #define AZX_DCAPS_PRESET_CTHDA \
+@@ -1699,6 +1698,10 @@ static int azx_first_init(struct azx *chip)
+ }
+ }
+
++ /* NVidia hardware normally only supports up to 40 bits of DMA */
++ if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
++ dma_bits = 40;
++
+ /* disable 64bit DMA address on some devices */
+ if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
+ dev_dbg(card->dev, "Disabling 64bit DMA\n");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index bd481ac..26e866f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5809,8 +5809,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ #define ALC295_STANDARD_PINS \
+ {0x12, 0xb7a60130}, \
+ {0x14, 0x90170110}, \
+- {0x17, 0x21014020}, \
+- {0x18, 0x21a19030}, \
+ {0x21, 0x04211020}
+
+ #define ALC298_STANDARD_PINS \
+@@ -5857,11 +5855,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x1b, 0x02011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++ {0x14, 0x90170110},
++ {0x1b, 0x01011020},
++ {0x21, 0x0221101f}),
++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
+ {0x1b, 0x01014020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
++ {0x1b, 0x01011020},
++ {0x21, 0x0221103f}),
++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++ {0x14, 0x90170130},
+ {0x1b, 0x02011020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -6037,7 +6043,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ ALC292_STANDARD_PINS,
+ {0x13, 0x90a60140}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+- ALC295_STANDARD_PINS),
++ ALC295_STANDARD_PINS,
++ {0x17, 0x21014020},
++ {0x18, 0x21a19030}),
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC295_STANDARD_PINS,
++ {0x17, 0x21014040},
++ {0x18, 0x21a19050}),
+ SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_STANDARD_PINS,
+ {0x17, 0x90170110}),
+@@ -6611,6 +6623,7 @@ enum {
+ ALC891_FIXUP_HEADSET_MODE,
+ ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
+ ALC662_FIXUP_ACER_VERITON,
++ ALC892_FIXUP_ASROCK_MOBO,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -6887,6 +6900,16 @@ static const struct hda_fixup alc662_fixups[] = {
+ { }
+ }
+ },
++ [ALC892_FIXUP_ASROCK_MOBO] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x15, 0x40f000f0 }, /* disabled */
++ { 0x16, 0x40f000f0 }, /* disabled */
++ { 0x18, 0x01014011 }, /* LO */
++ { 0x1a, 0x01014012 }, /* LO */
++ { }
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -6924,6 +6947,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
++ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
+ SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
+ SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
+ SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index c60a776..8a59d47 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
+ AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
+ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+
++/* Syntek STK1160 */
++{
++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++ USB_DEVICE_ID_MATCH_INT_CLASS |
++ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++ .idVendor = 0x05e1,
++ .idProduct = 0x0408,
++ .bInterfaceClass = USB_CLASS_AUDIO,
++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "Syntek",
++ .product_name = "STK1160",
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_AUDIO_ALIGN_TRANSFER
++ }
++},
++
+ /* Digidesign Mbox */
+ {
+ /* Thanks to Clemens Ladisch <clemens@ladisch.de> */
diff --git a/4.8.6/4420_grsecurity-3.1-4.8.6-201611091800.patch b/4.8.7/4420_grsecurity-3.1-4.8.7-201611102210.patch
index 3336ca8..76a16d1 100644
--- a/4.8.6/4420_grsecurity-3.1-4.8.6-201611091800.patch
+++ b/4.8.7/4420_grsecurity-3.1-4.8.7-201611102210.patch
@@ -407,7 +407,7 @@ index ffab8b5..b8fcd61 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index b249529..d525945 100644
+index 4d0f28c..1fee46b 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -25907,7 +25907,7 @@ index 169963f..d5caf11 100644
movq pt_regs_bp(%rax), %rbp
movq pt_regs_si(%rax), %rsi
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index 5cb272a..cddd2e9 100644
+index 5cb272a..d22cf79 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -21,6 +21,7 @@
@@ -26072,7 +26072,17 @@ index 5cb272a..cddd2e9 100644
text_poke(ptr, ((unsigned char []){0x3E}), 1);
}
mutex_unlock(&text_mutex);
-@@ -593,7 +641,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+@@ -545,7 +593,9 @@ void alternatives_enable_smp(void)
+ if (uniproc_patched) {
+ pr_info("switching to SMP code\n");
+ BUG_ON(num_online_cpus() != 1);
++ pax_open_kernel();
+ clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
++ pax_close_kernel();
+ clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
+ list_for_each_entry(mod, &smp_alt_modules, next)
+ alternatives_smp_lock(mod->locks, mod->locks_end,
+@@ -593,7 +643,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
@@ -26081,7 +26091,7 @@ index 5cb272a..cddd2e9 100644
used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
(unsigned long)p->instr, p->len);
-@@ -640,7 +688,7 @@ void __init alternative_instructions(void)
+@@ -640,7 +690,7 @@ void __init alternative_instructions(void)
if (!uniproc_patched || num_possible_cpus() == 1)
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
@@ -26090,7 +26100,7 @@ index 5cb272a..cddd2e9 100644
#endif
apply_paravirt(__parainstructions, __parainstructions_end);
-@@ -661,13 +709,17 @@ void __init alternative_instructions(void)
+@@ -661,13 +711,17 @@ void __init alternative_instructions(void)
* instructions. And on the local CPU you need to be protected again NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
@@ -26110,7 +26120,7 @@ index 5cb272a..cddd2e9 100644
local_irq_restore(flags);
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
-@@ -689,20 +741,29 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+@@ -689,20 +743,29 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
@@ -26147,7 +26157,7 @@ index 5cb272a..cddd2e9 100644
local_irq_save(flags);
set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
if (pages[1])
-@@ -719,6 +780,7 @@ void *text_poke(void *addr, const void *opcode, size_t len)
+@@ -719,6 +782,7 @@ void *text_poke(void *addr, const void *opcode, size_t len)
for (i = 0; i < len; i++)
BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
local_irq_restore(flags);
@@ -26155,7 +26165,7 @@ index 5cb272a..cddd2e9 100644
return addr;
}
-@@ -772,7 +834,7 @@ int poke_int3_handler(struct pt_regs *regs)
+@@ -772,7 +836,7 @@ int poke_int3_handler(struct pt_regs *regs)
*/
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
@@ -26891,6 +26901,41 @@ index bcc9ccc..84b8a82 100644
struct thread_struct *thread = &curr->thread;
wait_for_master_cpu(cpu);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index fcd484d..8a466c1 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -56,7 +56,9 @@ void check_mpx_erratum(struct cpuinfo_x86 *c)
+ * is no such hardware known at the moment.
+ */
+ if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
++ pax_open_kernel();
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
++ pax_close_kernel();
+ pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
+ }
+ }
+@@ -177,8 +179,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
+ pr_info("Disabled fast string operations\n");
++ pax_open_kernel();
+ setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
+ setup_clear_cpu_cap(X86_FEATURE_ERMS);
++ pax_close_kernel();
+ }
+ }
+
+@@ -194,7 +198,9 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ */
+ if (c->x86 == 5 && c->x86_model == 9) {
+ pr_info("Disabling PGE capability bit\n");
++ pax_open_kernel();
+ setup_clear_cpu_cap(X86_FEATURE_PGE);
++ pax_close_kernel();
+ }
+
+ if (c->cpuid_level >= 0x00000001) {
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index de6626c..c84e8c1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -31202,7 +31247,7 @@ index 98111b3..73ca125 100644
identity_mapped:
/* set return address to 0 if not preserving context */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 98c9cd6..c32f54c 100644
+index d5219b1..0e04ab1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -114,6 +114,7 @@
@@ -32220,10 +32265,18 @@ index b70ca12..2eb1474 100644
#endif
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index 78b9cb5..79fb053 100644
+index 78b9cb5..88ba83a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
-@@ -157,7 +157,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
+@@ -23,6 +23,7 @@
+ #include <asm/x86_init.h>
+ #include <asm/geode.h>
+ #include <asm/apic.h>
++#include <asm/pgtable.h>
+
+ unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
+ EXPORT_SYMBOL(cpu_khz);
+@@ -157,7 +158,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
*/
smp_wmb();
@@ -32232,6 +32285,16 @@ index 78b9cb5..79fb053 100644
}
/*
+@@ -992,7 +993,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ }
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
++ pax_open_kernel();
+ *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
++ pax_close_kernel();
+
+ tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
+ if (!(freq->flags & CPUFREQ_CONST_LOOPS))
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 495c776..c0427ef 100644
--- a/arch/x86/kernel/uprobes.c
@@ -32713,7 +32776,7 @@ index 3235e0f..60b5e71 100644
out:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 4e95d3e..e3e58b1 100644
+index cbd7b92..d7ea4b9 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -971,7 +971,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
@@ -33098,7 +33161,7 @@ index 5cede40..f932797 100644
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 699f872..52b660d 100644
+index 46f74d4..eca57f1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1948,8 +1948,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -33221,7 +33284,7 @@ index 699f872..52b660d 100644
trace_kvm_fpu(1);
}
-@@ -7640,6 +7644,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+@@ -7642,6 +7646,8 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
struct static_key kvm_no_apic_vcpu __read_mostly;
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
@@ -33230,7 +33293,7 @@ index 699f872..52b660d 100644
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
-@@ -7657,11 +7663,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+@@ -7659,11 +7665,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -33249,7 +33312,7 @@ index 699f872..52b660d 100644
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
-@@ -7719,6 +7728,9 @@ fail_mmu_destroy:
+@@ -7721,6 +7730,9 @@ fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
@@ -33259,7 +33322,7 @@ index 699f872..52b660d 100644
fail:
return r;
}
-@@ -7737,6 +7749,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+@@ -7739,6 +7751,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
free_page((unsigned long)vcpu->arch.pio_data);
if (!lapic_in_kernel(vcpu))
static_key_slow_dec(&kvm_no_apic_vcpu);
@@ -41825,7 +41888,7 @@ index a6b36fc53..dc320a6 100644
backlight_notifier_registered = true;
init_done = true;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
-index 16288e7..91ab5f3 100644
+index 4b1e4ea..db24123 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -120,7 +120,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -44848,7 +44911,7 @@ index e722886..78a48b9 100644
(*pos)++;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
-index 5da47e26..fbfa419 100644
+index 4aae0d2..3d84db8 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -692,11 +692,11 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
@@ -45308,7 +45371,7 @@ index 3a1f49f..42a478e 100644
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index b46547e..79b533d 100644
+index 8c347f5..cb2b54b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -275,13 +275,13 @@ struct pstate_funcs {
@@ -45388,7 +45451,7 @@ index b46547e..79b533d 100644
const char *buf, size_t count)
{
unsigned int input;
-@@ -1145,19 +1145,19 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+@@ -1143,7 +1143,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
* right CPU.
*/
wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
@@ -45396,6 +45459,9 @@ index b46547e..79b533d 100644
+ pstate_funcs->get_val(cpu, pstate));
}
+ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+@@ -1162,14 +1162,14 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -45416,7 +45482,7 @@ index b46547e..79b533d 100644
intel_pstate_set_min_pstate(cpu);
}
-@@ -1303,7 +1303,7 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+@@ -1315,7 +1315,7 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
return;
cpu->pstate.current_pstate = pstate;
@@ -45425,7 +45491,7 @@ index b46547e..79b533d 100644
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
-@@ -1313,7 +1313,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+@@ -1325,7 +1325,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
from = cpu->pstate.current_pstate;
@@ -45434,7 +45500,7 @@ index b46547e..79b533d 100644
intel_pstate_update_pstate(cpu, target_pstate);
-@@ -1601,15 +1601,15 @@ static unsigned int force_load __initdata;
+@@ -1622,15 +1622,15 @@ static unsigned int force_load __initdata;
static int __init intel_pstate_msrs_not_valid(void)
{
@@ -45454,7 +45520,7 @@ index b46547e..79b533d 100644
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
-@@ -1622,15 +1622,7 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
+@@ -1643,15 +1643,7 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
@@ -46538,10 +46604,10 @@ index ac8deb0..f3caa10 100644
return -EINVAL;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index 53ff25a..6f88b8f 100644
+index b2dee10..fc44efe 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
-@@ -1558,8 +1558,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
+@@ -1631,8 +1631,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
if (gpiochip->irqchip) {
@@ -46554,7 +46620,7 @@ index 53ff25a..6f88b8f 100644
gpiochip->irqchip = NULL;
}
}
-@@ -1636,8 +1638,10 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+@@ -1709,8 +1711,10 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
*/
if (!irqchip->irq_request_resources &&
!irqchip->irq_release_resources) {
@@ -48538,7 +48604,7 @@ index 1c2aec3..f807515 100644
/**
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index e9a64fb..54a2344 100644
+index 63462f2..37eef36 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15569,13 +15569,13 @@ struct intel_quirk {
@@ -48834,7 +48900,7 @@ index ed7143d..527b26a 100644
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
-index dc57b62..8f2a3d8 100644
+index 193573d..3f62e53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -194,7 +194,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -57195,10 +57261,10 @@ index 15db5e9..16fc91b 100644
DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
-index 8abde6b..b9cdbef 100644
+index 6d53810..35207e1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
-@@ -3190,7 +3190,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+@@ -3193,7 +3193,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
mddev->resync_max_sectors : mddev->dev_sectors;
progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
@@ -57208,7 +57274,7 @@ index 8abde6b..b9cdbef 100644
/* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
-index bdf1606..443a023 100644
+index 7a6254d..6d6b8fb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -42,7 +42,7 @@ enum dm_raid1_error {
@@ -57283,7 +57349,7 @@ index bdf1606..443a023 100644
ms->mirror[mirror].error_type = 0;
ms->mirror[mirror].offset = offset;
-@@ -1388,7 +1388,7 @@ static void mirror_resume(struct dm_target *ti)
+@@ -1389,7 +1389,7 @@ static void mirror_resume(struct dm_target *ti)
*/
static char device_status_char(struct mirror *m)
{
@@ -57359,7 +57425,7 @@ index 28193a5..0543cc9 100644
schedule_work(&sc->trigger_event);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
-index 3e407a9c..5c5cbdb 100644
+index c4b53b3..801848c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -308,7 +308,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
@@ -57394,7 +57460,7 @@ index a15091a..2d20208 100644
pmd->bl_info.value_type.inc = data_block_inc;
pmd->bl_info.value_type.dec = data_block_dec;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 0f2928b..f9c3589 100644
+index eeef575..af16c27 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -569,14 +569,16 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
@@ -57477,7 +57543,7 @@ index 0f2928b..f9c3589 100644
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 915e84d..5155da8 100644
+index db0aa6c..3383744 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -198,10 +198,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
@@ -57712,10 +57778,10 @@ index 3e6d115..ffecdeb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 21dc00e..14b01ea 100644
+index 95bf4cd..3aa17b0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
-@@ -1050,7 +1050,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+@@ -1053,7 +1053,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
int first_clone;
@@ -57724,7 +57790,7 @@ index 21dc00e..14b01ea 100644
int max_sectors;
sector_t start_next_window;
-@@ -1880,7 +1880,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+@@ -1883,7 +1883,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (r1_sync_page_io(rdev, sect, s,
bio->bi_io_vec[idx].bv_page,
READ) != 0)
@@ -57733,7 +57799,7 @@ index 21dc00e..14b01ea 100644
}
sectors -= s;
sect += s;
-@@ -1971,7 +1971,7 @@ static void process_checks(struct r1bio *r1_bio)
+@@ -1974,7 +1974,7 @@ static void process_checks(struct r1bio *r1_bio)
} else
j = 0;
if (j >= 0)
@@ -57742,7 +57808,7 @@ index 21dc00e..14b01ea 100644
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
&& !error)) {
/* No need to write to this device. */
-@@ -2122,7 +2122,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+@@ -2125,7 +2125,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
rcu_read_unlock();
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
@@ -57752,10 +57818,10 @@ index 21dc00e..14b01ea 100644
"md/raid1:%s: read error corrected "
"(%d sectors at %llu on %s)\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index be1a9fc..6694394 100644
+index 39fddda..be1dd54 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
-@@ -1060,7 +1060,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
+@@ -1063,7 +1063,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid10_plug_cb *plug = NULL;
@@ -57764,7 +57830,7 @@ index be1a9fc..6694394 100644
int max_sectors;
int sectors;
-@@ -1438,7 +1438,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+@@ -1441,7 +1441,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -57773,7 +57839,7 @@ index be1a9fc..6694394 100644
struct bio *split;
-@@ -1826,7 +1826,7 @@ static void end_sync_read(struct bio *bio)
+@@ -1829,7 +1829,7 @@ static void end_sync_read(struct bio *bio)
/* The write handler will notice the lack of
* R10BIO_Uptodate and record any errors etc
*/
@@ -57782,7 +57848,7 @@ index be1a9fc..6694394 100644
&conf->mirrors[d].rdev->corrected_errors);
/* for reconstruct, we always reschedule after a read.
-@@ -1975,7 +1975,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+@@ -1978,7 +1978,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
if (j == vcnt)
continue;
@@ -57791,7 +57857,7 @@ index be1a9fc..6694394 100644
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
/* Don't fix anything. */
continue;
-@@ -2174,7 +2174,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2177,7 +2177,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
long cur_time_mon;
unsigned long hours_since_last;
@@ -57800,7 +57866,7 @@ index be1a9fc..6694394 100644
cur_time_mon = ktime_get_seconds();
-@@ -2195,9 +2195,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2198,9 +2198,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
* overflowing the shift of read_errors by hours_since_last.
*/
if (hours_since_last >= 8 * sizeof(read_errors))
@@ -57812,7 +57878,7 @@ index be1a9fc..6694394 100644
}
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-@@ -2251,8 +2251,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2254,8 +2254,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
return;
check_decay_read_errors(mddev, rdev);
@@ -57823,7 +57889,7 @@ index be1a9fc..6694394 100644
char b[BDEVNAME_SIZE];
bdevname(rdev->bdev, b);
-@@ -2260,7 +2260,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2263,7 +2263,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
"md/raid10:%s: %s: Raid device exceeded "
"read_error threshold [cur %d:max %d]\n",
mdname(mddev), b,
@@ -57832,7 +57898,7 @@ index be1a9fc..6694394 100644
printk(KERN_NOTICE
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
-@@ -2417,7 +2417,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2420,7 +2420,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sect +
choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
@@ -57841,7 +57907,7 @@ index be1a9fc..6694394 100644
}
rdev_dec_pending(rdev, mddev);
-@@ -3188,6 +3188,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+@@ -3191,6 +3191,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
@@ -57849,7 +57915,7 @@ index be1a9fc..6694394 100644
bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
-@@ -3213,7 +3214,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+@@ -3216,7 +3217,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sector = sector_nr;
set_bit(R10BIO_IsSync, &r10_bio->state);
raid10_find_phys(conf, r10_bio);
@@ -66479,7 +66545,7 @@ index da84b70..83e4978 100644
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
-index 2a5d3ad..59d9ad3 100644
+index 9cbca12..eae7c79 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -672,7 +672,7 @@ struct ath_hw_private_ops {
@@ -75488,7 +75554,7 @@ index 1deb6ad..3057db5 100644
if (drv->done)
good_bytes = drv->done(cmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
-index 6a219a0..fd669fd 100644
+index 05e892a..3fffafa 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -289,10 +289,10 @@ struct sdebug_queue {
@@ -75576,7 +75642,7 @@ index 6a219a0..fd669fd 100644
seq_printf(m, "submit_queues=%d\n", submit_queues);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
-@@ -5252,7 +5252,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
+@@ -5253,7 +5253,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
static bool fake_timeout(struct scsi_cmnd *scp)
{
@@ -75585,7 +75651,7 @@ index 6a219a0..fd669fd 100644
if (sdebug_every_nth < -1)
sdebug_every_nth = -1;
if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
-@@ -5283,7 +5283,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
+@@ -5284,7 +5284,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
scsi_set_resid(scp, 0);
if (sdebug_statistics)
@@ -75871,10 +75937,10 @@ index de2c1bf..60b8563 100644
.read = fuse_read,
};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
-index 200ca22..170ab80 100644
+index 935f1a5..3c4bf16 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
-@@ -2982,7 +2982,7 @@ int spi_bus_unlock(struct spi_master *master)
+@@ -2984,7 +2984,7 @@ int spi_bus_unlock(struct spi_master *master)
EXPORT_SYMBOL_GPL(spi_bus_unlock);
/* portable code must never pass more than 32 bytes */
@@ -82287,7 +82353,7 @@ index 1db0626..2e9f5ea 100644
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
-index 1700908..3b49b2e 100644
+index 86612ac..d432505 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -444,7 +444,7 @@ static int ohci_init (struct ohci_hcd *ohci)
@@ -82326,7 +82392,7 @@ index a7de8e8..e1ef134 100644
spin_lock_init(&uhci->lock);
setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
-index d7b0f97..378d99d 100644
+index e96ae80..130fe39 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -32,7 +32,7 @@
@@ -100725,7 +100791,7 @@ index 95d4191..f804459 100644
spin_lock_init(&cur_trans->delayed_refs.lock);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
-index ef9c55b..fcd9451 100644
+index 90e1198..65ac2c2 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -174,7 +174,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
@@ -100737,7 +100803,7 @@ index ef9c55b..fcd9451 100644
atomic_inc(&root->log_writers);
if (ctx) {
int index = root->log_transid % 2;
-@@ -2771,7 +2771,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+@@ -2769,7 +2769,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
wait_log_commit(root, log_transid - 1);
while (1) {
@@ -100746,7 +100812,7 @@ index ef9c55b..fcd9451 100644
/* when we're on an ssd, just kick the log commit out */
if (!btrfs_test_opt(root->fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
-@@ -2780,7 +2780,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+@@ -2778,7 +2778,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_lock(&root->log_mutex);
}
wait_for_writer(root);
@@ -100755,7 +100821,7 @@ index ef9c55b..fcd9451 100644
break;
}
-@@ -2826,7 +2826,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+@@ -2824,7 +2824,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_init_log_ctx(&root_log_ctx, NULL);
mutex_lock(&log_root_tree->log_mutex);
@@ -113082,10 +113148,10 @@ index 1fb2227..150c145 100644
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index a204d7e..f97b734 100644
+index 0fe31b4..89b42db 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
-@@ -2341,8 +2341,9 @@ static bool client_has_state(struct nfs4_client *clp)
+@@ -2339,8 +2339,9 @@ static bool client_has_state(struct nfs4_client *clp)
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113096,7 +113162,7 @@ index a204d7e..f97b734 100644
struct nfs4_client *conf, *new;
struct nfs4_client *unconf = NULL;
__be32 status;
-@@ -2636,8 +2637,9 @@ static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
+@@ -2634,8 +2635,9 @@ static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113107,7 +113173,7 @@ index a204d7e..f97b734 100644
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
-@@ -2761,8 +2763,9 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
+@@ -2759,8 +2761,9 @@ static __be32 nfsd4_map_bcts_dir(u32 *dir)
return nfserr_inval;
}
@@ -113118,7 +113184,7 @@ index a204d7e..f97b734 100644
struct nfsd4_session *session = cstate->session;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
__be32 status;
-@@ -2782,8 +2785,9 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state
+@@ -2780,8 +2783,9 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113129,7 +113195,7 @@ index a204d7e..f97b734 100644
__be32 status;
struct nfsd4_conn *conn;
struct nfsd4_session *session;
-@@ -2825,8 +2829,9 @@ static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4
+@@ -2823,8 +2827,9 @@ static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4
__be32
nfsd4_destroy_session(struct svc_rqst *r,
struct nfsd4_compound_state *cstate,
@@ -113140,7 +113206,7 @@ index a204d7e..f97b734 100644
struct nfsd4_session *ses;
__be32 status;
int ref_held_by_me = 0;
-@@ -2922,8 +2927,9 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
+@@ -2920,8 +2925,9 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113151,7 +113217,7 @@ index a204d7e..f97b734 100644
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct xdr_stream *xdr = &resp->xdr;
struct nfsd4_session *session;
-@@ -3057,8 +3063,9 @@ nfsd4_sequence_done(struct nfsd4_compoundres *resp)
+@@ -3055,8 +3061,9 @@ nfsd4_sequence_done(struct nfsd4_compoundres *resp)
}
__be32
@@ -113162,7 +113228,7 @@ index a204d7e..f97b734 100644
struct nfs4_client *conf, *unconf;
struct nfs4_client *clp = NULL;
__be32 status = 0;
-@@ -3098,8 +3105,9 @@ out:
+@@ -3096,8 +3103,9 @@ out:
}
__be32
@@ -113173,7 +113239,7 @@ index a204d7e..f97b734 100644
__be32 status = 0;
if (rc->rca_one_fs) {
-@@ -3136,8 +3144,9 @@ out:
+@@ -3134,8 +3142,9 @@ out:
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113184,7 +113250,7 @@ index a204d7e..f97b734 100644
struct xdr_netobj clname = setclid->se_name;
nfs4_verifier clverifier = setclid->se_verf;
struct nfs4_client *conf, *new;
-@@ -3195,8 +3204,9 @@ out:
+@@ -3193,8 +3202,9 @@ out:
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113195,7 +113261,7 @@ index a204d7e..f97b734 100644
struct nfs4_client *conf, *unconf;
struct nfs4_client *old = NULL;
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
-@@ -4440,8 +4450,9 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
+@@ -4438,8 +4448,9 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113206,7 +113272,7 @@ index a204d7e..f97b734 100644
struct nfs4_client *clp;
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-@@ -4891,8 +4902,9 @@ out:
+@@ -4889,8 +4900,9 @@ out:
*/
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113217,7 +113283,7 @@ index a204d7e..f97b734 100644
struct nfsd4_test_stateid_id *stateid;
struct nfs4_client *cl = cstate->session->se_client;
-@@ -4931,8 +4943,9 @@ out:
+@@ -4929,8 +4941,9 @@ out:
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113228,7 +113294,7 @@ index a204d7e..f97b734 100644
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
-@@ -5060,8 +5073,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+@@ -5058,8 +5071,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113239,7 +113305,7 @@ index a204d7e..f97b734 100644
__be32 status;
struct nfs4_openowner *oo;
struct nfs4_ol_stateid *stp;
-@@ -5129,8 +5143,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
+@@ -5127,8 +5141,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113250,7 +113316,7 @@ index a204d7e..f97b734 100644
__be32 status;
struct nfs4_ol_stateid *stp;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-@@ -5198,8 +5213,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+@@ -5196,8 +5211,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
*/
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113261,7 +113327,7 @@ index a204d7e..f97b734 100644
__be32 status;
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
-@@ -5228,8 +5244,9 @@ out:
+@@ -5226,8 +5242,9 @@ out:
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113272,7 +113338,7 @@ index a204d7e..f97b734 100644
struct nfs4_delegation *dp;
stateid_t *stateid = &dr->dr_stateid;
struct nfs4_stid *s;
-@@ -5580,8 +5597,9 @@ out:
+@@ -5578,8 +5595,9 @@ out:
*/
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113283,7 +113349,7 @@ index a204d7e..f97b734 100644
struct nfs4_openowner *open_sop = NULL;
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp = NULL;
-@@ -5782,8 +5800,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
+@@ -5780,8 +5798,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
*/
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113294,7 +113360,7 @@ index a204d7e..f97b734 100644
struct file_lock *file_lock = NULL;
struct nfs4_lockowner *lo = NULL;
__be32 status;
-@@ -5855,8 +5874,9 @@ out:
+@@ -5853,8 +5872,9 @@ out:
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -113305,7 +113371,7 @@ index a204d7e..f97b734 100644
struct nfs4_ol_stateid *stp;
struct file *filp = NULL;
struct file_lock *file_lock = NULL;
-@@ -5962,8 +5982,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+@@ -5960,8 +5980,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -113316,7 +113382,7 @@ index a204d7e..f97b734 100644
clientid_t *clid = &rlockowner->rl_clientid;
struct nfs4_stateowner *sop;
struct nfs4_lockowner *lo = NULL;
-@@ -6922,26 +6943,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
+@@ -6920,26 +6941,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
* functions to set current state id
*/
void
@@ -113355,7 +113421,7 @@ index a204d7e..f97b734 100644
put_stateid(cstate, &lock->lk_resp_stateid);
}
-@@ -6950,49 +6979,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
+@@ -6948,49 +6977,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
*/
void
@@ -116531,10 +116597,10 @@ index b9da9a0..e146758 100644
if (!orangefs_inode_cache) {
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
-index abadbc30..a67f44c 100644
+index 767377e..2e4b2e8 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
-@@ -197,7 +197,7 @@ static char *ovl_read_symlink(struct dentry *realdentry)
+@@ -199,7 +199,7 @@ static char *ovl_read_symlink(struct dentry *realdentry)
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = inode->i_op->readlink(realdentry,
@@ -116544,7 +116610,7 @@ index abadbc30..a67f44c 100644
if (res < 0) {
free_page((unsigned long) buf);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index e2a94a2..f2ac233 100644
+index a78415d..557e8db 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -148,8 +148,8 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -116576,7 +116642,7 @@ index e2a94a2..f2ac233 100644
if (!S_ISDIR(upperinode->i_mode))
__insert_inode_hash(inode, (unsigned long) upperinode);
}
-@@ -1107,8 +1107,8 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
+@@ -1122,8 +1122,8 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
static int ovl_fill_super(struct super_block *sb, void *data, int silent)
{
@@ -120053,7 +120119,7 @@ index 96a70fd..de3d84c 100644
tip = XFS_I(file_inode(tmp.file));
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
-index b8d64d5..3e87626 100644
+index b8d64d5..3e876267 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -218,7 +218,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
@@ -149240,10 +149306,10 @@ index 37dec7e..6a6ac85 100644
write_seqcount_begin(&tk_core.seq);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index 32bf6f7..a0ba7cb 100644
+index 96db64b..9133bad 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1633,7 +1633,7 @@ static inline void __run_timers(struct timer_base *base)
+@@ -1647,7 +1647,7 @@ static inline void __run_timers(struct timer_base *base)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
@@ -152292,7 +152358,7 @@ index 93fb63e..0aa6448 100644
if (end == start)
return error;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 4be518d..450a2ae 100644
+index dddead1..8832645 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -702,7 +702,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
@@ -155267,7 +155333,7 @@ index 971fc83..6afaf44 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index b672710..9ebcec1 100644
+index 525a911..227387b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -116,6 +116,7 @@
diff --git a/4.8.6/4425_grsec_remove_EI_PAX.patch b/4.8.7/4425_grsec_remove_EI_PAX.patch
index 594598a..594598a 100644
--- a/4.8.6/4425_grsec_remove_EI_PAX.patch
+++ b/4.8.7/4425_grsec_remove_EI_PAX.patch
diff --git a/4.8.6/4427_force_XATTR_PAX_tmpfs.patch b/4.8.7/4427_force_XATTR_PAX_tmpfs.patch
index 2562d2f..2562d2f 100644
--- a/4.8.6/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.8.7/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.8.6/4430_grsec-remove-localversion-grsec.patch b/4.8.7/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.8.6/4430_grsec-remove-localversion-grsec.patch
+++ b/4.8.7/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.8.6/4435_grsec-mute-warnings.patch b/4.8.7/4435_grsec-mute-warnings.patch
index 8929222..8929222 100644
--- a/4.8.6/4435_grsec-mute-warnings.patch
+++ b/4.8.7/4435_grsec-mute-warnings.patch
diff --git a/4.8.6/4440_grsec-remove-protected-paths.patch b/4.8.7/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.8.6/4440_grsec-remove-protected-paths.patch
+++ b/4.8.7/4440_grsec-remove-protected-paths.patch
diff --git a/4.8.6/4450_grsec-kconfig-default-gids.patch b/4.8.7/4450_grsec-kconfig-default-gids.patch
index 6fd0511..6fd0511 100644
--- a/4.8.6/4450_grsec-kconfig-default-gids.patch
+++ b/4.8.7/4450_grsec-kconfig-default-gids.patch
diff --git a/4.8.6/4465_selinux-avc_audit-log-curr_ip.patch b/4.8.7/4465_selinux-avc_audit-log-curr_ip.patch
index 7248385..7248385 100644
--- a/4.8.6/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.8.7/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.8.6/4470_disable-compat_vdso.patch b/4.8.7/4470_disable-compat_vdso.patch
index 1e4b84a..1e4b84a 100644
--- a/4.8.6/4470_disable-compat_vdso.patch
+++ b/4.8.7/4470_disable-compat_vdso.patch
diff --git a/4.8.6/4475_emutramp_default_on.patch b/4.8.7/4475_emutramp_default_on.patch
index 7b468ee..7b468ee 100644
--- a/4.8.6/4475_emutramp_default_on.patch
+++ b/4.8.7/4475_emutramp_default_on.patch