author     Anthony G. Basile <blueness@gentoo.org>  2015-05-09 21:22:16 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2015-05-09 21:22:16 -0400
commit     7e9e5b964c30f9230238ab7bd71a23552d82d7b5 (patch)
tree       034d45268edb3b1e9fb53492fa14430b888525cb
parent     Restore 3.19.6 to backport patch. Bug #548508. (diff)
download   hardened-patchset-20150509.tar.gz
           hardened-patchset-20150509.tar.bz2
           hardened-patchset-20150509.zip
Grsec/PaX: 3.1-{3.2.68,3.14.41,4.0.2}-201505091724
-rw-r--r--  .gitignore  1
-rw-r--r--  3.14.40/1039_linux-3.14.40.patch  2462
-rw-r--r--  3.14.41/0000_README (renamed from 3.14.40/0000_README)  6
-rw-r--r--  3.14.41/1040_linux-3.14.41.patch  3586
-rw-r--r--  3.14.41/4420_grsecurity-3.1-3.14.41-201505091723.patch (renamed from 3.14.40/4420_grsecurity-3.1-3.14.40-201505042052.patch)  532
-rw-r--r--  3.14.41/4425_grsec_remove_EI_PAX.patch (renamed from 4.0.1/4425_grsec_remove_EI_PAX.patch)  2
-rw-r--r--  3.14.41/4427_force_XATTR_PAX_tmpfs.patch (renamed from 3.14.40/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  3.14.41/4430_grsec-remove-localversion-grsec.patch (renamed from 3.14.40/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  3.14.41/4435_grsec-mute-warnings.patch (renamed from 3.14.40/4435_grsec-mute-warnings.patch)  0
-rw-r--r--  3.14.41/4440_grsec-remove-protected-paths.patch (renamed from 3.14.40/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  3.14.41/4450_grsec-kconfig-default-gids.patch (renamed from 3.14.40/4450_grsec-kconfig-default-gids.patch)  8
-rw-r--r--  3.14.41/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.14.40/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  3.14.41/4470_disable-compat_vdso.patch (renamed from 3.14.40/4470_disable-compat_vdso.patch)  0
-rw-r--r--  3.14.41/4475_emutramp_default_on.patch (renamed from 4.0.1/4475_emutramp_default_on.patch)  4
-rw-r--r--  3.2.68/0000_README  2
-rw-r--r--  3.2.68/4420_grsecurity-3.1-3.2.68-201505091720.patch (renamed from 3.2.68/4420_grsecurity-3.1-3.2.68-201505042051.patch)  172
-rw-r--r--  3.2.68/4425_grsec_remove_EI_PAX.patch  2
-rw-r--r--  3.2.68/4450_grsec-kconfig-default-gids.patch  8
-rw-r--r--  3.2.68/4475_emutramp_default_on.patch  4
-rw-r--r--  4.0.2/0000_README (renamed from 4.0.1/0000_README)  6
-rw-r--r--  4.0.2/1001_linux-4.0.2.patch  8587
-rw-r--r--  4.0.2/4420_grsecurity-3.1-4.0.2-201505091724.patch (renamed from 4.0.1/4420_grsecurity-3.1-4.0.1-201505042053.patch)  1226
-rw-r--r--  4.0.2/4425_grsec_remove_EI_PAX.patch (renamed from 3.14.40/4425_grsec_remove_EI_PAX.patch)  2
-rw-r--r--  4.0.2/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.0.1/4427_force_XATTR_PAX_tmpfs.patch)  0
-rw-r--r--  4.0.2/4430_grsec-remove-localversion-grsec.patch (renamed from 4.0.1/4430_grsec-remove-localversion-grsec.patch)  0
-rw-r--r--  4.0.2/4435_grsec-mute-warnings.patch (renamed from 4.0.1/4435_grsec-mute-warnings.patch)  10
-rw-r--r--  4.0.2/4440_grsec-remove-protected-paths.patch (renamed from 4.0.1/4440_grsec-remove-protected-paths.patch)  0
-rw-r--r--  4.0.2/4450_grsec-kconfig-default-gids.patch (renamed from 4.0.1/4450_grsec-kconfig-default-gids.patch)  8
-rw-r--r--  4.0.2/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.0.1/4465_selinux-avc_audit-log-curr_ip.patch)  0
-rw-r--r--  4.0.2/4470_disable-compat_vdso.patch (renamed from 4.0.1/4470_disable-compat_vdso.patch)  0
-rw-r--r--  4.0.2/4475_emutramp_default_on.patch (renamed from 3.14.40/4475_emutramp_default_on.patch)  4
31 files changed, 13645 insertions, 2987 deletions
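
For context, a minimal sketch (not part of this commit) of how the numbered files in one version directory above are typically applied on top of a vanilla kernel tree. The source tree path, the checkout location and the use of GNU patch with -p1 are assumptions; the authoritative list and ordering is the 0000_README shipped in each directory.

# hypothetical paths -- adjust to your checkout and kernel sources
P=/path/to/hardened-patchset/4.0.2
cd /usr/src/linux-4.0                        # vanilla 4.0 source tree (assumed)
patch -p1 < "$P"/1001_linux-4.0.2.patch      # incremental 4.0 -> 4.0.2
for p in "$P"/4[0-9][0-9][0-9]_*.patch; do   # 4420 grsecurity/PaX, then the 44xx tweaks,
        patch -p1 < "$p"                     # applied in numeric (glob) order
done

In practice these directories are consumed as the patch tarballs listed under download above (e.g. by Gentoo's hardened-sources ebuilds), so manual application along these lines is only needed outside that tooling.
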
diff --git a/.gitignore b/.gitignore
index 284ec2b..2b7bb0a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
grsecurity*
hardened*
+genpatches*
rsbac*
xtpax*
diff --git a/3.14.40/1039_linux-3.14.40.patch b/3.14.40/1039_linux-3.14.40.patch
deleted file mode 100644
index 6afa4bd..0000000
--- a/3.14.40/1039_linux-3.14.40.patch
+++ /dev/null
@@ -1,2462 +0,0 @@
-diff --git a/Makefile b/Makefile
-index b40845e..070e0eb 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 3
- PATCHLEVEL = 14
--SUBLEVEL = 39
-+SUBLEVEL = 40
- EXTRAVERSION =
- NAME = Remembering Coco
-
-diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
-index 98838a0..9d0ac09 100644
---- a/arch/alpha/mm/fault.c
-+++ b/arch/alpha/mm/fault.c
-@@ -156,6 +156,8 @@ retry:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
-index 9c69552..01e18b5 100644
---- a/arch/arc/mm/fault.c
-+++ b/arch/arc/mm/fault.c
-@@ -162,6 +162,8 @@ good_area:
- /* TBD: switch to pagefault_out_of_memory() */
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
-
-diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
-index 626989f..9fd61c7 100644
---- a/arch/arm/include/asm/pgtable-3level-hwdef.h
-+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
-@@ -43,7 +43,7 @@
- #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
- #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
- #define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
--#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
-+#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
- #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
- #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
- #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
-@@ -72,6 +72,7 @@
- #define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
- #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
- #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
-+#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
- #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
- #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
- #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
-diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
-index 85c60ad..06e0bc0 100644
---- a/arch/arm/include/asm/pgtable-3level.h
-+++ b/arch/arm/include/asm/pgtable-3level.h
-@@ -79,18 +79,19 @@
- #define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
- #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
- #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
--#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
- #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
- #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
- #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
--#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
--#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
-+#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
-+#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
- #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
-+#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
-
--#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
--#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
--#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
--#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
-+#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
-+#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
-+#define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
-+#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
-+#define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
-
- /*
- * To be used in assembly code with the upper page attributes.
-@@ -207,27 +208,32 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
- #define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
- #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
-
--#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF)
-+#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
-+ : !!(pmd_val(pmd) & (val)))
-+#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
-+
-+#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
-
- #define __HAVE_ARCH_PMD_WRITE
--#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
-+#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
-+#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
-
- #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
- #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
-
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
--#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
--#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
-+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
-+#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
- #endif
-
- #define PMD_BIT_FUNC(fn,op) \
- static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
-
--PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY);
-+PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
- PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
--PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
--PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY);
--PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY);
-+PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
-+PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
-+PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
- PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
-
- #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
-@@ -241,8 +247,8 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
-
- static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
- {
-- const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
-- PMD_SECT_VALID | PMD_SECT_NONE;
-+ const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
-+ L_PMD_SECT_VALID | L_PMD_SECT_NONE;
- pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
- return pmd;
- }
-@@ -253,8 +259,13 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
- BUG_ON(addr >= TASK_SIZE);
-
- /* create a faulting entry if PROT_NONE protected */
-- if (pmd_val(pmd) & PMD_SECT_NONE)
-- pmd_val(pmd) &= ~PMD_SECT_VALID;
-+ if (pmd_val(pmd) & L_PMD_SECT_NONE)
-+ pmd_val(pmd) &= ~L_PMD_SECT_VALID;
-+
-+ if (pmd_write(pmd) && pmd_dirty(pmd))
-+ pmd_val(pmd) &= ~PMD_SECT_AP2;
-+ else
-+ pmd_val(pmd) |= PMD_SECT_AP2;
-
- *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
- flush_pmd_entry(pmdp);
-diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
-index 7d59b52..89dba13 100644
---- a/arch/arm/include/asm/pgtable.h
-+++ b/arch/arm/include/asm/pgtable.h
-@@ -214,12 +214,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
-
- #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
-
-+#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
-+ : !!(pte_val(pte) & (val)))
-+#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
-+
- #define pte_none(pte) (!pte_val(pte))
--#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
--#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
--#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
--#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
--#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
-+#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
-+#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
-+#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
-+#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
-+#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
- #define pte_special(pte) (0)
-
- #define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
-diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
-index 22e3ad6..eb81123 100644
---- a/arch/arm/mm/proc-v7-3level.S
-+++ b/arch/arm/mm/proc-v7-3level.S
-@@ -86,8 +86,13 @@ ENTRY(cpu_v7_set_pte_ext)
- tst rh, #1 << (57 - 32) @ L_PTE_NONE
- bicne rl, #L_PTE_VALID
- bne 1f
-- tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
-- orreq rl, #L_PTE_RDONLY
-+
-+ eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
-+ @ test for !L_PTE_DIRTY || L_PTE_RDONLY
-+ tst ip, #1 << (55 - 32) | 1 << (58 - 32)
-+ orrne rl, #PTE_AP2
-+ biceq rl, #PTE_AP2
-+
- 1: strd r2, r3, [r0]
- ALT_SMP(W(nop))
- ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
-diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
-index 0eca933..d223a8b 100644
---- a/arch/avr32/mm/fault.c
-+++ b/arch/avr32/mm/fault.c
-@@ -142,6 +142,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
-index 1790f22..2686a7a 100644
---- a/arch/cris/mm/fault.c
-+++ b/arch/cris/mm/fault.c
-@@ -176,6 +176,8 @@ retry:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
-index 9a66372..ec4917d 100644
---- a/arch/frv/mm/fault.c
-+++ b/arch/frv/mm/fault.c
-@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
-index 7225dad..ba5ba7a 100644
---- a/arch/ia64/mm/fault.c
-+++ b/arch/ia64/mm/fault.c
-@@ -172,6 +172,8 @@ retry:
- */
- if (fault & VM_FAULT_OOM) {
- goto out_of_memory;
-+ } else if (fault & VM_FAULT_SIGSEGV) {
-+ goto bad_area;
- } else if (fault & VM_FAULT_SIGBUS) {
- signal = SIGBUS;
- goto bad_area;
-diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
-index e9c6a80..e3d4d48901 100644
---- a/arch/m32r/mm/fault.c
-+++ b/arch/m32r/mm/fault.c
-@@ -200,6 +200,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
-index 2bd7487..b2f04ae 100644
---- a/arch/m68k/mm/fault.c
-+++ b/arch/m68k/mm/fault.c
-@@ -145,6 +145,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto map_err;
- else if (fault & VM_FAULT_SIGBUS)
- goto bus_err;
- BUG();
-diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
-index 332680e..2de5dc6 100644
---- a/arch/metag/mm/fault.c
-+++ b/arch/metag/mm/fault.c
-@@ -141,6 +141,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
-index fa4cf52..d46a5eb 100644
---- a/arch/microblaze/mm/fault.c
-+++ b/arch/microblaze/mm/fault.c
-@@ -224,6 +224,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index becc42b..70ab5d6 100644
---- a/arch/mips/mm/fault.c
-+++ b/arch/mips/mm/fault.c
-@@ -158,6 +158,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
-index 3516cbd..0c2cc5d 100644
---- a/arch/mn10300/mm/fault.c
-+++ b/arch/mn10300/mm/fault.c
-@@ -262,6 +262,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
-index 0703acf..230ac20 100644
---- a/arch/openrisc/mm/fault.c
-+++ b/arch/openrisc/mm/fault.c
-@@ -171,6 +171,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index d72197f..d27e388 100644
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
-@@ -256,6 +256,8 @@ good_area:
- */
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto bad_area;
- BUG();
-diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
-index 51ab9e7..010fabf 100644
---- a/arch/powerpc/mm/fault.c
-+++ b/arch/powerpc/mm/fault.c
-@@ -432,6 +432,8 @@ good_area:
- */
- fault = handle_mm_fault(mm, vma, address, flags);
- if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-+ if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- rc = mm_fault_error(regs, address, fault);
- if (rc >= MM_FAULT_RETURN)
- goto bail;
-diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
-index 641e727..62f3e4e 100644
---- a/arch/powerpc/platforms/cell/spu_fault.c
-+++ b/arch/powerpc/platforms/cell/spu_fault.c
-@@ -75,7 +75,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
- if (*flt & VM_FAULT_OOM) {
- ret = -ENOMEM;
- goto out_unlock;
-- } else if (*flt & VM_FAULT_SIGBUS) {
-+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
- ret = -EFAULT;
- goto out_unlock;
- }
-diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
-index 87ba7cf..65d633f 100644
---- a/arch/powerpc/platforms/cell/spufs/inode.c
-+++ b/arch/powerpc/platforms/cell/spufs/inode.c
-@@ -164,7 +164,7 @@ static void spufs_prune_dir(struct dentry *dir)
- struct dentry *dentry, *tmp;
-
- mutex_lock(&dir->d_inode->i_mutex);
-- list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
-+ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
- spin_lock(&dentry->d_lock);
- if (!(d_unhashed(dentry)) && dentry->d_inode) {
- dget_dlock(dentry);
-diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
-index d95265b2..8e95432 100644
---- a/arch/s390/mm/fault.c
-+++ b/arch/s390/mm/fault.c
-@@ -239,6 +239,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
- do_no_context(regs);
- else
- pagefault_out_of_memory();
-+ } else if (fault & VM_FAULT_SIGSEGV) {
-+ /* Kernel mode? Handle exceptions or die */
-+ if (!user_mode(regs))
-+ do_no_context(regs);
-+ else
-+ do_sigsegv(regs, SEGV_MAPERR);
- } else if (fault & VM_FAULT_SIGBUS) {
- /* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs))
-diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
-index 52238983..6860beb 100644
---- a/arch/score/mm/fault.c
-+++ b/arch/score/mm/fault.c
-@@ -114,6 +114,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
-index 541dc61..a58fec9 100644
---- a/arch/sh/mm/fault.c
-+++ b/arch/sh/mm/fault.c
-@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- } else {
- if (fault & VM_FAULT_SIGBUS)
- do_sigbus(regs, error_code, address);
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ bad_area(regs, error_code, address);
- else
- BUG();
- }
-diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index 59dbd46..163c787 100644
---- a/arch/sparc/mm/fault_32.c
-+++ b/arch/sparc/mm/fault_32.c
-@@ -252,6 +252,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 45a413e..0d6de79 100644
---- a/arch/sparc/mm/fault_64.c
-+++ b/arch/sparc/mm/fault_64.c
-@@ -448,6 +448,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
-index 6c05712..c6d2a76 100644
---- a/arch/tile/mm/fault.c
-+++ b/arch/tile/mm/fault.c
-@@ -444,6 +444,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
-index 974b874..53b8320 100644
---- a/arch/um/kernel/trap.c
-+++ b/arch/um/kernel/trap.c
-@@ -80,6 +80,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM) {
- goto out_of_memory;
-+ } else if (fault & VM_FAULT_SIGSEGV) {
-+ goto out;
- } else if (fault & VM_FAULT_SIGBUS) {
- err = -EACCES;
- goto out;
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 09651d4..cf1eeea 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2258,7 +2258,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
- * Not recognized on AMD in compat mode (but is recognized in legacy
- * mode).
- */
-- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
-+ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
- && !vendor_intel(ctxt))
- return emulate_ud(ctxt);
-
-@@ -2271,25 +2271,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
- setup_syscalls_segments(ctxt, &cs, &ss);
-
- ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
-- switch (ctxt->mode) {
-- case X86EMUL_MODE_PROT32:
-- if ((msr_data & 0xfffc) == 0x0)
-- return emulate_gp(ctxt, 0);
-- break;
-- case X86EMUL_MODE_PROT64:
-- if (msr_data == 0x0)
-- return emulate_gp(ctxt, 0);
-- break;
-- default:
-- break;
-- }
-+ if ((msr_data & 0xfffc) == 0x0)
-+ return emulate_gp(ctxt, 0);
-
- ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
-- cs_sel = (u16)msr_data;
-- cs_sel &= ~SELECTOR_RPL_MASK;
-+ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
- ss_sel = cs_sel + 8;
-- ss_sel &= ~SELECTOR_RPL_MASK;
-- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
-+ if (efer & EFER_LMA) {
- cs.d = 0;
- cs.l = 1;
- }
-@@ -2298,10 +2286,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
- ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
-
- ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
-- ctxt->_eip = msr_data;
-+ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
-
- ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
-- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
-+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
-+ (u32)msr_data;
-
- return X86EMUL_CONTINUE;
- }
-diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index a10c8c7..ebc551c 100644
---- a/arch/x86/mm/fault.c
-+++ b/arch/x86/mm/fault.c
-@@ -833,11 +833,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
- unsigned int fault)
- {
- struct task_struct *tsk = current;
-- struct mm_struct *mm = tsk->mm;
- int code = BUS_ADRERR;
-
-- up_read(&mm->mmap_sem);
--
- /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
- no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
-@@ -868,7 +865,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, unsigned int fault)
- {
- if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
-- up_read(&current->mm->mmap_sem);
- no_context(regs, error_code, address, 0, 0);
- return;
- }
-@@ -876,14 +872,11 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- if (fault & VM_FAULT_OOM) {
- /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
-- up_read(&current->mm->mmap_sem);
- no_context(regs, error_code, address,
- SIGSEGV, SEGV_MAPERR);
- return;
- }
-
-- up_read(&current->mm->mmap_sem);
--
- /*
- * We ran out of memory, call the OOM killer, and return the
- * userspace (which will retry the fault, or kill us if we got
-@@ -894,6 +887,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
- VM_FAULT_HWPOISON_LARGE))
- do_sigbus(regs, error_code, address, fault);
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ bad_area_nosemaphore(regs, error_code, address);
- else
- BUG();
- }
-@@ -1216,6 +1211,7 @@ good_area:
- return;
-
- if (unlikely(fault & VM_FAULT_ERROR)) {
-+ up_read(&mm->mmap_sem);
- mm_fault_error(regs, error_code, address, fault);
- return;
- }
-diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
-index b57c4f9..9e3571a 100644
---- a/arch/xtensa/mm/fault.c
-+++ b/arch/xtensa/mm/fault.c
-@@ -117,6 +117,8 @@ good_area:
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
-+ else if (fault & VM_FAULT_SIGSEGV)
-+ goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
-diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
-index f667e37..5afe556 100644
---- a/drivers/bluetooth/ath3k.c
-+++ b/drivers/bluetooth/ath3k.c
-@@ -62,51 +62,59 @@ static const struct usb_device_id ath3k_table[] = {
- { USB_DEVICE(0x0CF3, 0x3000) },
-
- /* Atheros AR3011 with sflash firmware*/
-+ { USB_DEVICE(0x0489, 0xE027) },
-+ { USB_DEVICE(0x0489, 0xE03D) },
-+ { USB_DEVICE(0x0930, 0x0215) },
- { USB_DEVICE(0x0CF3, 0x3002) },
- { USB_DEVICE(0x0CF3, 0xE019) },
- { USB_DEVICE(0x13d3, 0x3304) },
-- { USB_DEVICE(0x0930, 0x0215) },
-- { USB_DEVICE(0x0489, 0xE03D) },
-- { USB_DEVICE(0x0489, 0xE027) },
-
- /* Atheros AR9285 Malbec with sflash firmware */
- { USB_DEVICE(0x03F0, 0x311D) },
-
- /* Atheros AR3012 with sflash firmware*/
-- { USB_DEVICE(0x0CF3, 0x0036) },
-- { USB_DEVICE(0x0CF3, 0x3004) },
-- { USB_DEVICE(0x0CF3, 0x3008) },
-- { USB_DEVICE(0x0CF3, 0x311D) },
-- { USB_DEVICE(0x0CF3, 0x817a) },
-- { USB_DEVICE(0x13d3, 0x3375) },
-+ { USB_DEVICE(0x0489, 0xe04d) },
-+ { USB_DEVICE(0x0489, 0xe04e) },
-+ { USB_DEVICE(0x0489, 0xe057) },
-+ { USB_DEVICE(0x0489, 0xe056) },
-+ { USB_DEVICE(0x0489, 0xe05f) },
-+ { USB_DEVICE(0x0489, 0xe078) },
-+ { USB_DEVICE(0x04c5, 0x1330) },
- { USB_DEVICE(0x04CA, 0x3004) },
- { USB_DEVICE(0x04CA, 0x3005) },
- { USB_DEVICE(0x04CA, 0x3006) },
- { USB_DEVICE(0x04CA, 0x3007) },
- { USB_DEVICE(0x04CA, 0x3008) },
- { USB_DEVICE(0x04CA, 0x300b) },
-- { USB_DEVICE(0x13d3, 0x3362) },
-- { USB_DEVICE(0x0CF3, 0xE004) },
-- { USB_DEVICE(0x0CF3, 0xE005) },
-+ { USB_DEVICE(0x04CA, 0x3010) },
- { USB_DEVICE(0x0930, 0x0219) },
- { USB_DEVICE(0x0930, 0x0220) },
-- { USB_DEVICE(0x0489, 0xe057) },
-- { USB_DEVICE(0x13d3, 0x3393) },
-- { USB_DEVICE(0x0489, 0xe04e) },
-- { USB_DEVICE(0x0489, 0xe056) },
-- { USB_DEVICE(0x0489, 0xe04d) },
-- { USB_DEVICE(0x04c5, 0x1330) },
-- { USB_DEVICE(0x13d3, 0x3402) },
-+ { USB_DEVICE(0x0930, 0x0227) },
-+ { USB_DEVICE(0x0b05, 0x17d0) },
-+ { USB_DEVICE(0x0CF3, 0x0036) },
-+ { USB_DEVICE(0x0CF3, 0x3004) },
-+ { USB_DEVICE(0x0CF3, 0x3008) },
-+ { USB_DEVICE(0x0CF3, 0x311D) },
-+ { USB_DEVICE(0x0CF3, 0x311E) },
-+ { USB_DEVICE(0x0CF3, 0x311F) },
- { USB_DEVICE(0x0cf3, 0x3121) },
-+ { USB_DEVICE(0x0CF3, 0x817a) },
- { USB_DEVICE(0x0cf3, 0xe003) },
-- { USB_DEVICE(0x0489, 0xe05f) },
-+ { USB_DEVICE(0x0CF3, 0xE004) },
-+ { USB_DEVICE(0x0CF3, 0xE005) },
-+ { USB_DEVICE(0x13d3, 0x3362) },
-+ { USB_DEVICE(0x13d3, 0x3375) },
-+ { USB_DEVICE(0x13d3, 0x3393) },
-+ { USB_DEVICE(0x13d3, 0x3402) },
-+ { USB_DEVICE(0x13d3, 0x3408) },
-+ { USB_DEVICE(0x13d3, 0x3432) },
-
- /* Atheros AR5BBU12 with sflash firmware */
- { USB_DEVICE(0x0489, 0xE02C) },
-
- /* Atheros AR5BBU22 with sflash firmware */
-- { USB_DEVICE(0x0489, 0xE03C) },
- { USB_DEVICE(0x0489, 0xE036) },
-+ { USB_DEVICE(0x0489, 0xE03C) },
-
- { } /* Terminating entry */
- };
-@@ -119,37 +127,45 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
- static const struct usb_device_id ath3k_blist_tbl[] = {
-
- /* Atheros AR3012 with sflash firmware*/
-- { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
-
- /* Atheros AR5BBU22 with sflash firmware */
-- { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
-
- { } /* Terminating entry */
- };
-diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
-index e00c3f8..03b3317 100644
---- a/drivers/bluetooth/btusb.c
-+++ b/drivers/bluetooth/btusb.c
-@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver;
- #define BTUSB_WRONG_SCO_MTU 0x40
- #define BTUSB_ATH3012 0x80
- #define BTUSB_INTEL 0x100
-+#define BTUSB_INTEL_BOOT 0x200
-
- static const struct usb_device_id btusb_table[] = {
- /* Generic Bluetooth USB device */
-@@ -101,21 +102,31 @@ static const struct usb_device_id btusb_table[] = {
- { USB_DEVICE(0x0c10, 0x0000) },
-
- /* Broadcom BCM20702A0 */
-+ { USB_DEVICE(0x0489, 0xe042) },
-+ { USB_DEVICE(0x04ca, 0x2003) },
- { USB_DEVICE(0x0b05, 0x17b5) },
- { USB_DEVICE(0x0b05, 0x17cb) },
-- { USB_DEVICE(0x04ca, 0x2003) },
-- { USB_DEVICE(0x0489, 0xe042) },
- { USB_DEVICE(0x413c, 0x8197) },
-
- /* Foxconn - Hon Hai */
- { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
-
-- /*Broadcom devices with vendor specific id */
-+ /* Broadcom devices with vendor specific id */
- { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
-
-+ /* ASUSTek Computer - Broadcom based */
-+ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
-+
- /* Belkin F8065bf - Broadcom based */
- { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
-
-+ /* IMC Networks - Broadcom based */
-+ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
-+
-+ /* Intel Bluetooth USB Bootloader (RAM module) */
-+ { USB_DEVICE(0x8087, 0x0a5a),
-+ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
-+
- { } /* Terminating entry */
- };
-
-@@ -129,56 +140,64 @@ static const struct usb_device_id blacklist_table[] = {
- { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
-
- /* Atheros 3011 with sflash firmware */
-+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
-+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
-+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
- { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
-- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
-- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
-- { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
-
- /* Atheros AR9285 Malbec with sflash firmware */
- { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
-
- /* Atheros 3012 with sflash firmware */
-- { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
-- { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
-
- /* Atheros AR5BBU12 with sflash firmware */
- { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
-
- /* Atheros AR5BBU12 with sflash firmware */
-- { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
- { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
-+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
-
- /* Broadcom BCM2035 */
-- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
-- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
- { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
-+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
-+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
-
- /* Broadcom BCM2045 */
- { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
-@@ -1491,6 +1510,9 @@ static int btusb_probe(struct usb_interface *intf,
- if (id->driver_info & BTUSB_INTEL)
- hdev->setup = btusb_setup_intel;
-
-+ if (id->driver_info & BTUSB_INTEL_BOOT)
-+ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
-+
- /* Interface numbers are hardcoded in the specification */
- data->isoc = usb_ifnum_to_if(data->udev, 1);
-
-diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
-index 54e2abe..c611bcc 100644
---- a/drivers/edac/sb_edac.c
-+++ b/drivers/edac/sb_edac.c
-@@ -285,8 +285,9 @@ static const u32 correrrthrsld[] = {
- * sbridge structs
- */
-
--#define NUM_CHANNELS 4
--#define MAX_DIMMS 3 /* Max DIMMS per channel */
-+#define NUM_CHANNELS 4
-+#define MAX_DIMMS 3 /* Max DIMMS per channel */
-+#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
-
- enum type {
- SANDY_BRIDGE,
-@@ -1750,6 +1751,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
-
- /* FIXME: need support for channel mask */
-
-+ if (channel == CHANNEL_UNSPECIFIED)
-+ channel = -1;
-+
- /* Call the helper to output message */
- edac_mc_handle_error(tp_event, mci, core_err_cnt,
- m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
-diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
-index dcde5605..3177498 100644
---- a/drivers/net/bonding/bond_3ad.c
-+++ b/drivers/net/bonding/bond_3ad.c
-@@ -2479,7 +2479,7 @@ out:
- return NETDEV_TX_OK;
- err_free:
- /* no suitable interface, frame not sent */
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- goto out;
- }
-
-diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
-index e8f133e..c67bbc9 100644
---- a/drivers/net/bonding/bond_alb.c
-+++ b/drivers/net/bonding/bond_alb.c
-@@ -1479,7 +1479,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
- }
-
- /* no suitable interface, frame not sent */
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- out:
- return NETDEV_TX_OK;
- }
-diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index 1537982..32b0e705 100644
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -3568,7 +3568,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
- }
- }
- /* no slave that can tx has been found */
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- }
-
- /**
-@@ -3650,7 +3650,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
- if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
- }
-@@ -3698,7 +3698,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
- if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
- }
-@@ -3785,7 +3785,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
- pr_err("%s: Error: Unknown bonding mode %d\n",
- dev->name, bond->params.mode);
- WARN_ON_ONCE(1);
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- }
-@@ -3806,7 +3806,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
- if (bond_has_slaves(bond))
- ret = __bond_start_xmit(skb, dev);
- else
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- rcu_read_unlock();
-
- return ret;
-diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
-index 6c9e1c9..0c8a168 100644
---- a/drivers/net/ethernet/broadcom/bnx2.c
-+++ b/drivers/net/ethernet/broadcom/bnx2.c
-@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
- sw_cons = BNX2_NEXT_TX_BD(sw_cons);
-
- tx_bytes += skb->len;
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- tx_pkt++;
- if (tx_pkt == budget)
- break;
-@@ -6640,7 +6640,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
-
- mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
- if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
-@@ -6733,7 +6733,7 @@ dma_error:
- PCI_DMA_TODEVICE);
- }
-
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
-diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
-index 8206113..bc65dc8 100644
---- a/drivers/net/ethernet/broadcom/tg3.c
-+++ b/drivers/net/ethernet/broadcom/tg3.c
-@@ -6593,7 +6593,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
- pkts_compl++;
- bytes_compl += skb->len;
-
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
-
- if (unlikely(tx_bug)) {
- tg3_tx_recover(tp);
-@@ -6925,7 +6925,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
- if (len > (tp->dev->mtu + ETH_HLEN) &&
- skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)) {
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- goto drop_it_no_recycle;
- }
-
-@@ -7808,7 +7808,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
- PCI_DMA_TODEVICE);
- /* Make sure the mapping succeeded */
- if (pci_dma_mapping_error(tp->pdev, new_addr)) {
-- dev_kfree_skb(new_skb);
-+ dev_kfree_skb_any(new_skb);
- ret = -1;
- } else {
- u32 save_entry = *entry;
-@@ -7823,13 +7823,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
- new_skb->len, base_flags,
- mss, vlan)) {
- tg3_tx_skb_unmap(tnapi, save_entry, -1);
-- dev_kfree_skb(new_skb);
-+ dev_kfree_skb_any(new_skb);
- ret = -1;
- }
- }
- }
-
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- *pskb = new_skb;
- return ret;
- }
-@@ -7872,7 +7872,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
- } while (segs);
-
- tg3_tso_bug_end:
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
-
- return NETDEV_TX_OK;
- }
-@@ -8110,7 +8110,7 @@ dma_error:
- tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
- tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
- drop:
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- drop_nofree:
- tp->tx_dropped++;
- return NETDEV_TX_OK;
-diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
-index 80bfa03..075e7e7 100644
---- a/drivers/net/ethernet/emulex/benet/be_main.c
-+++ b/drivers/net/ethernet/emulex/benet/be_main.c
-@@ -1883,7 +1883,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
- queue_tail_inc(txq);
- } while (cur_index != last_index);
-
-- kfree_skb(sent_skb);
-+ dev_kfree_skb_any(sent_skb);
- return num_wrbs;
- }
-
-diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
-index ad5a5aa..70eb4d2 100644
---- a/drivers/net/ethernet/freescale/gianfar.c
-+++ b/drivers/net/ethernet/freescale/gianfar.c
-@@ -2152,13 +2152,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
- skb_new = skb_realloc_headroom(skb, fcb_len);
- if (!skb_new) {
- dev->stats.tx_errors++;
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->sk)
- skb_set_owner_w(skb_new, skb->sk);
-- consume_skb(skb);
-+ dev_consume_skb_any(skb);
- skb = skb_new;
- }
-
-diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
-index 57e390c..f42c201 100644
---- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
-+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
-@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
- int tso;
-
- if (test_bit(__IXGB_DOWN, &adapter->flags)) {
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
-@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-
- tso = ixgb_tso(adapter, skb);
- if (tso < 0) {
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-index 2f83f34..8be0f3e 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-@@ -2497,13 +2497,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
- netif_carrier_off(dev);
- mlx4_en_set_default_moderation(priv);
-
-- err = register_netdev(dev);
-- if (err) {
-- en_err(priv, "Netdev registration failed for port %d\n", port);
-- goto out;
-- }
-- priv->registered = 1;
--
- en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
- en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
-
-@@ -2543,6 +2536,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
- queue_delayed_work(mdev->workqueue, &priv->service_task,
- SERVICE_TASK_DELAY);
-
-+ err = register_netdev(dev);
-+ if (err) {
-+ en_err(priv, "Netdev registration failed for port %d\n", port);
-+ goto out;
-+ }
-+
-+ priv->registered = 1;
-+
- return 0;
-
- out:
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index 1345703..019a04a 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -325,7 +325,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
- }
- }
- }
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- return tx_info->nr_txbb;
- }
-
-diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
-index 737c1a8..a3c1daa 100644
---- a/drivers/net/ethernet/realtek/8139cp.c
-+++ b/drivers/net/ethernet/realtek/8139cp.c
-@@ -899,7 +899,7 @@ out_unlock:
-
- return NETDEV_TX_OK;
- out_dma_error:
-- kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- cp->dev->stats.tx_dropped++;
- goto out_unlock;
- }
-diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
-index da5972e..8cb2f35 100644
---- a/drivers/net/ethernet/realtek/8139too.c
-+++ b/drivers/net/ethernet/realtek/8139too.c
-@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
- if (len < ETH_ZLEN)
- memset(tp->tx_buf[entry], 0, ETH_ZLEN);
- skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- } else {
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
-diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
-index 3ff7bc3..90c14d1 100644
---- a/drivers/net/ethernet/realtek/r8169.c
-+++ b/drivers/net/ethernet/realtek/r8169.c
-@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
- tp->TxDescArray + entry);
- if (skb) {
- tp->dev->stats.tx_dropped++;
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- tx_skb->skb = NULL;
- }
- }
-@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- err_dma_1:
- rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
- err_dma_0:
-- dev_kfree_skb(skb);
-+ dev_kfree_skb_any(skb);
- err_update_stats:
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
-@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
- tp->tx_stats.packets++;
- tp->tx_stats.bytes += tx_skb->skb->len;
- u64_stats_update_end(&tp->tx_stats.syncp);
-- dev_kfree_skb(tx_skb->skb);
-+ dev_kfree_skb_any(tx_skb->skb);
- tx_skb->skb = NULL;
- }
- dirty_tx++;
-diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
-index cbd663e..19405ed 100644
---- a/drivers/staging/lustre/lustre/llite/dcache.c
-+++ b/drivers/staging/lustre/lustre/llite/dcache.c
-@@ -278,7 +278,7 @@ void ll_invalidate_aliases(struct inode *inode)
- inode->i_ino, inode->i_generation, inode);
-
- ll_lock_dcache(inode);
-- ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
-+ ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
- "inode %p flags %d\n", dentry->d_name.len,
- dentry->d_name.name, dentry, dentry->d_parent,
-diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
-index 6cfdb9e..5ae562e 100644
---- a/drivers/staging/lustre/lustre/llite/llite_lib.c
-+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
-@@ -678,7 +678,7 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
- return;
-
- list_for_each(tmp, &dentry->d_subdirs) {
-- struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
-+ struct dentry *d = list_entry(tmp, struct dentry, d_child);
- lustre_dump_dentry(d, recur - 1);
- }
- }
-diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
-index fc8d264..8e9a9e9 100644
---- a/drivers/staging/lustre/lustre/llite/namei.c
-+++ b/drivers/staging/lustre/lustre/llite/namei.c
-@@ -175,14 +175,14 @@ static void ll_invalidate_negative_children(struct inode *dir)
- struct ll_d_hlist_node *p;
-
- ll_lock_dcache(dir);
-- ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) {
-+ ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (!list_empty(&dentry->d_subdirs)) {
- struct dentry *child;
-
- list_for_each_entry_safe(child, tmp_subdir,
- &dentry->d_subdirs,
-- d_u.d_child) {
-+ d_child) {
- if (child->d_inode == NULL)
- d_lustre_invalidate(child, 1);
- }
-@@ -364,7 +364,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
- discon_alias = invalid_alias = NULL;
-
- ll_lock_dcache(inode);
-- ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
-+ ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
- LASSERT(alias != dentry);
-
- spin_lock(&alias->d_lock);
-@@ -953,7 +953,7 @@ static void ll_get_child_fid(struct inode * dir, struct qstr *name,
- {
- struct dentry *parent, *child;
-
-- parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias);
-+ parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_u.d_alias);
- child = d_lookup(parent, name);
- if (child) {
- if (child->d_inode)
-diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
-index 93cbfbb..6096771 100644
---- a/drivers/staging/lustre/lustre/llite/vvp_io.c
-+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
-@@ -642,7 +642,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
- return 0;
- }
-
-- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
-+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
- CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
- return -EFAULT;
- }
-diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
-index d9a4367..9cca0ea 100644
---- a/fs/affs/amigaffs.c
-+++ b/fs/affs/amigaffs.c
-@@ -126,7 +126,7 @@ affs_fix_dcache(struct inode *inode, u32 entry_ino)
- {
- struct dentry *dentry;
- spin_lock(&inode->i_lock);
-- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- if (entry_ino == (u32)(long)dentry->d_fsdata) {
- dentry->d_fsdata = (void *)inode->i_ino;
- break;
-diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
-index 394e90b..edb46e6 100644
---- a/fs/autofs4/expire.c
-+++ b/fs/autofs4/expire.c
-@@ -91,7 +91,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
- spin_lock(&root->d_lock);
-
- if (prev)
-- next = prev->d_u.d_child.next;
-+ next = prev->d_child.next;
- else {
- prev = dget_dlock(root);
- next = prev->d_subdirs.next;
-@@ -105,13 +105,13 @@ cont:
- return NULL;
- }
-
-- q = list_entry(next, struct dentry, d_u.d_child);
-+ q = list_entry(next, struct dentry, d_child);
-
- spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
- /* Already gone or negative dentry (under construction) - try next */
- if (!d_count(q) || !simple_positive(q)) {
- spin_unlock(&q->d_lock);
-- next = q->d_u.d_child.next;
-+ next = q->d_child.next;
- goto cont;
- }
- dget_dlock(q);
-@@ -161,13 +161,13 @@ again:
- goto relock;
- }
- spin_unlock(&p->d_lock);
-- next = p->d_u.d_child.next;
-+ next = p->d_child.next;
- p = parent;
- if (next != &parent->d_subdirs)
- break;
- }
- }
-- ret = list_entry(next, struct dentry, d_u.d_child);
-+ ret = list_entry(next, struct dentry, d_child);
-
- spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
- /* Negative dentry - try next */
-@@ -461,7 +461,7 @@ found:
- spin_lock(&sbi->lookup_lock);
- spin_lock(&expired->d_parent->d_lock);
- spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
-- list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
-+ list_move(&expired->d_parent->d_subdirs, &expired->d_child);
- spin_unlock(&expired->d_lock);
- spin_unlock(&expired->d_parent->d_lock);
- spin_unlock(&sbi->lookup_lock);
-diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
-index cc87c1a..9e016e6 100644
---- a/fs/autofs4/root.c
-+++ b/fs/autofs4/root.c
-@@ -655,7 +655,7 @@ static void autofs_clear_leaf_automount_flags(struct dentry *dentry)
- /* only consider parents below dentrys in the root */
- if (IS_ROOT(parent->d_parent))
- return;
-- d_child = &dentry->d_u.d_child;
-+ d_child = &dentry->d_child;
- /* Set parent managed if it's becoming empty */
- if (d_child->next == &parent->d_subdirs &&
- d_child->prev == &parent->d_subdirs)
-diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
-index 5e0982a..18e14cf 100644
---- a/fs/ceph/dir.c
-+++ b/fs/ceph/dir.c
-@@ -111,7 +111,7 @@ static int fpos_cmp(loff_t l, loff_t r)
- /*
- * When possible, we try to satisfy a readdir by peeking at the
- * dcache. We make this work by carefully ordering dentries on
-- * d_u.d_child when we initially get results back from the MDS, and
-+ * d_child when we initially get results back from the MDS, and
- * falling back to a "normal" sync readdir if any dentries in the dir
- * are dropped.
- *
-@@ -146,11 +146,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
- p = parent->d_subdirs.prev;
- dout(" initial p %p/%p\n", p->prev, p->next);
- } else {
-- p = last->d_u.d_child.prev;
-+ p = last->d_child.prev;
- }
-
- more:
-- dentry = list_entry(p, struct dentry, d_u.d_child);
-+ dentry = list_entry(p, struct dentry, d_child);
- di = ceph_dentry(dentry);
- while (1) {
- dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
-@@ -172,7 +172,7 @@ more:
- !dentry->d_inode ? " null" : "");
- spin_unlock(&dentry->d_lock);
- p = p->prev;
-- dentry = list_entry(p, struct dentry, d_u.d_child);
-+ dentry = list_entry(p, struct dentry, d_child);
- di = ceph_dentry(dentry);
- }
-
-diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
-index 6471f9c..ee24490 100644
---- a/fs/ceph/inode.c
-+++ b/fs/ceph/inode.c
-@@ -1289,7 +1289,7 @@ retry_lookup:
- /* reorder parent's d_subdirs */
- spin_lock(&parent->d_lock);
- spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
-- list_move(&dn->d_u.d_child, &parent->d_subdirs);
-+ list_move(&dn->d_child, &parent->d_subdirs);
- spin_unlock(&dn->d_lock);
- spin_unlock(&parent->d_lock);
- }
-diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
-index f2ddcf7..7ee427e 100644
---- a/fs/cifs/inode.c
-+++ b/fs/cifs/inode.c
-@@ -883,7 +883,7 @@ inode_has_hashed_dentries(struct inode *inode)
- struct dentry *dentry;
-
- spin_lock(&inode->i_lock);
-- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
- spin_unlock(&inode->i_lock);
- return true;
-diff --git a/fs/coda/cache.c b/fs/coda/cache.c
-index 1da168c..9bc1147 100644
---- a/fs/coda/cache.c
-+++ b/fs/coda/cache.c
-@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
- struct dentry *de;
-
- spin_lock(&parent->d_lock);
-- list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
-+ list_for_each_entry(de, &parent->d_subdirs, d_child) {
- /* don't know what to do with negative dentries */
- if (de->d_inode )
- coda_flag_inode(de->d_inode, flag);
-diff --git a/fs/dcache.c b/fs/dcache.c
-index 4366127..c345f5f 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -44,7 +44,7 @@
- /*
- * Usage:
- * dcache->d_inode->i_lock protects:
-- * - i_dentry, d_alias, d_inode of aliases
-+ * - i_dentry, d_u.d_alias, d_inode of aliases
- * dcache_hash_bucket lock protects:
- * - the dcache hash table
- * s_anon bl list spinlock protects:
-@@ -59,7 +59,7 @@
- * - d_unhashed()
- * - d_parent and d_subdirs
- * - childrens' d_child and d_parent
-- * - d_alias, d_inode
-+ * - d_u.d_alias, d_inode
- *
- * Ordering:
- * dentry->d_inode->i_lock
-@@ -239,7 +239,6 @@ static void __d_free(struct rcu_head *head)
- {
- struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
-
-- WARN_ON(!hlist_unhashed(&dentry->d_alias));
- if (dname_external(dentry))
- kfree(dentry->d_name.name);
- kmem_cache_free(dentry_cache, dentry);
-@@ -250,6 +249,7 @@ static void __d_free(struct rcu_head *head)
- */
- static void d_free(struct dentry *dentry)
- {
-+ WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
- BUG_ON((int)dentry->d_lockref.count > 0);
- this_cpu_dec(nr_dentry);
- if (dentry->d_op && dentry->d_op->d_release)
-@@ -288,7 +288,7 @@ static void dentry_iput(struct dentry * dentry)
- struct inode *inode = dentry->d_inode;
- if (inode) {
- dentry->d_inode = NULL;
-- hlist_del_init(&dentry->d_alias);
-+ hlist_del_init(&dentry->d_u.d_alias);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&inode->i_lock);
- if (!inode->i_nlink)
-@@ -313,7 +313,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
- struct inode *inode = dentry->d_inode;
- __d_clear_type(dentry);
- dentry->d_inode = NULL;
-- hlist_del_init(&dentry->d_alias);
-+ hlist_del_init(&dentry->d_u.d_alias);
- dentry_rcuwalk_barrier(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&inode->i_lock);
-@@ -435,7 +435,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
- __releases(parent->d_lock)
- __releases(dentry->d_inode->i_lock)
- {
-- list_del(&dentry->d_u.d_child);
-+ list_del(&dentry->d_child);
- /*
- * Inform d_walk() that we are no longer attached to the
- * dentry tree
-@@ -737,7 +737,7 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
-
- again:
- discon_alias = NULL;
-- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&alias->d_lock);
- if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
- if (IS_ROOT(alias) &&
-@@ -790,7 +790,7 @@ void d_prune_aliases(struct inode *inode)
- struct dentry *dentry;
- restart:
- spin_lock(&inode->i_lock);
-- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (!dentry->d_lockref.count) {
- /*
-@@ -1091,7 +1091,7 @@ repeat:
- resume:
- while (next != &this_parent->d_subdirs) {
- struct list_head *tmp = next;
-- struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
-+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
- next = tmp->next;
-
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-@@ -1143,7 +1143,7 @@ resume:
- goto rename_retry;
- }
- rcu_read_unlock();
-- next = child->d_u.d_child.next;
-+ next = child->d_child.next;
- goto resume;
- }
- if (need_seqretry(&rename_lock, seq)) {
-@@ -1524,8 +1524,8 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
- INIT_HLIST_BL_NODE(&dentry->d_hash);
- INIT_LIST_HEAD(&dentry->d_lru);
- INIT_LIST_HEAD(&dentry->d_subdirs);
-- INIT_HLIST_NODE(&dentry->d_alias);
-- INIT_LIST_HEAD(&dentry->d_u.d_child);
-+ INIT_HLIST_NODE(&dentry->d_u.d_alias);
-+ INIT_LIST_HEAD(&dentry->d_child);
- d_set_d_op(dentry, dentry->d_sb->s_d_op);
-
- this_cpu_inc(nr_dentry);
-@@ -1555,7 +1555,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
- */
- __dget_dlock(parent);
- dentry->d_parent = parent;
-- list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-+ list_add(&dentry->d_child, &parent->d_subdirs);
- spin_unlock(&parent->d_lock);
-
- return dentry;
-@@ -1648,7 +1648,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
- spin_lock(&dentry->d_lock);
- __d_set_type(dentry, add_flags);
- if (inode)
-- hlist_add_head(&dentry->d_alias, &inode->i_dentry);
-+ hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
- dentry->d_inode = inode;
- dentry_rcuwalk_barrier(dentry);
- spin_unlock(&dentry->d_lock);
-@@ -1672,7 +1672,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
-
- void d_instantiate(struct dentry *entry, struct inode * inode)
- {
-- BUG_ON(!hlist_unhashed(&entry->d_alias));
-+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
- if (inode)
- spin_lock(&inode->i_lock);
- __d_instantiate(entry, inode);
-@@ -1711,7 +1711,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
- return NULL;
- }
-
-- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- /*
- * Don't need alias->d_lock here, because aliases with
- * d_parent == entry->d_parent are not subject to name or
-@@ -1737,7 +1737,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
- {
- struct dentry *result;
-
-- BUG_ON(!hlist_unhashed(&entry->d_alias));
-+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
-
- if (inode)
- spin_lock(&inode->i_lock);
-@@ -1768,7 +1768,7 @@ EXPORT_SYMBOL(d_instantiate_unique);
- */
- int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
- {
-- BUG_ON(!hlist_unhashed(&entry->d_alias));
-+ BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
-
- spin_lock(&inode->i_lock);
- if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
-@@ -1807,7 +1807,7 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
-
- if (hlist_empty(&inode->i_dentry))
- return NULL;
-- alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
-+ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
- __dget(alias);
- return alias;
- }
-@@ -1884,7 +1884,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
- spin_lock(&tmp->d_lock);
- tmp->d_inode = inode;
- tmp->d_flags |= add_flags;
-- hlist_add_head(&tmp->d_alias, &inode->i_dentry);
-+ hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
- hlist_bl_lock(&tmp->d_sb->s_anon);
- hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
- hlist_bl_unlock(&tmp->d_sb->s_anon);
-@@ -2327,7 +2327,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
- struct dentry *child;
-
- spin_lock(&dparent->d_lock);
-- list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
-+ list_for_each_entry(child, &dparent->d_subdirs, d_child) {
- if (dentry == child) {
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- __dget_dlock(dentry);
-@@ -2574,8 +2574,8 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
- /* Unhash the target: dput() will then get rid of it */
- __d_drop(target);
-
-- list_del(&dentry->d_u.d_child);
-- list_del(&target->d_u.d_child);
-+ list_del(&dentry->d_child);
-+ list_del(&target->d_child);
-
- /* Switch the names.. */
- switch_names(dentry, target);
-@@ -2585,15 +2585,15 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
- if (IS_ROOT(dentry)) {
- dentry->d_parent = target->d_parent;
- target->d_parent = target;
-- INIT_LIST_HEAD(&target->d_u.d_child);
-+ INIT_LIST_HEAD(&target->d_child);
- } else {
- swap(dentry->d_parent, target->d_parent);
-
- /* And add them back to the (new) parent lists */
-- list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
-+ list_add(&target->d_child, &target->d_parent->d_subdirs);
- }
-
-- list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
-+ list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
-
- write_seqcount_end(&target->d_seq);
- write_seqcount_end(&dentry->d_seq);
-@@ -2700,9 +2700,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
- swap(dentry->d_name.hash, anon->d_name.hash);
-
- dentry->d_parent = dentry;
-- list_del_init(&dentry->d_u.d_child);
-+ list_del_init(&dentry->d_child);
- anon->d_parent = dparent;
-- list_move(&anon->d_u.d_child, &dparent->d_subdirs);
-+ list_move(&anon->d_child, &dparent->d_subdirs);
-
- write_seqcount_end(&dentry->d_seq);
- write_seqcount_end(&anon->d_seq);
-@@ -3333,7 +3333,7 @@ void d_tmpfile(struct dentry *dentry, struct inode *inode)
- {
- inode_dec_link_count(inode);
- BUG_ON(dentry->d_name.name != dentry->d_iname ||
-- !hlist_unhashed(&dentry->d_alias) ||
-+ !hlist_unhashed(&dentry->d_u.d_alias) ||
- !d_unlinked(dentry));
- spin_lock(&dentry->d_parent->d_lock);
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
-index 1ff8fe5..4a9f0e0 100644
---- a/fs/debugfs/inode.c
-+++ b/fs/debugfs/inode.c
-@@ -552,7 +552,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
- * use the d_u.d_child as the rcu head and corrupt this list.
- */
- spin_lock(&parent->d_lock);
-- list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
-+ list_for_each_entry(child, &parent->d_subdirs, d_child) {
- if (!debugfs_positive(child))
- continue;
-
-diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
-index 48a359d..831d4f0 100644
---- a/fs/exportfs/expfs.c
-+++ b/fs/exportfs/expfs.c
-@@ -50,7 +50,7 @@ find_acceptable_alias(struct dentry *result,
-
- inode = result->d_inode;
- spin_lock(&inode->i_lock);
-- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- dget(dentry);
- spin_unlock(&inode->i_lock);
- if (toput)
-diff --git a/fs/libfs.c b/fs/libfs.c
-index a184424..868c0b7 100644
---- a/fs/libfs.c
-+++ b/fs/libfs.c
-@@ -113,18 +113,18 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
-
- spin_lock(&dentry->d_lock);
- /* d_lock not required for cursor */
-- list_del(&cursor->d_u.d_child);
-+ list_del(&cursor->d_child);
- p = dentry->d_subdirs.next;
- while (n && p != &dentry->d_subdirs) {
- struct dentry *next;
-- next = list_entry(p, struct dentry, d_u.d_child);
-+ next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (simple_positive(next))
- n--;
- spin_unlock(&next->d_lock);
- p = p->next;
- }
-- list_add_tail(&cursor->d_u.d_child, p);
-+ list_add_tail(&cursor->d_child, p);
- spin_unlock(&dentry->d_lock);
- }
- }
-@@ -149,7 +149,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
- {
- struct dentry *dentry = file->f_path.dentry;
- struct dentry *cursor = file->private_data;
-- struct list_head *p, *q = &cursor->d_u.d_child;
-+ struct list_head *p, *q = &cursor->d_child;
-
- if (!dir_emit_dots(file, ctx))
- return 0;
-@@ -158,7 +158,7 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
- list_move(q, &dentry->d_subdirs);
-
- for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
-- struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
-+ struct dentry *next = list_entry(p, struct dentry, d_child);
- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
- if (!simple_positive(next)) {
- spin_unlock(&next->d_lock);
-@@ -286,7 +286,7 @@ int simple_empty(struct dentry *dentry)
- int ret = 0;
-
- spin_lock(&dentry->d_lock);
-- list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
-+ list_for_each_entry(child, &dentry->d_subdirs, d_child) {
- spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
- if (simple_positive(child)) {
- spin_unlock(&child->d_lock);
-diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
-index c320ac5..dc9747d 100644
---- a/fs/ncpfs/dir.c
-+++ b/fs/ncpfs/dir.c
-@@ -406,7 +406,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
- spin_lock(&parent->d_lock);
- next = parent->d_subdirs.next;
- while (next != &parent->d_subdirs) {
-- dent = list_entry(next, struct dentry, d_u.d_child);
-+ dent = list_entry(next, struct dentry, d_child);
- if ((unsigned long)dent->d_fsdata == fpos) {
- if (dent->d_inode)
- dget(dent);
-diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
-index 32c0658..6d5e7c5 100644
---- a/fs/ncpfs/ncplib_kernel.h
-+++ b/fs/ncpfs/ncplib_kernel.h
-@@ -194,7 +194,7 @@ ncp_renew_dentries(struct dentry *parent)
- spin_lock(&parent->d_lock);
- next = parent->d_subdirs.next;
- while (next != &parent->d_subdirs) {
-- dentry = list_entry(next, struct dentry, d_u.d_child);
-+ dentry = list_entry(next, struct dentry, d_child);
-
- if (dentry->d_fsdata == NULL)
- ncp_age_dentry(server, dentry);
-@@ -216,7 +216,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
- spin_lock(&parent->d_lock);
- next = parent->d_subdirs.next;
- while (next != &parent->d_subdirs) {
-- dentry = list_entry(next, struct dentry, d_u.d_child);
-+ dentry = list_entry(next, struct dentry, d_child);
- dentry->d_fsdata = NULL;
- ncp_age_dentry(server, dentry);
- next = next->next;
-diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
-index 66984a9..5b8ab0e 100644
---- a/fs/nfs/getroot.c
-+++ b/fs/nfs/getroot.c
-@@ -58,7 +58,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
- */
- spin_lock(&sb->s_root->d_inode->i_lock);
- spin_lock(&sb->s_root->d_lock);
-- hlist_del_init(&sb->s_root->d_alias);
-+ hlist_del_init(&sb->s_root->d_u.d_alias);
- spin_unlock(&sb->s_root->d_lock);
- spin_unlock(&sb->s_root->d_inode->i_lock);
- }
-diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
-index 9d3e9c5..7001299 100644
---- a/fs/notify/fsnotify.c
-+++ b/fs/notify/fsnotify.c
-@@ -63,14 +63,14 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
- spin_lock(&inode->i_lock);
- /* run all of the dentries associated with this inode. Since this is a
- * directory, there damn well better only be one item on this list */
-- hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- struct dentry *child;
-
- /* run all of the children of the original inode and fix their
- * d_flags to indicate parental interest (their parent is the
- * original inode) */
- spin_lock(&alias->d_lock);
-- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
-+ list_for_each_entry(child, &alias->d_subdirs, d_child) {
- if (!child->d_inode)
- continue;
-
-diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
-index 0d3a97d..1167485 100644
---- a/fs/ocfs2/dcache.c
-+++ b/fs/ocfs2/dcache.c
-@@ -173,7 +173,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
- struct dentry *dentry;
-
- spin_lock(&inode->i_lock);
-- hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
-+ hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- spin_lock(&dentry->d_lock);
- if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
- trace_ocfs2_find_local_alias(dentry->d_name.len,
-diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index c254671..eaa7374 100644
---- a/fs/proc/task_mmu.c
-+++ b/fs/proc/task_mmu.c
-@@ -993,9 +993,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
- struct vm_area_struct *vma;
- struct pagemapread *pm = walk->private;
- spinlock_t *ptl;
-- pte_t *pte;
-+ pte_t *pte, *orig_pte;
- int err = 0;
-- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
- /* find the first VMA at or above 'addr' */
- vma = find_vma(walk->mm, addr);
-@@ -1009,6 +1008,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-
- for (; addr != end; addr += PAGE_SIZE) {
- unsigned long offset;
-+ pagemap_entry_t pme;
-
- offset = (addr & ~PAGEMAP_WALK_MASK) >>
- PAGE_SHIFT;
-@@ -1023,32 +1023,55 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-
- if (pmd_trans_unstable(pmd))
- return 0;
-- for (; addr != end; addr += PAGE_SIZE) {
-- int flags2;
--
-- /* check to see if we've left 'vma' behind
-- * and need a new, higher one */
-- if (vma && (addr >= vma->vm_end)) {
-- vma = find_vma(walk->mm, addr);
-- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
-- flags2 = __PM_SOFT_DIRTY;
-- else
-- flags2 = 0;
-- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
-+
-+ while (1) {
-+ /* End of address space hole, which we mark as non-present. */
-+ unsigned long hole_end;
-+
-+ if (vma)
-+ hole_end = min(end, vma->vm_start);
-+ else
-+ hole_end = end;
-+
-+ for (; addr < hole_end; addr += PAGE_SIZE) {
-+ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-+
-+ err = add_to_pagemap(addr, &pme, pm);
-+ if (err)
-+ return err;
- }
-
-- /* check that 'vma' actually covers this address,
-- * and that it isn't a huge page vma */
-- if (vma && (vma->vm_start <= addr) &&
-- !is_vm_hugetlb_page(vma)) {
-- pte = pte_offset_map(pmd, addr);
-+ if (!vma || vma->vm_start >= end)
-+ break;
-+ /*
-+ * We can't possibly be in a hugetlb VMA. In general,
-+ * for a mm_walk with a pmd_entry and a hugetlb_entry,
-+ * the pmd_entry can only be called on addresses in a
-+ * hugetlb if the walk starts in a non-hugetlb VMA and
-+ * spans a hugepage VMA. Since pagemap_read walks are
-+ * PMD-sized and PMD-aligned, this will never be true.
-+ */
-+ BUG_ON(is_vm_hugetlb_page(vma));
-+
-+ /* Addresses in the VMA. */
-+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-+ for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
-+ pagemap_entry_t pme;
-+
- pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-- /* unmap before userspace copy */
-- pte_unmap(pte);
-+ err = add_to_pagemap(addr, &pme, pm);
-+ if (err)
-+ break;
- }
-- err = add_to_pagemap(addr, &pme, pm);
-+ pte_unmap_unlock(orig_pte, ptl);
-+
- if (err)
- return err;
-+
-+ if (addr == end)
-+ break;
-+
-+ vma = find_vma(walk->mm, addr);
- }
-
- cond_resched();
-diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 3b50cac..0f0eb1c 100644
---- a/include/linux/dcache.h
-+++ b/include/linux/dcache.h
-@@ -124,15 +124,15 @@ struct dentry {
- void *d_fsdata; /* fs-specific data */
-
- struct list_head d_lru; /* LRU list */
-+ struct list_head d_child; /* child of parent list */
-+ struct list_head d_subdirs; /* our children */
- /*
-- * d_child and d_rcu can share memory
-+ * d_alias and d_rcu can share memory
- */
- union {
-- struct list_head d_child; /* child of parent list */
-+ struct hlist_node d_alias; /* inode alias list */
- struct rcu_head d_rcu;
- } d_u;
-- struct list_head d_subdirs; /* our children */
-- struct hlist_node d_alias; /* inode alias list */
- };
-
- /*
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 46b8ab5..a7b311d 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -1009,6 +1009,7 @@ static inline int page_mapped(struct page *page)
- #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
- #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
- #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
-+#define VM_FAULT_SIGSEGV 0x0040
-
- #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
- #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
-@@ -1017,8 +1018,9 @@ static inline int page_mapped(struct page *page)
-
- #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-
--#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
-+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
-+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
-+ VM_FAULT_FALLBACK)
-
- /* Encode hstate index for a hwpoisoned large page */
- #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 911718f..bf46cc8 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -1880,6 +1880,12 @@ void netdev_freemem(struct net_device *dev);
- void synchronize_net(void);
- int init_dummy_netdev(struct net_device *dev);
-
-+DECLARE_PER_CPU(int, xmit_recursion);
-+static inline int dev_recursion_level(void)
-+{
-+ return this_cpu_read(xmit_recursion);
-+}
-+
- struct net_device *dev_get_by_index(struct net *net, int ifindex);
- struct net_device *__dev_get_by_index(struct net *net, int ifindex);
- struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 218b058..91fe6a3 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1695,7 +1695,7 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
- }
-
-
--static int pid_alive(const struct task_struct *p);
-+static inline int pid_alive(const struct task_struct *p);
- static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
- {
- pid_t pid = 0;
-diff --git a/include/net/ip.h b/include/net/ip.h
-index 3446cdd..5128fa7 100644
---- a/include/net/ip.h
-+++ b/include/net/ip.h
-@@ -407,22 +407,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
-
- #endif
-
--static inline int sk_mc_loop(struct sock *sk)
--{
-- if (!sk)
-- return 1;
-- switch (sk->sk_family) {
-- case AF_INET:
-- return inet_sk(sk)->mc_loop;
--#if IS_ENABLED(CONFIG_IPV6)
-- case AF_INET6:
-- return inet6_sk(sk)->mc_loop;
--#endif
-- }
-- WARN_ON(1);
-- return 1;
--}
--
- bool ip_call_ra_chain(struct sk_buff *skb);
-
- /*
-diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
-index 2e74c6c..ee2d53a 100644
---- a/include/net/ip6_route.h
-+++ b/include/net/ip6_route.h
-@@ -168,7 +168,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-
- static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
- {
-- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
-+ inet6_sk(skb->sk) : NULL;
-
- return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-diff --git a/include/net/sock.h b/include/net/sock.h
-index f66b2b1..0c79a74 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -1815,6 +1815,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
-
- struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
-
-+bool sk_mc_loop(struct sock *sk);
-+
- static inline bool sk_can_gso(const struct sock *sk)
- {
- return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
-diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 550e205..18711f3 100644
---- a/kernel/cgroup.c
-+++ b/kernel/cgroup.c
-@@ -971,7 +971,7 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
- parent = dentry->d_parent;
- spin_lock(&parent->d_lock);
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-- list_del_init(&dentry->d_u.d_child);
-+ list_del_init(&dentry->d_child);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&parent->d_lock);
- remove_dir(dentry);
-diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 813b021..a2d62b3 100644
---- a/kernel/trace/trace.c
-+++ b/kernel/trace/trace.c
-@@ -6158,7 +6158,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m
- int ret;
-
- /* Paranoid: Make sure the parent is the "instances" directory */
-- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
-+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
- if (WARN_ON_ONCE(parent != trace_instance_dir))
- return -ENOENT;
-
-@@ -6185,7 +6185,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry)
- int ret;
-
- /* Paranoid: Make sure the parent is the "instances" directory */
-- parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
-+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
- if (WARN_ON_ONCE(parent != trace_instance_dir))
- return -ENOENT;
-
-diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index e4c4efc..c6646a5 100644
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -428,7 +428,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
-
- if (dir) {
- spin_lock(&dir->d_lock); /* probably unneeded */
-- list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
-+ list_for_each_entry(child, &dir->d_subdirs, d_child) {
- if (child->d_inode) /* probably unneeded */
- child->d_inode->i_private = NULL;
- }
-diff --git a/mm/ksm.c b/mm/ksm.c
-index 68710e8..5e706e3 100644
---- a/mm/ksm.c
-+++ b/mm/ksm.c
-@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
- else
- ret = VM_FAULT_WRITE;
- put_page(page);
-- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
-+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
- /*
- * We must loop because handle_mm_fault() may back out if there's
- * any difficulty e.g. if pte accessed bit gets updated concurrently.
-diff --git a/mm/memory-failure.c b/mm/memory-failure.c
-index a98c7fc..ffc7bf0 100644
---- a/mm/memory-failure.c
-+++ b/mm/memory-failure.c
-@@ -1645,8 +1645,6 @@ static int __soft_offline_page(struct page *page, int flags)
- * setting PG_hwpoison.
- */
- if (!is_free_buddy_page(page))
-- lru_add_drain_all();
-- if (!is_free_buddy_page(page))
- drain_all_pages();
- SetPageHWPoison(page);
- if (!is_free_buddy_page(page))
-diff --git a/mm/memory.c b/mm/memory.c
-index 102af09..749e1c6 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -1836,7 +1836,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- else
- return -EFAULT;
- }
-- if (ret & VM_FAULT_SIGBUS)
-+ if (ret & (VM_FAULT_SIGBUS |
-+ VM_FAULT_SIGSEGV))
- return i ? i : -EFAULT;
- BUG();
- }
-@@ -1946,7 +1947,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
- return -ENOMEM;
- if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
- return -EHWPOISON;
-- if (ret & VM_FAULT_SIGBUS)
-+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
- return -EFAULT;
- BUG();
- }
-@@ -3235,7 +3236,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
-
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, address) < 0)
-- return VM_FAULT_SIGBUS;
-+ return VM_FAULT_SIGSEGV;
-
- /* Use the zero-page for reads */
- if (!(flags & FAULT_FLAG_WRITE)) {
-diff --git a/net/core/dev.c b/net/core/dev.c
-index f6d8d7f..73abbd7 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -2775,7 +2775,9 @@ static void skb_update_prio(struct sk_buff *skb)
- #define skb_update_prio(skb)
- #endif
-
--static DEFINE_PER_CPU(int, xmit_recursion);
-+DEFINE_PER_CPU(int, xmit_recursion);
-+EXPORT_SYMBOL(xmit_recursion);
-+
- #define RECURSION_LIMIT 10
-
- /**
-diff --git a/net/core/sock.c b/net/core/sock.c
-index c806956..650dd58 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -659,6 +659,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
- sock_reset_flag(sk, bit);
- }
-
-+bool sk_mc_loop(struct sock *sk)
-+{
-+ if (dev_recursion_level())
-+ return false;
-+ if (!sk)
-+ return true;
-+ switch (sk->sk_family) {
-+ case AF_INET:
-+ return inet_sk(sk)->mc_loop;
-+#if IS_ENABLED(CONFIG_IPV6)
-+ case AF_INET6:
-+ return inet6_sk(sk)->mc_loop;
-+#endif
-+ }
-+ WARN_ON(1);
-+ return true;
-+}
-+EXPORT_SYMBOL(sk_mc_loop);
-+
- /*
- * This is meant for all protocols to use and covers goings on
- * at the socket level. Everything here is generic.
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index 2291791..9fbd69e 100644
---- a/net/ipv4/tcp_input.c
-+++ b/net/ipv4/tcp_input.c
-@@ -3064,10 +3064,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- if (seq_rtt < 0) {
- seq_rtt = ca_seq_rtt;
- }
-- if (!(sacked & TCPCB_SACKED_ACKED))
-+ if (!(sacked & TCPCB_SACKED_ACKED)) {
- reord = min(pkts_acked, reord);
-- if (!after(scb->end_seq, tp->high_seq))
-- flag |= FLAG_ORIG_SACK_ACKED;
-+ if (!after(scb->end_seq, tp->high_seq))
-+ flag |= FLAG_ORIG_SACK_ACKED;
-+ }
- }
-
- if (sacked & TCPCB_SACKED_ACKED)
-diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index b7effad..e2f8bd0 100644
---- a/net/ipv4/tcp_ipv4.c
-+++ b/net/ipv4/tcp_ipv4.c
-@@ -1875,7 +1875,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
- skb->sk = sk;
- skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
-- struct dst_entry *dst = sk->sk_rx_dst;
-+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
-
- if (dst)
- dst = dst_check(dst, 0);
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index 96f64e5..8c70c73 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -2796,6 +2796,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- }
- #endif
-
-+ /* Do not fool tcpdump (if any), clean our debris */
-+ skb->tstamp.tv64 = 0;
- return skb;
- }
- EXPORT_SYMBOL(tcp_make_synack);
-diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
-index d7907ec..066d0b0 100644
---- a/net/ipv6/ip6_output.c
-+++ b/net/ipv6/ip6_output.c
-@@ -555,7 +555,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
- {
- struct sk_buff *frag;
- struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
-- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
-+ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
-+ inet6_sk(skb->sk) : NULL;
- struct ipv6hdr *tmp_hdr;
- struct frag_hdr *fh;
- unsigned int mtu, hlen, left, len;
-diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
-index 09a22f4..bcd6518 100644
---- a/net/ipv6/ndisc.c
-+++ b/net/ipv6/ndisc.c
-@@ -1193,7 +1193,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
- if (rt)
- rt6_set_expires(rt, jiffies + (HZ * lifetime));
- if (ra_msg->icmph.icmp6_hop_limit) {
-- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
-+ /* Only set hop_limit on the interface if it is higher than
-+ * the current hop_limit.
-+ */
-+ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
-+ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
-+ } else {
-+ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
-+ }
- if (rt)
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
- ra_msg->icmph.icmp6_hop_limit);
-diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index a4f890d..9d4332d 100644
---- a/net/ipv6/tcp_ipv6.c
-+++ b/net/ipv6/tcp_ipv6.c
-@@ -1633,7 +1633,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
- skb->sk = sk;
- skb->destructor = sock_edemux;
- if (sk->sk_state != TCP_TIME_WAIT) {
-- struct dst_entry *dst = sk->sk_rx_dst;
-+ struct dst_entry *dst = ACCESS_ONCE(sk->sk_rx_dst);
-
- if (dst)
- dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
-diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
-index d25f293..957c1db 100644
---- a/net/netfilter/nf_conntrack_proto_generic.c
-+++ b/net/netfilter/nf_conntrack_proto_generic.c
-@@ -14,6 +14,30 @@
-
- static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
-
-+static bool nf_generic_should_process(u8 proto)
-+{
-+ switch (proto) {
-+#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
-+ case IPPROTO_SCTP:
-+ return false;
-+#endif
-+#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
-+ case IPPROTO_DCCP:
-+ return false;
-+#endif
-+#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
-+ case IPPROTO_GRE:
-+ return false;
-+#endif
-+#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
-+ case IPPROTO_UDPLITE:
-+ return false;
-+#endif
-+ default:
-+ return true;
-+ }
-+}
-+
- static inline struct nf_generic_net *generic_pernet(struct net *net)
- {
- return &net->ct.nf_ct_proto.generic;
-@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
- static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts)
- {
-- return true;
-+ return nf_generic_should_process(nf_ct_protonum(ct));
- }
-
- #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
-index 6c4cbd9..fc68bf6 100644
---- a/security/selinux/selinuxfs.c
-+++ b/security/selinux/selinuxfs.c
-@@ -1200,7 +1200,7 @@ static void sel_remove_entries(struct dentry *de)
- spin_lock(&de->d_lock);
- node = de->d_subdirs.next;
- while (node != &de->d_subdirs) {
-- struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
-+ struct dentry *d = list_entry(node, struct dentry, d_child);
-
- spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
- list_del_init(node);
-@@ -1674,12 +1674,12 @@ static void sel_remove_classes(void)
-
- list_for_each(class_node, &class_dir->d_subdirs) {
- struct dentry *class_subdir = list_entry(class_node,
-- struct dentry, d_u.d_child);
-+ struct dentry, d_child);
- struct list_head *class_subdir_node;
-
- list_for_each(class_subdir_node, &class_subdir->d_subdirs) {
- struct dentry *d = list_entry(class_subdir_node,
-- struct dentry, d_u.d_child);
-+ struct dentry, d_child);
-
- if (d->d_inode)
- if (d->d_inode->i_mode & S_IFDIR)
diff --git a/3.14.40/0000_README b/3.14.41/0000_README
index 39720d8..77861d5 100644
--- a/3.14.40/0000_README
+++ b/3.14.41/0000_README
@@ -2,11 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1039_linux-3.14.40.patch
+Patch: 1040_linux-3.14.41.patch
From: http://www.kernel.org
-Desc: Linux 3.14.40
+Desc: Linux 3.14.41
-Patch: 4420_grsecurity-3.1-3.14.40-201505042052.patch
+Patch: 4420_grsecurity-3.1-3.14.41-201505091723.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.41/1040_linux-3.14.41.patch b/3.14.41/1040_linux-3.14.41.patch
new file mode 100644
index 0000000..444b427
--- /dev/null
+++ b/3.14.41/1040_linux-3.14.41.patch
@@ -0,0 +1,3586 @@
+diff --git a/Makefile b/Makefile
+index 070e0eb..7a60d4a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Remembering Coco
+
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index 187fd46..355117c 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -154,7 +154,7 @@
+
+ uart2: serial@12200 {
+ compatible = "ns16550a";
+- reg = <0x12000 0x100>;
++ reg = <0x12200 0x100>;
+ reg-shift = <2>;
+ interrupts = <9>;
+ clocks = <&core_clk 0>;
+@@ -163,7 +163,7 @@
+
+ uart3: serial@12300 {
+ compatible = "ns16550a";
+- reg = <0x12100 0x100>;
++ reg = <0x12300 0x100>;
+ reg-shift = <2>;
+ interrupts = <10>;
+ clocks = <&core_clk 0>;
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index f4b46d3..051b726 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -114,7 +114,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
+index 7bc6668..dcbe17f 100644
+--- a/arch/arm/mach-s3c64xx/crag6410.h
++++ b/arch/arm/mach-s3c64xx/crag6410.h
+@@ -14,6 +14,7 @@
+ #include <mach/gpio-samsung.h>
+
+ #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
++#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
+
+ #define PCA935X_GPIO_BASE GPIO_BOARD_START
+ #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index 3df3c37..66b95c4 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -555,6 +555,7 @@ static struct wm831x_touch_pdata touch_pdata = {
+
+ static struct wm831x_pdata crag_pmic_pdata = {
+ .wm831x_num = 1,
++ .irq_base = BANFF_PMIC_IRQ_BASE,
+ .gpio_base = BANFF_PMIC_GPIO_BASE,
+ .soft_shutdown = true,
+
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index 6d20b7d..a268a9a 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -43,7 +43,7 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+ # Assembly rules for the .S files
+-$(obj-vdso): %.o: %.S
++$(obj-vdso): %.o: %.S FORCE
+ $(call if_changed_dep,vdsoas)
+
+ # Actual build commands
+diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
+index 356ee84..04845aa 100644
+--- a/arch/c6x/kernel/time.c
++++ b/arch/c6x/kernel/time.c
+@@ -49,7 +49,7 @@ u64 sched_clock(void)
+ return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+ }
+
+-void time_init(void)
++void __init time_init(void)
+ {
+ u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
+deleted file mode 100644
+index 3adac3b..0000000
+--- a/arch/mips/include/asm/suspend.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-#ifndef __ASM_SUSPEND_H
+-#define __ASM_SUSPEND_H
+-
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+-#endif /* __ASM_SUSPEND_H */
+diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
+index 521e596..2129e67 100644
+--- a/arch/mips/power/cpu.c
++++ b/arch/mips/power/cpu.c
+@@ -7,7 +7,7 @@
+ * Author: Hu Hongbing <huhb@lemote.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+-#include <asm/suspend.h>
++#include <asm/sections.h>
+ #include <asm/fpu.h>
+ #include <asm/dsp.h>
+
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 32a7c82..e7567c8 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+
+ LEAF(swsusp_arch_resume)
++ /* Avoid TLB mismatch during and after kernel resume */
++ jal local_flush_tlb_all
+ PTR_L t0, restore_pblist
+ 0:
+ PTR_L t1, PBE_ADDRESS(t0) /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
+- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index 2912b87..3eb36ce 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -61,12 +61,22 @@ struct cache_type_info {
+ };
+
+ /* These are used to index the cache_type_info array. */
+-#define CACHE_TYPE_UNIFIED 0
+-#define CACHE_TYPE_INSTRUCTION 1
+-#define CACHE_TYPE_DATA 2
++#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
++#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
++#define CACHE_TYPE_INSTRUCTION 2
++#define CACHE_TYPE_DATA 3
+
+ static const struct cache_type_info cache_type_info[] = {
+ {
++ /* Embedded systems that use cache-size, cache-block-size,
++ * etc. for the Unified (typically L2) cache. */
++ .name = "Unified",
++ .size_prop = "cache-size",
++ .line_size_props = { "cache-line-size",
++ "cache-block-size", },
++ .nr_sets_prop = "cache-sets",
++ },
++ {
+ /* PowerPC Processor binding says the [di]-cache-*
+ * must be equal on unified caches, so just use
+ * d-cache properties. */
+@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
+ {
+ struct cache *iter;
+
+- if (cache->type == CACHE_TYPE_UNIFIED)
++ if (cache->type == CACHE_TYPE_UNIFIED ||
++ cache->type == CACHE_TYPE_UNIFIED_D)
+ return cache;
+
+ list_for_each_entry(iter, &cache_list, list)
+@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
+ return of_get_property(np, "cache-unified", NULL);
+ }
+
+-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+- int level)
++/*
++ * Unified caches can have two different sets of tags. Most embedded
++ * use cache-size, etc. for the unified cache size, but open firmware systems
++ * use d-cache-size, etc. Check on initialization for which type we have, and
++ * return the appropriate structure type. Assume it's embedded if it isn't
++ * open firmware. If it's yet a 3rd type, then there will be missing entries
++ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
++ * to be extended further.
++ */
++static int cache_is_unified_d(const struct device_node *np)
+ {
+- struct cache *cache;
++ return of_get_property(np,
++ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
++ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
++}
+
++/*
++ */
++static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
++{
+ pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+-
+- return cache;
++ return new_cache(cache_is_unified_d(node), level, node);
+ }
+
+ static struct cache *cache_do_one_devnode_split(struct device_node *node,
+diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
+index 0167d53..a531154 100644
+--- a/arch/powerpc/kernel/suspend.c
++++ b/arch/powerpc/kernel/suspend.c
+@@ -9,9 +9,7 @@
+
+ #include <linux/mm.h>
+ #include <asm/page.h>
+-
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
++#include <asm/sections.h>
+
+ /*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 2396dda..ead5535 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ sp = regs->gpr[1];
+ perf_callchain_store(entry, next_ip);
+
+- for (;;) {
++ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ fp = (unsigned long __user *) sp;
+ if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ return;
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index 2b90ff8..59ef76c 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
+
+ io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+
+- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
++ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
+
+ mb();
+diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
+index a7a7537..d3236c9 100644
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -13,14 +13,10 @@
+ #include <asm/ipl.h>
+ #include <asm/cio.h>
+ #include <asm/pci.h>
++#include <asm/sections.h>
+ #include "entry.h"
+
+ /*
+- * References to section boundaries
+- */
+-extern const void __nosave_begin, __nosave_end;
+-
+-/*
+ * The restore of the saved pages in an hibernation image will set
+ * the change and referenced bits in the storage key for each page.
+ * Overindication of the referenced bits after an hibernation cycle
+@@ -142,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ /* Always save lowcore pages (LC protection might be enabled). */
+ if (pfn <= LC_PAGES)
+@@ -149,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++ if (pfn >= stext_pfn && pfn <= eshared_pfn)
++ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ if (tprot(PFN_PHYS(pfn)))
+ return 1;
+ return 0;
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 75beea6..3588f2f 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -414,6 +414,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+ for (n = mem->count - 1; n > 0 ; n--)
+ memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
++ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+ mem->vm[0].cpus_total = cpus;
+ mem->vm[0].cpus_configured = cpus;
+ mem->vm[0].cpus_standby = 0;
+diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
+index 1b61997..7a99e6a 100644
+--- a/arch/sh/include/asm/sections.h
++++ b/arch/sh/include/asm/sections.h
+@@ -3,7 +3,6 @@
+
+ #include <asm-generic/sections.h>
+
+-extern long __nosave_begin, __nosave_end;
+ extern long __machvec_start, __machvec_end;
+ extern char __uncached_start, __uncached_end;
+ extern char __start_eh_frame[], __stop_eh_frame[];
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
+index 42b0b8c..17bd2e1 100644
+--- a/arch/sparc/power/hibernate.c
++++ b/arch/sparc/power/hibernate.c
+@@ -9,11 +9,9 @@
+ #include <asm/hibernate.h>
+ #include <asm/visasm.h>
+ #include <asm/page.h>
++#include <asm/sections.h>
+ #include <asm/tlb.h>
+
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ struct saved_context saved_context;
+
+ /*
+diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
+index 4dcd34a..77b5226 100644
+--- a/arch/unicore32/include/mach/pm.h
++++ b/arch/unicore32/include/mach/pm.h
+@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state);
+ /* Defined in hibernate_asm.S */
+ extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
+
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ extern struct pbe *restore_pblist;
+ #endif
+diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
+index d75ef8b..9969ec3 100644
+--- a/arch/unicore32/kernel/hibernate.c
++++ b/arch/unicore32/kernel/hibernate.c
+@@ -18,6 +18,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/sections.h>
+ #include <asm/suspend.h>
+
+ #include "mach/pm.h"
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index 1da25a5..3ba047c 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ :: "a" (eax), "c" (ecx));
+ }
+
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++ trace_hardirqs_on();
++ /* "mwait %eax, %ecx;" */
++ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
++ :: "a" (eax), "c" (ecx));
++}
++
+ /*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 3fb8d95..1a1ff42 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -28,6 +28,7 @@
+ #include <asm/fpu-internal.h>
+ #include <asm/debugreg.h>
+ #include <asm/nmi.h>
++#include <asm/mwait.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -398,6 +399,52 @@ static void amd_e400_idle(void)
+ default_idle();
+ }
+
++/*
++ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
++ * We can't rely on cpuidle installing MWAIT, because it will not load
++ * on systems that support only C1 -- so the boot default must be MWAIT.
++ *
++ * Some AMD machines are the opposite, they depend on using HALT.
++ *
++ * So for default C1, which is used during boot until cpuidle loads,
++ * use MWAIT-C1 on Intel HW that has it, else use HALT.
++ */
++static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
++{
++ if (c->x86_vendor != X86_VENDOR_INTEL)
++ return 0;
++
++ if (!cpu_has(c, X86_FEATURE_MWAIT))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * MONITOR/MWAIT with no hints, used for default default C1 state.
++ * This invokes MWAIT with interrutps enabled and no flags,
++ * which is backwards compatible with the original MWAIT implementation.
++ */
++
++static void mwait_idle(void)
++{
++ if (!current_set_polling_and_test()) {
++ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
++ mb();
++ clflush((void *)&current_thread_info()->flags);
++ mb();
++ }
++
++ __monitor((void *)&current_thread_info()->flags, 0, 0);
++ if (!need_resched())
++ __sti_mwait(0, 0);
++ else
++ local_irq_enable();
++ } else
++ local_irq_enable();
++ current_clr_polling();
++}
++
+ void select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+@@ -411,6 +458,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
+ /* E400: APIC timer interrupt does not wake up CPU from C1e */
+ pr_info("using AMD E400 aware idle routine\n");
+ x86_idle = amd_e400_idle;
++ } else if (prefer_mwait_c1_over_halt(c)) {
++ pr_info("using mwait in idle threads\n");
++ x86_idle = mwait_idle;
+ } else
+ x86_idle = default_idle;
+ }
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
+index 7d28c88..291226b 100644
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -13,13 +13,11 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/mmzone.h>
++#include <asm/sections.h>
+
+ /* Defined in hibernate_asm_32.S */
+ extern int restore_image(void);
+
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ /* Pointer to the temporary resume page tables */
+ pgd_t *resume_pg_dir;
+
+diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
+index 304fca2..2276238 100644
+--- a/arch/x86/power/hibernate_64.c
++++ b/arch/x86/power/hibernate_64.c
+@@ -17,11 +17,9 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/mtrr.h>
++#include <asm/sections.h>
+ #include <asm/suspend.h>
+
+-/* References to section boundaries */
+-extern __visible const void __nosave_begin, __nosave_end;
+-
+ /* Defined in hibernate_asm_64.S */
+ extern asmlinkage int restore_image(void);
+
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index c87ae7c..8879361 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -336,6 +336,36 @@ menu "Executable file formats"
+
+ source "fs/Kconfig.binfmt"
+
++config XTFPGA_LCD
++ bool "Enable XTFPGA LCD driver"
++ depends on XTENSA_PLATFORM_XTFPGA
++ default n
++ help
++ There's a 2x16 LCD on most of XTFPGA boards, kernel may output
++ progress messages there during bootup/shutdown. It may be useful
++ during board bringup.
++
++ If unsure, say N.
++
++config XTFPGA_LCD_BASE_ADDR
++ hex "XTFPGA LCD base address"
++ depends on XTFPGA_LCD
++ default "0x0d0c0000"
++ help
++ Base address of the LCD controller inside KIO region.
++ Different boards from XTFPGA family have LCD controller at different
++ addresses. Please consult prototyping user guide for your board for
++ the correct address. Wrong address here may lead to hardware lockup.
++
++config XTFPGA_LCD_8BIT_ACCESS
++ bool "Use 8-bit access to XTFPGA LCD"
++ depends on XTFPGA_LCD
++ default n
++ help
++ LCD may be connected with 4- or 8-bit interface, 8-bit access may
++ only be used with 8-bit interface. Please consult prototyping user
++ guide for your board for the correct interface width.
++
+ endmenu
+
+ source "net/Kconfig"
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index 50084f7..b54fa1b 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
+ __SYSCALL(324, sys_name_to_handle_at, 5)
+ #define __NR_open_by_handle_at 325
+ __SYSCALL(325, sys_open_by_handle_at, 3)
+-#define __NR_sync_file_range 326
++#define __NR_sync_file_range2 326
+ __SYSCALL(326, sys_sync_file_range2, 6)
+ #define __NR_perf_event_open 327
+ __SYSCALL(327, sys_perf_event_open, 5)
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index d05f8fe..17b1ef3 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
+ {
+ struct iss_net_private *lp = (struct iss_net_private *)priv;
+
+- spin_lock(&lp->lock);
+ iss_net_poll();
++ spin_lock(&lp->lock);
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+ spin_unlock(&lp->lock);
+ }
+@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
+ struct iss_net_private *lp = netdev_priv(dev);
+ int err;
+
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ err = lp->tp.open(lp);
+ if (err < 0)
+@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
+ while ((err = iss_net_rx(dev)) > 0)
+ ;
+
+- spin_lock(&opened_lock);
++ spin_unlock_bh(&lp->lock);
++ spin_lock_bh(&opened_lock);
+ list_add(&lp->opened_list, &opened);
+- spin_unlock(&opened_lock);
++ spin_unlock_bh(&opened_lock);
++ spin_lock_bh(&lp->lock);
+
+ init_timer(&lp->timer);
+ lp->timer_val = ISS_NET_TIMER_VALUE;
+@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+ out:
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return err;
+ }
+
+@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ spin_lock(&opened_lock);
+ list_del(&opened);
+@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
+
+ lp->tp.close(lp);
+
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+- unsigned long flags;
+ int len;
+
+ netif_stop_queue(dev);
+- spin_lock_irqsave(&lp->lock, flags);
++ spin_lock_bh(&lp->lock);
+
+ len = lp->tp.write(lp, &skb);
+
+@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
+ }
+
+- spin_unlock_irqrestore(&lp->lock, flags);
++ spin_unlock_bh(&lp->lock);
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
+ *lp = (struct iss_net_private) {
+ .device_list = LIST_HEAD_INIT(lp->device_list),
+ .opened_list = LIST_HEAD_INIT(lp->opened_list),
+- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
+ .dev = dev,
+ .index = index,
+- };
++ };
+
++ spin_lock_init(&lp->lock);
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
+index b9ae206..7839d38 100644
+--- a/arch/xtensa/platforms/xtfpga/Makefile
++++ b/arch/xtensa/platforms/xtfpga/Makefile
+@@ -6,4 +6,5 @@
+ #
+ # Note 2! The CFLAGS definitions are in the main makefile...
+
+-obj-y = setup.o lcd.o
++obj-y += setup.o
++obj-$(CONFIG_XTFPGA_LCD) += lcd.o
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+index aeb316b..e8cc86f 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+@@ -40,9 +40,6 @@
+
+ /* UART */
+ #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+-/* LCD instruction and data addresses. */
+-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
+-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
+
+ /* Misc. */
+ #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+index 0e43564..4c8541e 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+@@ -11,10 +11,25 @@
+ #ifndef __XTENSA_XTAVNET_LCD_H
+ #define __XTENSA_XTAVNET_LCD_H
+
++#ifdef CONFIG_XTFPGA_LCD
+ /* Display string STR at position POS on the LCD. */
+ void lcd_disp_at_pos(char *str, unsigned char pos);
+
+ /* Shift the contents of the LCD display left or right. */
+ void lcd_shiftleft(void);
+ void lcd_shiftright(void);
++#else
++static inline void lcd_disp_at_pos(char *str, unsigned char pos)
++{
++}
++
++static inline void lcd_shiftleft(void)
++{
++}
++
++static inline void lcd_shiftright(void)
++{
++}
++#endif
++
+ #endif
+diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
+index 2872301..4dc0c1b 100644
+--- a/arch/xtensa/platforms/xtfpga/lcd.c
++++ b/arch/xtensa/platforms/xtfpga/lcd.c
+@@ -1,50 +1,63 @@
+ /*
+- * Driver for the LCD display on the Tensilica LX60 Board.
++ * Driver for the LCD display on the Tensilica XTFPGA board family.
++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
++ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+-/*
+- *
+- * FIXME: this code is from the examples from the LX60 user guide.
+- *
+- * The lcd_pause function does busy waiting, which is probably not
+- * great. Maybe the code could be changed to use kernel timers, or
+- * change the hardware to not need to wait.
+- */
+-
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+
+ #include <platform/hardware.h>
+ #include <platform/lcd.h>
+-#include <linux/delay.h>
+
+-#define LCD_PAUSE_ITERATIONS 4000
++/* LCD instruction and data addresses. */
++#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
++#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
++
+ #define LCD_CLEAR 0x1
+ #define LCD_DISPLAY_ON 0xc
+
+ /* 8bit and 2 lines display */
+ #define LCD_DISPLAY_MODE8BIT 0x38
++#define LCD_DISPLAY_MODE4BIT 0x28
+ #define LCD_DISPLAY_POS 0x80
+ #define LCD_SHIFT_LEFT 0x18
+ #define LCD_SHIFT_RIGHT 0x1c
+
++static void lcd_put_byte(u8 *addr, u8 data)
++{
++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*addr) = data;
++#else
++ ACCESS_ONCE(*addr) = data & 0xf0;
++ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
++#endif
++}
++
+ static int __init lcd_init(void)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
++ udelay(50);
++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
++ udelay(50);
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
++#endif
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_CLEAR;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+@@ -52,10 +65,10 @@ static int __init lcd_init(void)
+
+ void lcd_disp_at_pos(char *str, unsigned char pos)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ udelay(100);
+ while (*str != 0) {
+- *LCD_DATA_ADDR = *str;
++ lcd_put_byte(LCD_DATA_ADDR, *str);
+ udelay(200);
+ str++;
+ }
+@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
+
+ void lcd_shiftleft(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ udelay(50);
+ }
+
+ void lcd_shiftright(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ udelay(50);
+ }
+
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 666beea..9498c3d 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -192,7 +192,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+ struct acpi_device_physical_node *pn;
+ bool offline = true;
+
+- mutex_lock(&adev->physical_node_lock);
++ /*
++ * acpi_container_offline() calls this for all of the container's
++ * children under the container's physical_node_lock lock.
++ */
++ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ if (device_supports_offline(pn->dev) && !pn->dev->offline) {
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 45d0fa7..12b39dc 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
+ goto out_put;
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+- goto out_groups;
++ goto out_id;
+ error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+- goto out_id;
++ goto out_groups;
+ error = sysfs_create_link(&dev->kobj,
+ &dev->bus->p->subsys.kobj, "subsystem");
+ if (error)
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 5afe556..26b03e1 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -64,6 +64,7 @@ static const struct usb_device_id ath3k_table[] = {
+ /* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xE027) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x04F2, 0xAFF1) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x0CF3, 0xE019) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 03b3317..9eb1669 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -142,6 +142,7 @@ static const struct usb_device_id blacklist_table[] = {
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 0996a3a..a9dd21a 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -257,7 +257,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+- if (rcg->mnd_width && f->n)
++ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, mask,
+ cfg);
+diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
+index c0a7d77..a90af17 100644
+--- a/drivers/clk/tegra/clk.c
++++ b/drivers/clk/tegra/clk.c
+@@ -266,7 +266,7 @@ void __init tegra_add_of_provider(struct device_node *np)
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ rst_ctlr.of_node = np;
+- rst_ctlr.nr_resets = clk_num * 32;
++ rst_ctlr.nr_resets = periph_banks * 32;
+ reset_controller_register(&rst_ctlr);
+ }
+
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index dde41f1d..d522396 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+ return err;
+ }
+
+-static int omap_aes_check_aligned(struct scatterlist *sg)
++static int omap_aes_check_aligned(struct scatterlist *sg, int total)
+ {
++ int len = 0;
++
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return -1;
+ if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ return -1;
++
++ len += sg->length;
+ sg = sg_next(sg);
+ }
++
++ if (len != total)
++ return -1;
++
+ return 0;
+ }
+
+@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ dd->in_sg = req->src;
+ dd->out_sg = req->dst;
+
+- if (omap_aes_check_aligned(dd->in_sg) ||
+- omap_aes_check_aligned(dd->out_sg)) {
++ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
++ omap_aes_check_aligned(dd->out_sg, dd->total)) {
+ if (omap_aes_copy_sgs(dd))
+ pr_err("Failed to copy SGs for unaligned cases\n");
+ dd->sgs_copied = 1;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 3b1fd1c..e9d8cf6 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -304,11 +304,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -316,11 +318,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -328,11 +332,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -340,11 +346,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 8f42bd7..f1fc14c 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1928,15 +1928,15 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+ * that the GPIO was actually requested.
+ */
+
+-static int _gpiod_get_raw_value(const struct gpio_desc *desc)
++static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
+ {
+ struct gpio_chip *chip;
+- int value;
++ bool value;
+ int offset;
+
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
+- value = chip->get ? chip->get(chip, offset) : 0;
++ value = chip->get ? chip->get(chip, offset) : false;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
+ return value;
+ }
+@@ -1992,7 +1992,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
+ * @desc: gpio descriptor whose state needs to be set.
+ * @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
+ */
+-static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
++static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
+ {
+ int err = 0;
+ struct gpio_chip *chip = desc->chip;
+@@ -2019,7 +2019,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
+ * @desc: gpio descriptor whose state needs to be set.
+ * @value: Non-zero for setting it HIGH, otherwise it will be set to LOW.
+ */
+-static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
++static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
+ {
+ int err = 0;
+ struct gpio_chip *chip = desc->chip;
+@@ -2041,7 +2041,7 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
+ __func__, err);
+ }
+
+-static void _gpiod_set_raw_value(struct gpio_desc *desc, int value)
++static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
+ {
+ struct gpio_chip *chip;
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 0c83b3d..5b38bf8 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1181,6 +1181,7 @@
+ #define GMBUS_CYCLE_INDEX (2<<25)
+ #define GMBUS_CYCLE_STOP (4<<25)
+ #define GMBUS_BYTE_COUNT_SHIFT 16
++#define GMBUS_BYTE_COUNT_MAX 256U
+ #define GMBUS_SLAVE_INDEX_SHIFT 8
+ #define GMBUS_SLAVE_ADDR_SHIFT 1
+ #define GMBUS_SLAVE_READ (1<<0)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index d33b61d..1d02970 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -324,18 +324,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
+ }
+
+ static int
+-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+- u32 gmbus1_index)
++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len,
++ u32 gmbus1_index)
+ {
+ int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+- u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -357,11 +356,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ }
+
+ static int
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
++ u32 gmbus1_index)
+ {
+- int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+ u8 *buf = msg->buf;
++ unsigned int rx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
++ buf, len, gmbus1_index);
++ if (ret)
++ return ret;
++
++ rx_size -= len;
++ buf += len;
++ } while (rx_size != 0);
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len)
++{
++ int reg_offset = dev_priv->gpio_mmio_base;
++ unsigned int chunk_size = len;
+ u32 val, loop;
+
+ val = loop = 0;
+@@ -373,8 +396,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -391,6 +414,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ if (ret)
+ return ret;
+ }
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++{
++ u8 *buf = msg->buf;
++ unsigned int tx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
++ if (ret)
++ return ret;
++
++ buf += len;
++ tx_size -= len;
++ } while (tx_size != 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+index 461df93..4f32b34 100644
+--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+@@ -35,8 +35,6 @@
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
+
+-static struct platform_device *a3xx_pdev;
+-
+ static void a3xx_me_init(struct msm_gpu *gpu)
+ {
+ struct msm_ringbuffer *ring = gpu->rb;
+@@ -311,7 +309,6 @@ static void a3xx_destroy(struct msm_gpu *gpu)
+ ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+ #endif
+
+- put_device(&a3xx_gpu->pdev->dev);
+ kfree(a3xx_gpu);
+ }
+
+@@ -439,7 +436,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+- struct platform_device *pdev = a3xx_pdev;
++ struct msm_drm_private *priv = dev->dev_private;
++ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config;
+ int ret;
+
+@@ -460,7 +458,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+- get_device(&pdev->dev);
+ a3xx_gpu->pdev = pdev;
+
+ gpu->fast_rate = config->fast_rate;
+@@ -522,17 +519,24 @@ fail:
+ # include <mach/kgsl.h>
+ #endif
+
+-static int a3xx_probe(struct platform_device *pdev)
++static void set_gpu_pdev(struct drm_device *dev,
++ struct platform_device *pdev)
++{
++ struct msm_drm_private *priv = dev->dev_private;
++ priv->gpu_pdev = pdev;
++}
++
++static int a3xx_bind(struct device *dev, struct device *master, void *data)
+ {
+ static struct adreno_platform_config config = {};
+ #ifdef CONFIG_OF
+- struct device_node *child, *node = pdev->dev.of_node;
++ struct device_node *child, *node = dev->of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(node, "qcom,chipid", &val);
+ if (ret) {
+- dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
++ dev_err(dev, "could not find chipid: %d\n", ret);
+ return ret;
+ }
+
+@@ -548,7 +552,7 @@ static int a3xx_probe(struct platform_device *pdev)
+ for_each_child_of_node(child, pwrlvl) {
+ ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+ if (ret) {
+- dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
++ dev_err(dev, "could not find gpu-freq: %d\n", ret);
+ return ret;
+ }
+ config.fast_rate = max(config.fast_rate, val);
+@@ -558,12 +562,12 @@ static int a3xx_probe(struct platform_device *pdev)
+ }
+
+ if (!config.fast_rate) {
+- dev_err(&pdev->dev, "could not find clk rates\n");
++ dev_err(dev, "could not find clk rates\n");
+ return -ENXIO;
+ }
+
+ #else
+- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
++ struct kgsl_device_platform_data *pdata = dev->platform_data;
+ uint32_t version = socinfo_get_version();
+ if (cpu_is_apq8064ab()) {
+ config.fast_rate = 450000000;
+@@ -609,14 +613,30 @@ static int a3xx_probe(struct platform_device *pdev)
+ config.bus_scale_table = pdata->bus_scale_table;
+ # endif
+ #endif
+- pdev->dev.platform_data = &config;
+- a3xx_pdev = pdev;
++ dev->platform_data = &config;
++ set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ return 0;
+ }
+
++static void a3xx_unbind(struct device *dev, struct device *master,
++ void *data)
++{
++ set_gpu_pdev(dev_get_drvdata(master), NULL);
++}
++
++static const struct component_ops a3xx_ops = {
++ .bind = a3xx_bind,
++ .unbind = a3xx_unbind,
++};
++
++static int a3xx_probe(struct platform_device *pdev)
++{
++ return component_add(&pdev->dev, &a3xx_ops);
++}
++
+ static int a3xx_remove(struct platform_device *pdev)
+ {
+- a3xx_pdev = NULL;
++ component_del(&pdev->dev, &a3xx_ops);
+ return 0;
+ }
+
+@@ -624,7 +644,6 @@ static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+ };
+-MODULE_DEVICE_TABLE(of, dt_match);
+
+ static struct platform_driver a3xx_driver = {
+ .probe = a3xx_probe,
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 6f1588a..8a04a1d 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -17,8 +17,6 @@
+
+ #include "hdmi.h"
+
+-static struct platform_device *hdmi_pdev;
+-
+ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+ {
+ uint32_t ctrl = 0;
+@@ -75,7 +73,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+ {
+ struct hdmi *hdmi = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+- struct platform_device *pdev = hdmi_pdev;
++ struct platform_device *pdev = priv->hdmi_pdev;
+ struct hdmi_platform_config *config;
+ int i, ret;
+
+@@ -95,8 +93,6 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+
+ kref_init(&hdmi->refcount);
+
+- get_device(&pdev->dev);
+-
+ hdmi->dev = dev;
+ hdmi->pdev = pdev;
+ hdmi->config = config;
+@@ -249,17 +245,24 @@ fail:
+
+ #include <linux/of_gpio.h>
+
+-static int hdmi_dev_probe(struct platform_device *pdev)
++static void set_hdmi_pdev(struct drm_device *dev,
++ struct platform_device *pdev)
++{
++ struct msm_drm_private *priv = dev->dev_private;
++ priv->hdmi_pdev = pdev;
++}
++
++static int hdmi_bind(struct device *dev, struct device *master, void *data)
+ {
+ static struct hdmi_platform_config config = {};
+ #ifdef CONFIG_OF
+- struct device_node *of_node = pdev->dev.of_node;
++ struct device_node *of_node = dev->of_node;
+
+ int get_gpio(const char *name)
+ {
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+- dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
++ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+@@ -336,14 +339,30 @@ static int hdmi_dev_probe(struct platform_device *pdev)
+ config.mux_sel_gpio = -1;
+ }
+ #endif
+- pdev->dev.platform_data = &config;
+- hdmi_pdev = pdev;
++ dev->platform_data = &config;
++ set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ return 0;
+ }
+
++static void hdmi_unbind(struct device *dev, struct device *master,
++ void *data)
++{
++ set_hdmi_pdev(dev_get_drvdata(master), NULL);
++}
++
++static const struct component_ops hdmi_ops = {
++ .bind = hdmi_bind,
++ .unbind = hdmi_unbind,
++};
++
++static int hdmi_dev_probe(struct platform_device *pdev)
++{
++ return component_add(&pdev->dev, &hdmi_ops);
++}
++
+ static int hdmi_dev_remove(struct platform_device *pdev)
+ {
+- hdmi_pdev = NULL;
++ component_del(&pdev->dev, &hdmi_ops);
+ return 0;
+ }
+
+@@ -351,7 +370,6 @@ static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx" },
+ {}
+ };
+-MODULE_DEVICE_TABLE(of, dt_match);
+
+ static struct platform_driver hdmi_driver = {
+ .probe = hdmi_dev_probe,
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index e6adafc..e79cfd0 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -56,6 +56,10 @@ static char *vram;
+ MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+ module_param(vram, charp, 0);
+
++/*
++ * Util/helpers:
++ */
++
+ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+ {
+@@ -143,6 +147,8 @@ static int msm_unload(struct drm_device *dev)
+ priv->vram.paddr, &attrs);
+ }
+
++ component_unbind_all(dev->dev, dev);
++
+ dev->dev_private = NULL;
+
+ kfree(priv);
+@@ -175,6 +181,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
+ struct msm_kms *kms;
+ int ret;
+
++
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+@@ -226,6 +233,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
+ (uint32_t)(priv->vram.paddr + size));
+ }
+
++ platform_set_drvdata(pdev, dev);
++
++ /* Bind all our sub-components: */
++ ret = component_bind_all(dev->dev, dev);
++ if (ret)
++ return ret;
++
+ switch (get_mdp_ver(pdev)) {
+ case 4:
+ kms = mdp4_kms_init(dev);
+@@ -281,8 +295,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
+ goto fail;
+ }
+
+- platform_set_drvdata(pdev, dev);
+-
+ #ifdef CONFIG_DRM_MSM_FBDEV
+ priv->fbdev = msm_fbdev_init(dev);
+ #endif
+@@ -819,18 +831,110 @@ static const struct dev_pm_ops msm_pm_ops = {
+ };
+
+ /*
++ * Componentized driver support:
++ */
++
++#ifdef CONFIG_OF
++/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
++ * (or probably any other).. so probably some room for some helpers
++ */
++static int compare_of(struct device *dev, void *data)
++{
++ return dev->of_node == data;
++}
++
++static int msm_drm_add_components(struct device *master, struct master *m)
++{
++ struct device_node *np = master->of_node;
++ unsigned i;
++ int ret;
++
++ for (i = 0; ; i++) {
++ struct device_node *node;
++
++ node = of_parse_phandle(np, "connectors", i);
++ if (!node)
++ break;
++
++ ret = component_master_add_child(m, compare_of, node);
++ of_node_put(node);
++
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++#else
++static int compare_dev(struct device *dev, void *data)
++{
++ return dev == data;
++}
++
++static int msm_drm_add_components(struct device *master, struct master *m)
++{
++ /* For non-DT case, it kinda sucks. We don't actually have a way
++ * to know whether or not we are waiting for certain devices (or if
++ * they are simply not present). But for non-DT we only need to
++ * care about apq8064/apq8060/etc (all mdp4/a3xx):
++ */
++ static const char *devnames[] = {
++ "hdmi_msm.0", "kgsl-3d0.0",
++ };
++ int i;
++
++ DBG("Adding components..");
++
++ for (i = 0; i < ARRAY_SIZE(devnames); i++) {
++ struct device *dev;
++ int ret;
++
++ dev = bus_find_device_by_name(&platform_bus_type,
++ NULL, devnames[i]);
++ if (!dev) {
++ dev_info(master, "still waiting for %s\n", devnames[i]);
++ return -EPROBE_DEFER;
++ }
++
++ ret = component_master_add_child(m, compare_dev, dev);
++ if (ret) {
++ DBG("could not add child: %d", ret);
++ return ret;
++ }
++ }
++
++ return 0;
++}
++#endif
++
++static int msm_drm_bind(struct device *dev)
++{
++ return drm_platform_init(&msm_driver, to_platform_device(dev));
++}
++
++static void msm_drm_unbind(struct device *dev)
++{
++ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
++}
++
++static const struct component_master_ops msm_drm_ops = {
++ .add_components = msm_drm_add_components,
++ .bind = msm_drm_bind,
++ .unbind = msm_drm_unbind,
++};
++
++/*
+ * Platform driver:
+ */
+
+ static int msm_pdev_probe(struct platform_device *pdev)
+ {
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+- return drm_platform_init(&msm_driver, pdev);
++ return component_master_add(&pdev->dev, &msm_drm_ops);
+ }
+
+ static int msm_pdev_remove(struct platform_device *pdev)
+ {
+- drm_put_dev(platform_get_drvdata(pdev));
++ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 3d63269..9d10ee0 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -22,6 +22,7 @@
+ #include <linux/clk.h>
+ #include <linux/cpufreq.h>
+ #include <linux/module.h>
++#include <linux/component.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -69,6 +70,9 @@ struct msm_drm_private {
+
+ struct msm_kms *kms;
+
++ /* subordinate devices, if present: */
++ struct platform_device *hdmi_pdev, *gpu_pdev;
++
+ /* when we have more than one 'msm_gpu' these need to be an array: */
+ struct msm_gpu *gpu;
+ struct msm_file_private *lastctx;
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 663394f..0db3e20 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index e99e71a..356f22f 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -134,7 +134,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+@@ -150,7 +150,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ if (userdatalen)
+@@ -194,6 +194,9 @@ error1:
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
++error_gpadl:
++ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
++
+ error0:
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index 5fb80b8..43fe15a 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -217,6 +217,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+ adap->bus_recovery_info->set_scl(adap, 1);
+ return i2c_generic_recovery(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
+
+ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ {
+@@ -231,6 +232,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
+
+ int i2c_recover_bus(struct i2c_adapter *adap)
+ {
+@@ -240,6 +242,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
+ dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ return adap->bus_recovery_info->recover_bus(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_recover_bus);
+
+ static int i2c_device_probe(struct device *dev)
+ {
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 055ebeb..c1fef27 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -94,12 +94,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
++ if (!size)
++ return ERR_PTR(-EINVAL);
++
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+- if ((PAGE_ALIGN(addr + size) <= size) ||
+- (PAGE_ALIGN(addr + size) <= addr))
++ if (((addr + size) < addr) ||
++ PAGE_ALIGN(addr + size) < (addr + size))
+ return ERR_PTR(-EINVAL);
+
+ if (!can_do_mlock())
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index d8f4d1f..8d7cd98 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2274,8 +2274,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+
+ memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+- wr->wr.ud.hlen);
++ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ *lso_seg_len = halign;
+ return 0;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 0b75b57..cfc5a2e 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -814,6 +814,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
+ }
+
+ /*
++ * This writes the reg_07 value again to the hardware at the end of every
++ * set_rate call because the register loses its value. reg_07 allows setting
++ * absolute mode on v4 hardware
++ */
++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
++ unsigned int rate)
++{
++ struct elantech_data *etd = psmouse->private;
++
++ etd->original_set_rate(psmouse, rate);
++ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
++ psmouse_err(psmouse, "restoring reg_07 failed\n");
++}
++
++/*
+ * Put the touchpad into absolute mode
+ */
+ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+@@ -1015,6 +1030,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
++ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
++ * Asus X750JN 0x381f17 10, 14, 0e clickpad
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+@@ -1490,6 +1507,11 @@ int elantech_init(struct psmouse *psmouse)
+ goto init_fail;
+ }
+
++ if (etd->fw_version == 0x381f17) {
++ etd->original_set_rate = psmouse->set_rate;
++ psmouse->set_rate = elantech_set_rate_restore_reg_07;
++ }
++
+ if (elantech_set_input_params(psmouse)) {
+ psmouse_err(psmouse, "failed to query touchpad range.\n");
+ goto init_fail;
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 9e0e2a1..59263a3 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -139,6 +139,7 @@ struct elantech_data {
+ struct finger_pos mt[ETP_MAX_FINGERS];
+ unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
++ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
+ };
+
+ #ifdef CONFIG_MOUSE_PS2_ELANTECH
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 4a8d19d..5a4cda2 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -915,11 +915,10 @@ static int crypt_convert(struct crypt_config *cc,
+
+ switch (r) {
+ /* async */
++ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+- /* fall through*/
+- case -EINPROGRESS:
+ ctx->req = NULL;
+ ctx->cc_sector++;
+ continue;
+@@ -1314,10 +1313,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+ struct crypt_config *cc = io->cc;
+
+- if (error == -EINPROGRESS) {
+- complete(&ctx->restart);
++ if (error == -EINPROGRESS)
+ return;
+- }
+
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+@@ -1328,12 +1325,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+
+ if (!atomic_dec_and_test(&ctx->cc_pending))
+- return;
++ goto done;
+
+ if (bio_data_dir(io->base_bio) == READ)
+ kcryptd_crypt_read_done(io);
+ else
+ kcryptd_crypt_write_io_submit(io, 1);
++done:
++ if (!completion_done(&ctx->restart))
++ complete(&ctx->restart);
+ }
+
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 407a99e..683e685 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -320,7 +320,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
+
+ /*
+ * remaps the bio to the target device. we separate two flows.
+- * power 2 flow and a general flow for the sake of perfromance
++ * power 2 flow and a general flow for the sake of performance
+ */
+ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_t sector, sector_t *sector_offset)
+@@ -538,6 +538,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
++ sector = bio->bi_iter.bi_sector;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
+index c45c988..4572530 100644
+--- a/drivers/media/usb/stk1160/stk1160-v4l.c
++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
+@@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
+ if (mutex_lock_interruptible(&dev->v4l_lock))
+ return -ERESTARTSYS;
+
++ /*
++ * Once URBs are cancelled, the URB complete handler
++ * won't be running. This is required to safely release the
++ * current buffer (dev->isoc_ctl.buf).
++ */
+ stk1160_cancel_isoc(dev);
+
+ /*
+@@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
+- /* It's important to clear current buffer */
+- dev->isoc_ctl.buf = NULL;
++
++ /* It's important to release the current buffer */
++ if (dev->isoc_ctl.buf) {
++ buf = dev->isoc_ctl.buf;
++ dev->isoc_ctl.buf = NULL;
++
++ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
++ stk1160_info("buffer [%p/%d] aborted\n",
++ buf, buf->vb.v4l2_buf.index);
++ }
+ spin_unlock_irqrestore(&dev->buf_lock, flags);
+ }
+
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index fc145d2..922a750 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+
+ if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ if (msb->data_dir == READ) {
+- for (cnt = 0; cnt < msb->current_seg; cnt++)
++ for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ t_len += msb->req_sg[cnt].length
+ / msb->page_size;
+
+@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ t_len += msb->current_page - 1;
+
+ t_len *= msb->page_size;
++ }
+ }
+ } else
+ t_len = blk_rq_bytes(msb->block_req);
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index 6f27d9a..21841fe 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+- bitflips = !!err;
++ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 8ca49f2..4cbbd55 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -451,7 +451,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ /* Validate the request */
+ err = -EINVAL;
+ if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
++ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+
+ err = get_exclusive(desc);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 0e11671d..930cf2c 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+- vol->eba_tbl[aeb->lnum] = aeb->pnum;
++ else
++ vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ }
+ }
+
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 68b924e..c6b0b07 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -995,7 +995,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int cancel)
+ {
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+- int vol_id = -1, uninitialized_var(lnum);
++ int vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+ #endif
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 46e6544..b655fe4 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
++ struct e1000_rx_ring *rx_ring,
++ int cleaned_count)
++{
++}
+ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+@@ -3531,8 +3536,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ msleep(1);
+ /* e1000_down has a dependency on max_frame_size */
+ hw->max_frame_size = max_frame;
+- if (netif_running(netdev))
++ if (netif_running(netdev)) {
++ /* prevent buffers from being reallocated */
++ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
+ e1000_down(adapter);
++ }
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index f583167..66c92a1 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -314,6 +314,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+@@ -370,6 +371,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
++ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
+index 7f1669c..779dc2b 100644
+--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
+@@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+
+-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
+
+ WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index f7381dd..1bce432 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -26,8 +26,8 @@
+
+ #include "wlcore.h"
+
+-int wl1271_format_buffer(char __user *userbuf, size_t count,
+- loff_t *ppos, char *fmt, ...);
++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
++ loff_t *ppos, char *fmt, ...);
+
+ int wl1271_debugfs_init(struct wl1271 *wl);
+ void wl1271_debugfs_exit(struct wl1271 *wl);
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index 7297df2..2d9d198 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -1037,7 +1037,9 @@ static int compal_probe(struct platform_device *pdev)
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+- power_supply_register(&compal_device->dev, &data->psy);
++ err = power_supply_register(&compal_device->dev, &data->psy);
++ if (err < 0)
++ goto remove;
+
+ platform_set_drvdata(pdev, data);
+
+diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
+index ed49b50..72da2a6 100644
+--- a/drivers/power/lp8788-charger.c
++++ b/drivers/power/lp8788-charger.c
+@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
+ pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
+ pchg->battery.get_property = lp8788_battery_get_property;
+
+- if (power_supply_register(&pdev->dev, &pchg->battery))
++ if (power_supply_register(&pdev->dev, &pchg->battery)) {
++ power_supply_unregister(&pchg->charger);
+ return -EPERM;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
+index 7ef445a..cf90760 100644
+--- a/drivers/power/twl4030_madc_battery.c
++++ b/drivers/power/twl4030_madc_battery.c
+@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ {
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
++ int ret = 0;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ if (ret < 0)
++ kfree(twl4030_madc_bat);
+
+- return 0;
++ return ret;
+ }
+
+ static int twl4030_madc_battery_remove(struct platform_device *pdev)
+diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
+index 65180e1..50c75e1 100644
+--- a/drivers/scsi/bfa/bfa_ioc.c
++++ b/drivers/scsi/bfa/bfa_ioc.c
+@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+- udelay(10000);
++ mdelay(10);
+ }
+ return BFA_STATUS_OK;
+ }
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 6c1f223..4c0b8b4 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+ {
+- struct sas_ha_struct *sha = mvi->sas;
+ struct sas_task *task = tei->task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct asd_sas_port *sas_port = dev->port;
+- struct sas_phy *sphy = dev->phy;
+- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ u32 tag = tei->tag, hdr_tag;
+@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ slot->tx = mvi->tx_prod;
+ del_q = TXQ_MODE_I | tag |
+ (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
++ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 86b0515..97892f2 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -739,21 +739,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ sg_kunmap_atomic(bounce_addr);
++ bounce_addr = 0;
+ j++;
++ }
+
+- /* if we need to use another bounce buffer */
+- if (srclen || i != orig_sgl_count - 1)
+- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
++ /* if we need to use another bounce buffer */
++ if (srclen && bounce_addr == 0)
++ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+- } else if (srclen == 0 && i == orig_sgl_count - 1) {
+- /* unmap the last bounce that is < PAGE_SIZE */
+- sg_kunmap_atomic(bounce_addr);
+- }
+ }
+
+ sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ }
+
++ if (bounce_addr)
++ sg_kunmap_atomic(bounce_addr);
++
+ local_irq_restore(flags);
+
+ return total_copied;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index d7c6e36..2fe5b61 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -243,7 +243,10 @@ static int spidev_message(struct spidev_data *spidev,
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+- if (total > bufsiz) {
++ /* Check total length of transfers. Also check each
++ * transfer length to avoid arithmetic overflow.
++ */
++ if (total > bufsiz || k_tmp->len > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 41eff7d..b199f1e 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -263,40 +263,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+- struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+- u32 prot_size, len, size;
+- int rc, ret = 1, i;
++ u32 prot_size;
++ int rc, ret = 1;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+- fd_prot->prot_buf = vzalloc(prot_size);
++ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+- fd_prot->prot_sg_nents = cmd->t_prot_nents;
+- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+- fd_prot->prot_sg_nents, GFP_KERNEL);
++ fd_prot->prot_sg_nents = 1;
++ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
++ GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+- size = prot_size;
+-
+- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+-
+- len = min_t(u32, PAGE_SIZE, size);
+- sg_set_buf(sg, buf, len);
+- size -= len;
+- buf += len;
+- }
++ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
++ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
+ }
+
+ if (is_write) {
+@@ -317,7 +309,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ }
+
+ return ret;
+@@ -652,11 +644,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ }
+ } else {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+@@ -672,7 +664,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+@@ -703,7 +695,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+
+ if (ret < 0) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 68511e8..f89b24a 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -314,7 +314,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ return 0;
+ }
+
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+ {
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+@@ -378,7 +378,7 @@ sbc_execute_rw(struct se_cmd *cmd)
+ cmd->data_direction);
+ }
+
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+
+@@ -401,7 +401,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+ return TCM_NO_SENSE;
+ }
+
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+@@ -416,11 +416,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+- * which will not have taken ->caw_mutex yet..
++ * which will not have taken ->caw_sem yet..
+ */
+- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
++ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+ return TCM_NO_SENSE;
+ /*
++ * Handle special case for zero-length COMPARE_AND_WRITE
++ */
++ if (!cmd->data_length)
++ goto out;
++ /*
+ * Immediately exit + release dev->caw_sem if command has already
+ * been failed with a non-zero SCSI status.
+ */
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9e54c0f..6fc3890 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1600,11 +1600,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ transport_complete_task_attr(cmd);
+ /*
+ * Handle special case for COMPARE_AND_WRITE failure, where the
+- * callback is expected to drop the per device ->caw_mutex.
++ * callback is expected to drop the per device ->caw_sem.
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+- cmd->transport_complete_callback(cmd);
++ cmd->transport_complete_callback(cmd, false);
+
+ switch (sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+@@ -1941,8 +1941,12 @@ static void target_complete_ok_work(struct work_struct *work)
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+
+- rc = cmd->transport_complete_callback(cmd);
++ rc = cmd->transport_complete_callback(cmd, true);
+ if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
++ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ !cmd->data_length)
++ goto queue_rsp;
++
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+@@ -1956,6 +1960,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ }
+ }
+
++queue_rsp:
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+@@ -2044,6 +2049,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+ static inline void transport_free_pages(struct se_cmd *cmd)
+ {
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++ /*
++ * Release special case READ buffer payload required for
++ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
++ */
++ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++ transport_free_sgl(cmd->t_bidi_data_sg,
++ cmd->t_bidi_data_nents);
++ cmd->t_bidi_data_sg = NULL;
++ cmd->t_bidi_data_nents = 0;
++ }
+ transport_reset_sgl_orig(cmd);
+ return;
+ }
+@@ -2192,6 +2207,7 @@ sense_reason_t
+ transport_generic_new_cmd(struct se_cmd *cmd)
+ {
+ int ret = 0;
++ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ /*
+ * Determine is the TCM fabric module has already allocated physical
+@@ -2200,7 +2216,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+@@ -2223,6 +2238,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ cmd->data_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ cmd->data_length) {
++ /*
++ * Special case for COMPARE_AND_WRITE with fabrics
++ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
++ */
++ u32 caw_length = cmd->t_task_nolb *
++ cmd->se_dev->dev_attrib.block_size;
++
++ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
++ &cmd->t_bidi_data_nents,
++ caw_length, zero_flag);
++ if (ret < 0)
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * If this command is not a write we can execute it right here,
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index a051a7a..a81f9dd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ dev_dbg(&desc->intf->dev,
+ "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+- dr->wIndex, dr->wLength);
++ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
+ break;
+
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
+ clear_bit(WDM_POLL_RUNNING, &desc->flags);
+ dev_err(&desc->intf->dev,
+ "unknown notification %d received: index %d len %d\n",
+- dr->bNotificationType, dr->wIndex, dr->wLength);
++ dr->bNotificationType,
++ le16_to_cpu(dr->wIndex),
++ le16_to_cpu(dr->wLength));
+ goto exit;
+ }
+
+@@ -408,7 +410,7 @@ static ssize_t wdm_write
+ USB_RECIP_INTERFACE);
+ req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ req->wValue = 0;
+- req->wIndex = desc->inum;
++ req->wIndex = desc->inum; /* already converted */
+ req->wLength = cpu_to_le16(count);
+ set_bit(WDM_IN_USE, &desc->flags);
+ desc->outbuf = buf;
+@@ -422,7 +424,7 @@ static ssize_t wdm_write
+ rv = usb_translate_errors(rv);
+ } else {
+ dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+- req->wIndex);
++ le16_to_cpu(req->wIndex));
+ }
+ out:
+ usb_autopm_put_interface(desc->intf);
+@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+ desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
+ desc->irq->wValue = 0;
+- desc->irq->wIndex = desc->inum;
++ desc->irq->wIndex = desc->inum; /* already converted */
+ desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
+
+ usb_fill_control_urb(
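/*
 * Illustrative standalone sketch (not taken from the patch above): the
 * cdc-wdm hunks convert wIndex/wLength with le16_to_cpu() before logging
 * because USB control-request fields are little-endian on the wire.  The
 * demo_le16_to_cpu() helper below is a made-up stand-in that mimics the
 * conversion so the effect can be seen on any host; inside the driver the
 * kernel's le16_to_cpu() is the real interface.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t demo_le16_to_cpu(const uint8_t raw[2])
{
	/* byte 0 is the least-significant byte in little-endian order */
	return (uint16_t)raw[0] | ((uint16_t)raw[1] << 8);
}

int main(void)
{
	uint8_t wire_wLength[2] = { 0x34, 0x12 };	/* 0x1234 on the wire */

	printf("wLength = 0x%04x\n", (unsigned int)demo_le16_to_cpu(wire_wLength));
	return 0;
}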
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d2bd9d7..1847a7d 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3289,10 +3289,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
+ port1, status);
+ } else {
+- /* drive resume for at least 20 msec */
++ /* drive resume for USB_RESUME_TIMEOUT msec */
+ dev_dbg(&udev->dev, "usb %sresume\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
+- msleep(25);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Virtual root hubs can trigger on GET_PORT_STATUS to
+ * stop resume signaling. Then finish the resume
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 4d918ed..0f99800 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1501,7 +1501,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ writel(0, hsotg->regs + PCGCTL);
+- usleep_range(20000, 40000);
++ msleep(USB_RESUME_TIMEOUT);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index d742bed..82df926 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -528,7 +528,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
+ usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+ usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
++ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
+
+ /*
+ * The Superspeed USB Capability descriptor shall be implemented by all
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 98a89d1..8aa4ba0 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
+index ba94990..3e3926a 100644
+--- a/drivers/usb/host/fusbh200-hcd.c
++++ b/drivers/usb/host/fusbh200-hcd.c
+@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+- /* resume signaling for 20 msec */
+ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ fusbh200->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fusbh200->port_c_suspend);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 240e792..b62298f 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1487,7 +1487,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
+ spin_unlock_irq(&isp116x->lock);
+
+ hcd->state = HC_STATE_RESUMING;
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Go operational */
+ spin_lock_irq(&isp116x->lock);
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index 110b4b9..f130bb2 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -2300,7 +2300,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
+ rh->port &= ~USB_PORT_STAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+ r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+- msleep(50);
++ msleep(USB_RESUME_TIMEOUT);
+ r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+ }
+
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index a517151..0f53cc8 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1259,7 +1259,7 @@ sl811h_hub_control(
+ sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+ mod_timer(&sl811->timer, jiffies
+- + msecs_to_jiffies(20));
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case USB_PORT_FEAT_POWER:
+ port_power(sl811, 0);
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 93e17b1..98c66d8 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -165,7 +165,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->ports_timeout = jiffies +
+- msecs_to_jiffies(25);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ usb_hcd_start_port_resume(
+ &uhci_to_hcd(uhci)->self, port);
+
+@@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ uhci_finish_suspend(uhci, port, port_addr);
+
+ /* USB v2.0 7.1.7.5 */
+- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
++ uhci->ports_timeout = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_POWER:
+ /* UHCI has no power switching */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index a95eee8..05185b9 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1768,7 +1768,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ } else {
+ xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ bus_state->resume_done[faked_port_index] = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(faked_port_index, &bus_state->resuming_ports);
+ mod_timer(&hcd->rh_timer,
+ bus_state->resume_done[faked_port_index]);
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 0180eef..964ebaf 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
+
+ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
+ {
+- return res == match_data;
++ struct usb_phy **phy = res;
++
++ return *phy == match_data;
+ }
+
+ /**
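/*
 * Illustrative sketch, assuming nothing beyond standard C: the phy.c fix
 * above works because a devres match callback receives a pointer to the
 * managed data area, and for devm_usb_phy that area holds a pointer to
 * the phy.  Comparing the area itself against the phy never matches; the
 * stored pointer has to be dereferenced first.  match_slot/stored_slot
 * are invented names for this demonstration.
 */
#include <stdio.h>

static int match_slot(void *res, void *match_data)
{
	void **slot = res;	/* res points at the slot, not at the object */

	return *slot == match_data;
}

int main(void)
{
	int target;
	void *stored_slot[1] = { &target };	/* devres-style data area */

	printf("compare area itself: %d\n", (void *)stored_slot == (void *)&target);	/* 0 */
	printf("compare stored ptr : %d\n", match_slot(stored_slot, &target));		/* 1 */
	return 0;
}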
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index f4d7b2f..78f4608 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -751,6 +751,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ int elf_prot = 0, elf_flags;
+ unsigned long k, vaddr;
++ unsigned long total_size = 0;
+
+ if (elf_ppnt->p_type != PT_LOAD)
+ continue;
+@@ -815,10 +816,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++ total_size = total_mapping_size(elf_phdata,
++ loc->elf_ex.e_phnum);
++ if (!total_size) {
++ error = -EINVAL;
++ goto out_free_dentry;
++ }
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+- elf_prot, elf_flags, 0);
++ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(error)) {
+ send_sig(SIGKILL, current, 0);
+ retval = IS_ERR((void *)error) ?
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d2f1c01..794d7c6 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6645,12 +6645,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ return -ENOSPC;
+ }
+
+- if (btrfs_test_opt(root, DISCARD))
+- ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ if (pin)
+ pin_down_extent(root, cache, start, len, 1);
+ else {
++ if (btrfs_test_opt(root, DISCARD))
++ ret = btrfs_discard_extent(root, start, len, NULL);
+ btrfs_add_free_space(cache, start, len);
+ btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 0b72006..3e16042 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2708,6 +2708,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+ if (src == dst)
+ return -EINVAL;
+
++ if (len == 0)
++ return 0;
++
+ btrfs_double_lock(src, loff, dst, dst_loff, len);
+
+ ret = extent_same_check_offsets(src, loff, len);
+@@ -3226,6 +3229,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ if (off + len == src->i_size)
+ len = ALIGN(src->i_size, bs) - off;
+
++ if (len == 0) {
++ ret = 0;
++ goto out_unlock;
++ }
++
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ !IS_ALIGNED(destoff, bs))
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index ad8328d..488e987 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -324,22 +324,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
+ /*
+ * Check if the attribute is in a supported namespace.
+ *
+- * This applied after the check for the synthetic attributes in the system
++ * This is applied after the check for the synthetic attributes in the system
+ * namespace.
+ */
+-static bool btrfs_is_valid_xattr(const char *name)
++static int btrfs_is_valid_xattr(const char *name)
+ {
+- return !strncmp(name, XATTR_SECURITY_PREFIX,
+- XATTR_SECURITY_PREFIX_LEN) ||
+- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
+- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
++ int len = strlen(name);
++ int prefixlen = 0;
++
++ if (!strncmp(name, XATTR_SECURITY_PREFIX,
++ XATTR_SECURITY_PREFIX_LEN))
++ prefixlen = XATTR_SECURITY_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
++ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++ prefixlen = XATTR_USER_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
++ prefixlen = XATTR_BTRFS_PREFIX_LEN;
++ else
++ return -EOPNOTSUPP;
++
++ /*
++ * The name cannot consist of just prefix
++ */
++ if (len <= prefixlen)
++ return -EINVAL;
++
++ return 0;
+ }
+
+ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size)
+ {
++ int ret;
++
+ /*
+ * If this is a request for a synthetic attribute in the system.*
+ * namespace use the generic infrastructure to resolve a handler
+@@ -348,8 +368,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_getxattr(dentry, name, buffer, size);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+ return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ }
+
+@@ -357,6 +378,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -373,8 +395,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_setxattr(dentry, name, value, size, flags);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
+@@ -390,6 +413,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -406,8 +430,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_removexattr(dentry, name);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
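/*
 * Illustrative sketch mirroring the validation added above: an xattr
 * name must carry a known namespace prefix *and* a non-empty remainder,
 * so a bare "user." is rejected with -EINVAL while an unknown namespace
 * still yields -EOPNOTSUPP.  The prefix table, errno macros and
 * is_valid_name() helper are local to this demo, not kernel symbols.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_EOPNOTSUPP	95
#define DEMO_EINVAL	22

static int is_valid_name(const char *name)
{
	static const char *prefixes[] = {
		"security.", "system.", "trusted.", "user.", "btrfs."
	};
	size_t i;

	for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
		size_t plen = strlen(prefixes[i]);

		if (!strncmp(name, prefixes[i], plen))
			return strlen(name) > plen ? 0 : -DEMO_EINVAL;
	}
	return -DEMO_EOPNOTSUPP;
}

int main(void)
{
	printf("user.foo -> %d\n", is_valid_name("user.foo"));	/* 0 */
	printf("user.    -> %d\n", is_valid_name("user."));	/* -22 */
	printf("bogus.x  -> %d\n", is_valid_name("bogus.x"));	/* -95 */
	return 0;
}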
+diff --git a/fs/exec.c b/fs/exec.c
+index ea4449d..05f1942 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1268,6 +1268,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ spin_unlock(&p->fs->lock);
+ }
+
++static void bprm_fill_uid(struct linux_binprm *bprm)
++{
++ struct inode *inode;
++ unsigned int mode;
++ kuid_t uid;
++ kgid_t gid;
++
++ /* clear any previous set[ug]id data from a previous binary */
++ bprm->cred->euid = current_euid();
++ bprm->cred->egid = current_egid();
++
++ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++ return;
++
++ if (current->no_new_privs)
++ return;
++
++ inode = file_inode(bprm->file);
++ mode = ACCESS_ONCE(inode->i_mode);
++ if (!(mode & (S_ISUID|S_ISGID)))
++ return;
++
++ /* Be careful if suid/sgid is set */
++ mutex_lock(&inode->i_mutex);
++
++ /* reload atomically mode/uid/gid now that lock held */
++ mode = inode->i_mode;
++ uid = inode->i_uid;
++ gid = inode->i_gid;
++ mutex_unlock(&inode->i_mutex);
++
++ /* We ignore suid/sgid if there are no mappings for them in the ns */
++ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
++ !kgid_has_mapping(bprm->cred->user_ns, gid))
++ return;
++
++ if (mode & S_ISUID) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->euid = uid;
++ }
++
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->egid = gid;
++ }
++}
++
+ /*
+ * Fill the binprm structure from the inode.
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+@@ -1276,36 +1323,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ */
+ int prepare_binprm(struct linux_binprm *bprm)
+ {
+- struct inode *inode = file_inode(bprm->file);
+- umode_t mode = inode->i_mode;
+ int retval;
+
+-
+- /* clear any previous set[ug]id data from a previous binary */
+- bprm->cred->euid = current_euid();
+- bprm->cred->egid = current_egid();
+-
+- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+- !current->no_new_privs &&
+- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+- /* Set-uid? */
+- if (mode & S_ISUID) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->euid = inode->i_uid;
+- }
+-
+- /* Set-gid? */
+- /*
+- * If setgid is set but no group execute bit then this
+- * is a candidate for mandatory locking, not a setgid
+- * executable.
+- */
+- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->egid = inode->i_gid;
+- }
+- }
++ bprm_fill_uid(bprm);
+
+ /* fill in binprm security blob */
+ retval = security_bprm_set_creds(bprm);
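/*
 * Illustrative sketch of the pattern bprm_fill_uid() above follows:
 * a cheap unlocked check first, then re-read the authoritative values
 * under the lock before acting on them, so a concurrent change between
 * the check and the use cannot slip through.  A pthread mutex stands in
 * for inode->i_mutex; demo_inode and apply_creds() are invented names.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_inode {
	pthread_mutex_t lock;
	unsigned int mode;
	unsigned int uid;
};

static void apply_creds(struct demo_inode *inode, unsigned int *euid)
{
	unsigned int mode, uid;

	if (!(inode->mode & 04000))		/* unlocked fast path: no setuid bit */
		return;

	pthread_mutex_lock(&inode->lock);	/* reload mode/uid under the lock */
	mode = inode->mode;
	uid = inode->uid;
	pthread_mutex_unlock(&inode->lock);

	if (mode & 04000)
		*euid = uid;
}

int main(void)
{
	struct demo_inode inode = { PTHREAD_MUTEX_INITIALIZER, 04755, 1000 };
	unsigned int euid = 0;

	apply_creds(&inode, &euid);
	printf("euid -> %u\n", euid);		/* 1000 */
	return 0;
}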
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 2dcbfb6..bc7e37b 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1869,7 +1869,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- struct buffer_head *bh;
++ struct buffer_head *bh = NULL;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+ struct super_block *sb;
+@@ -1893,14 +1893,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return retval;
+ if (retval == 1) {
+ retval = 0;
+- return retval;
++ goto out;
+ }
+ }
+
+ if (is_dx(dir)) {
+ retval = ext4_dx_add_entry(handle, dentry, inode);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+- return retval;
++ goto out;
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ dx_fallback++;
+ ext4_mark_inode_dirty(handle, dir);
+@@ -1912,14 +1912,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return PTR_ERR(bh);
+
+ retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+- if (retval != -ENOSPC) {
+- brelse(bh);
+- return retval;
+- }
++ if (retval != -ENOSPC)
++ goto out;
+
+ if (blocks == 1 && !dx_fallback &&
+- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+- return make_indexed_dir(handle, dentry, inode, bh);
++ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++ retval = make_indexed_dir(handle, dentry, inode, bh);
++ bh = NULL; /* make_indexed_dir releases bh */
++ goto out;
++ }
+ brelse(bh);
+ }
+ bh = ext4_append(handle, dir, &block);
+@@ -1935,6 +1936,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ }
+
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ brelse(bh);
+ if (retval == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+diff --git a/fs/namei.c b/fs/namei.c
+index 0dd72c8..ccb8000 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1545,7 +1545,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+
+ if (should_follow_link(path->dentry, follow)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ err = -ECHILD;
+ goto out_err;
+ }
+@@ -2992,7 +2993,8 @@ finish_lookup:
+
+ if (should_follow_link(path->dentry, !symlink_ok)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ error = -ECHILD;
+ goto out;
+ }
+diff --git a/fs/open.c b/fs/open.c
+index 2ed7325..17679f2 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -539,6 +539,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+
++retry_deleg:
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ if (!uid_valid(uid))
+@@ -555,7 +556,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+-retry_deleg:
+ mutex_lock(&inode->i_mutex);
+ error = security_path_chown(path, uid, gid);
+ if (!error)
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 68a3ada..8fc12f8 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -198,9 +198,29 @@ typedef int INT32;
+ typedef s32 acpi_native_int;
+
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for 32-bit
++ * none PAE environment. ASL compiler may always define this to generate
++ * 32-bit OSPM compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+
++#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses can
++ * wrap over the 32-bit boundary on 32-bit PAE environment.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
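/*
 * Illustrative arithmetic sketch for the actypes.h change above: with a
 * 32-bit acpi_physical_address, adding an offset to an address near the
 * 4 GiB boundary wraps around (the PAE problem cited in the bug report),
 * while the 64-bit typedef keeps the full value.  Plain stdint types
 * stand in for the ACPI typedefs here.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t pa32 = 0xfffff000u;	/* 4 GiB - 4 KiB */
	uint64_t pa64 = 0xfffff000u;

	printf("32-bit: 0x%08" PRIx32 "\n", (uint32_t)(pa32 + 0x2000u));	/* wraps to 0x00001000 */
	printf("64-bit: 0x%09" PRIx64 "\n", pa64 + 0x2000u);			/* 0x100001000 */
	return 0;
}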
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index b402eb6..579912c 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
+index f1a24b5..b58fd66 100644
+--- a/include/asm-generic/sections.h
++++ b/include/asm-generic/sections.h
+@@ -3,6 +3,8 @@
+
+ /* References to section boundaries */
+
++#include <linux/compiler.h>
++
+ /*
+ * Usage guidelines:
+ * _text, _data: architecture specific, don't use them in arch-independent code
+@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
+ /* Start and end of .ctors section - used for constructor calls. */
+ extern char __ctors_start[], __ctors_end[];
+
++extern __visible const void __nosave_begin, __nosave_end;
++
+ /* function descriptor handling (if any). Override
+ * in asm/sections.h */
+ #ifndef dereference_function_descriptor
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ad8f859..ab31337 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -661,6 +661,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+
+ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
++struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ struct sk_buff *build_skb(void *data, unsigned int frag_size);
+ static inline struct sk_buff *alloc_skb(unsigned int size,
+ gfp_t priority)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7f6eb85..49466be 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -206,6 +206,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES 32
+ #define USB_MAXIADS (USB_MAXINTERFACES/2)
+
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT 40 /* ms */
++
+ /**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 34932540..e4b9e01 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -513,7 +513,7 @@ struct se_cmd {
+ sense_reason_t (*execute_cmd)(struct se_cmd *);
+ sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
+ u32, enum dma_data_direction);
+- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+
+ unsigned char *t_task_cdb;
+ unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 1f4bcb3..be9760f 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -720,6 +720,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ unsigned long data)
+ {
++ bool need_siglock;
++
+ if (!valid_signal(data))
+ return -EIO;
+
+@@ -747,8 +749,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ user_disable_single_step(child);
+ }
+
++ /*
++ * Change ->exit_code and ->state under siglock to avoid the race
++ * with wait_task_stopped() in between; a non-zero ->exit_code will
++ * wrongly look like another report from tracee.
++ *
++ * Note that we need siglock even if ->exit_code == data and/or this
++ * status was not reported yet, the new status must not be cleared by
++ * wait_task_stopped() after resume.
++ *
++ * If data == 0 we do not care if wait_task_stopped() reports the old
++ * status and clears the code too; this can't race with the tracee, it
++ * takes siglock after resume.
++ */
++ need_siglock = data && !thread_group_empty(current);
++ if (need_siglock)
++ spin_lock_irq(&child->sighand->siglock);
+ child->exit_code = data;
+ wake_up_state(child, __TASK_TRACED);
++ if (need_siglock)
++ spin_unlock_irq(&child->sighand->siglock);
+
+ return 0;
+ }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 490fcbb..93be750 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -657,9 +657,13 @@ static void run_ksoftirqd(unsigned int cpu)
+ * in the task stack here.
+ */
+ __do_softirq();
+- rcu_note_context_switch(cpu);
+ local_irq_enable();
+ cond_resched();
++
++ preempt_disable();
++ rcu_note_context_switch(cpu);
++ preempt_enable();
++
+ return;
+ }
+ local_irq_enable();
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 774a080..da41de9 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2651,7 +2651,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+
+ static __always_inline int trace_recursive_lock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+ int bit;
+
+ if (in_interrupt()) {
+@@ -2668,18 +2668,17 @@ static __always_inline int trace_recursive_lock(void)
+ return 1;
+
+ val |= (1 << bit);
+- this_cpu_write(current_context, val);
++ __this_cpu_write(current_context, val);
+
+ return 0;
+ }
+
+ static __always_inline void trace_recursive_unlock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+
+- val--;
+- val &= this_cpu_read(current_context);
+- this_cpu_write(current_context, val);
++ val &= val & (val - 1);
++ __this_cpu_write(current_context, val);
+ }
+
+ #else
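/*
 * Illustrative sketch of the bit identity the trace_recursive_unlock()
 * rewrite above relies on: v & (v - 1) clears the lowest set bit of v
 * (the extra "val &=" in the hunk is a no-op, the result equals plain
 * val & (val - 1)).  Standalone demo, no kernel interfaces involved.
 */
#include <stdio.h>

int main(void)
{
	unsigned int val = 0x0b;	/* bits 0, 1 and 3 set */

	val = val & (val - 1);		/* clears bit 0 -> 0x0a */
	printf("after first clear : 0x%02x\n", val);

	val = val & (val - 1);		/* clears bit 1 -> 0x08 */
	printf("after second clear: 0x%02x\n", val);
	return 0;
}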
+diff --git a/lib/string.c b/lib/string.c
+index 43d0781..cb9ea21 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -598,7 +598,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ memset(s, 0, count);
+- OPTIMIZER_HIDE_VAR(s);
++ barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+
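/*
 * Illustrative userspace counterpart of the memzero_explicit() change
 * above: without a compiler barrier after memset(), the store to a
 * buffer that is about to go out of scope may be optimized away.  The
 * empty asm with a "memory" clobber below is the usual GCC/Clang way to
 * force the write to stay; the kernel's barrier() expands to much the
 * same construct.  secure_wipe() is a made-up name for this sketch.
 */
#include <string.h>
#include <stdio.h>

static void secure_wipe(void *s, size_t n)
{
	memset(s, 0, n);
	__asm__ __volatile__("" : : "r"(s) : "memory");	/* compiler barrier */
}

int main(void)
{
	char secret[16];

	snprintf(secret, sizeof(secret), "hunter2");
	/* ... use the secret ... */
	secure_wipe(secret, sizeof(secret));	/* wipe survives optimization */
	printf("wiped first byte: %d\n", secret[0]);
	return 0;
}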
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e2b1bba..69ec61a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -278,13 +278,14 @@ nodata:
+ EXPORT_SYMBOL(__alloc_skb);
+
+ /**
+- * build_skb - build a network buffer
++ * __build_skb - build a network buffer
+ * @data: data buffer provided by caller
+- * @frag_size: size of fragment, or 0 if head was kmalloced
++ * @frag_size: size of data, or 0 if head was kmalloced
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc() only if
+- * @frag_size is 0, otherwise data should come from the page allocator.
++ * @frag_size is 0, otherwise data should come from the page allocator
++ * or vmalloc()
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+@@ -295,7 +296,7 @@ EXPORT_SYMBOL(__alloc_skb);
+ * before giving packet to stack.
+ * RX rings only contains data buffers, not full skbs.
+ */
+-struct sk_buff *build_skb(void *data, unsigned int frag_size)
++struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+ {
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+@@ -309,7 +310,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+- skb->head_frag = frag_size != 0;
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+@@ -326,6 +326,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ return skb;
+ }
++
++/* build_skb() is wrapper over __build_skb(), that specifically
++ * takes care of skb->head and skb->pfmemalloc
++ * This means that if @frag_size is not zero, then @data must be backed
++ * by a page fragment, not kmalloc() or vmalloc()
++ */
++struct sk_buff *build_skb(void *data, unsigned int frag_size)
++{
++ struct sk_buff *skb = __build_skb(data, frag_size);
++
++ if (skb && frag_size) {
++ skb->head_frag = 1;
++ if (virt_to_head_page(data)->pfmemalloc)
++ skb->pfmemalloc = 1;
++ }
++ return skb;
++}
+ EXPORT_SYMBOL(build_skb);
+
+ struct netdev_alloc_cache {
+@@ -352,7 +369,8 @@ refill:
+ gfp_t gfp = gfp_mask;
+
+ if (order)
+- gfp |= __GFP_COMP | __GFP_NOWARN;
++ gfp |= __GFP_COMP | __GFP_NOWARN |
++ __GFP_NOMEMALLOC;
+ nc->frag.page = alloc_pages(gfp, order);
+ if (likely(nc->frag.page))
+ break;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index ecb34b5..57075c4 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -127,6 +127,9 @@ int ip_forward(struct sk_buff *skb)
+ struct rtable *rt; /* Route we use */
+ struct ip_options *opt = &(IPCB(skb)->opt);
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 8c70c73..a68cd71 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2595,39 +2595,65 @@ begin_fwd:
+ }
+ }
+
+-/* Send a fin. The caller locks the socket for us. This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow to exceed memory limits for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++ int amt, status;
++
++ if (size <= sk->sk_forward_alloc)
++ return;
++ amt = sk_mem_pages(size);
++ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++ sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+ */
+ void tcp_send_fin(struct sock *sk)
+ {
++ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_write_queue_tail(sk);
+- int mss_now;
+
+- /* Optimization, tack on the FIN if we have a queue of
+- * unsent frames. But be careful about outgoing SACKS
+- * and IP options.
++ /* Optimization, tack on the FIN if we have one skb in write queue and
++ * this skb was not yet sent, or we are under memory pressure.
++ * Note: in the latter case, FIN packet will be sent after a timeout,
++ * as TCP stack thinks it has already been transmitted.
+ */
+- mss_now = tcp_current_mss(sk);
+-
+- if (tcp_send_head(sk) != NULL) {
+- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+- TCP_SKB_CB(skb)->end_seq++;
++ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(tskb)->end_seq++;
+ tp->write_seq++;
++ if (!tcp_send_head(sk)) {
++ /* This means tskb was already sent.
++ * Pretend we included the FIN on previous transmit.
++ * We need to set tp->snd_nxt to the value it would have
++ * if FIN had been sent. This is because retransmit path
++ * does not change tp->snd_nxt.
++ */
++ tp->snd_nxt++;
++ return;
++ }
+ } else {
+- /* Socket is locked, keep trying until memory is available. */
+- for (;;) {
+- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+- if (skb)
+- break;
+- yield();
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ if (unlikely(!skb)) {
++ if (tskb)
++ goto coalesce;
++ return;
+ }
++ skb_reserve(skb, MAX_TCP_HEADER);
++ sk_forced_wmem_schedule(sk, skb->truesize);
+ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ tcp_init_nondata_skb(skb, tp->write_seq,
+ TCPHDR_ACK | TCPHDR_FIN);
+ tcp_queue_skb(sk, skb);
+ }
+- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1d52506..a0b0ea9 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1624,13 +1624,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
+ if (data == NULL)
+ return NULL;
+
+- skb = build_skb(data, size);
++ skb = __build_skb(data, size);
+ if (skb == NULL)
+ vfree(data);
+- else {
+- skb->head_frag = 0;
++ else
+ skb->destructor = netlink_skb_destructor;
+- }
+
+ return skb;
+ }
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e..53745f4 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+ u32 value2;
+- unsigned long flags;
+ u32 rate;
+
+ if (emu->card_capabilities->emu_model) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x38, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x1) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x2a, &value);
+ snd_emu1010_fpga_read(emu, 0x2b, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ } else {
+ snd_iprintf(buffer, "ADAT Unlocked\n");
+ }
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x20, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x4) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x28, &value);
+ snd_emu1010_fpga_read(emu, 0x29, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ } else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+- unsigned long flags;
+ int i;
+ snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+
+ for(i = 0; i < 0x40; i+=1) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, i, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ }
+ }
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index 5e3bc3c..f40a7a4 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -384,18 +384,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int davinci_evm_remove(struct platform_device *pdev)
+-{
+- struct snd_soc_card *card = platform_get_drvdata(pdev);
+-
+- snd_soc_unregister_card(card);
+-
+- return 0;
+-}
+-
+ static struct platform_driver davinci_evm_driver = {
+ .probe = davinci_evm_probe,
+- .remove = davinci_evm_remove,
+ .driver = {
+ .name = "davinci_evm",
+ .owner = THIS_MODULE,
+diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
+index dcc6652..deb3569 100644
+--- a/tools/lib/traceevent/kbuffer-parse.c
++++ b/tools/lib/traceevent/kbuffer-parse.c
+@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ switch (type_len) {
+ case KBUFFER_TYPE_PADDING:
+ *length = read_4(kbuf, data);
+- data += *length;
+ break;
+
+ case KBUFFER_TYPE_TIME_EXTEND:
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a36..4039854 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC = $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT := $(PWD)
++BUILD_OUTPUT := $(CURDIR)
+ PREFIX := /usr
+ DESTDIR :=
+
++ifeq ("$(origin O)", "command line")
++ BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS += -Wall
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6611253..eed250e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1549,8 +1549,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ ghc->generation = slots->generation;
+ ghc->len = len;
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ ghc->hva += offset;
+ } else {
+ /*
diff --git a/3.14.40/4420_grsecurity-3.1-3.14.40-201505042052.patch b/3.14.41/4420_grsecurity-3.1-3.14.41-201505091723.patch
index e1edd45..9842fc2 100644
--- a/3.14.40/4420_grsecurity-3.1-3.14.40-201505042052.patch
+++ b/3.14.41/4420_grsecurity-3.1-3.14.41-201505091723.patch
@@ -295,7 +295,7 @@ index 5d91ba1..ef1d374 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index 070e0eb..e3359b0 100644
+index 7a60d4a..74e3f6c 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -1701,14 +1701,14 @@ index 6ddbe44..b5e38b1a 100644
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index f4b46d3..abc9b2b 100644
+index 051b726..abc9b2b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
@@ -26591,10 +26591,10 @@ index ca7f0d5..8996469 100644
CFI_ENDPROC
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 3fb8d95..254dc51 100644
+index 1a1ff42..ea87a2b 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
-@@ -36,7 +36,8 @@
+@@ -37,7 +37,8 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
@@ -26604,7 +26604,7 @@ index 3fb8d95..254dc51 100644
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
-@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
+@@ -93,7 +94,7 @@ void arch_task_cache_init(void)
task_xstate_cachep =
kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
@@ -26613,7 +26613,7 @@ index 3fb8d95..254dc51 100644
}
/*
-@@ -105,7 +106,7 @@ void exit_thread(void)
+@@ -106,7 +107,7 @@ void exit_thread(void)
unsigned long *bp = t->io_bitmap_ptr;
if (bp) {
@@ -26622,7 +26622,7 @@ index 3fb8d95..254dc51 100644
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
-@@ -125,6 +126,9 @@ void flush_thread(void)
+@@ -126,6 +127,9 @@ void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -26632,7 +26632,7 @@ index 3fb8d95..254dc51 100644
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
drop_init_fpu(tsk);
-@@ -271,7 +275,7 @@ static void __exit_idle(void)
+@@ -272,7 +276,7 @@ static void __exit_idle(void)
void exit_idle(void)
{
/* idle loop has pid 0 */
@@ -26641,7 +26641,7 @@ index 3fb8d95..254dc51 100644
return;
__exit_idle();
}
-@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
+@@ -328,7 +332,7 @@ bool xen_set_default_idle(void)
return ret;
}
#endif
@@ -26650,7 +26650,7 @@ index 3fb8d95..254dc51 100644
{
local_irq_disable();
/*
-@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
+@@ -506,16 +510,37 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
@@ -38386,7 +38386,7 @@ index 969c3c2..9b72956 100644
}
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
-index 45d0fa7..89244c9 100644
+index 12b39dc..c244bbd 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
@@ -42945,10 +42945,10 @@ index cedc6da..2c3da2a 100644
if (atomic_read(&uhid->report_done))
goto unlock;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index e99e71a..e4ae549 100644
+index 356f22f..7dfce95 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
-@@ -365,8 +365,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+@@ -368,8 +368,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
unsigned long flags;
int ret = 0;
@@ -48354,7 +48354,7 @@ index c11ecbc..13bb299 100644
#include "ftgmac100.h"
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
-index 8be5b40..081bc1b 100644
+index 8be5b40..081bc1b6 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -31,6 +31,8 @@
@@ -50454,7 +50454,7 @@ index c5e082f..d6307a0 100644
1, asus->debug.method_id,
&input, &output);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
-index 7297df2..b832a73 100644
+index 2d9d198..dbf6b11 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -767,7 +767,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
@@ -53288,7 +53288,7 @@ index 093b8cb..9f07935 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 9e54c0f..9c8d784 100644
+index 6fc3890..bf962ce 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1154,7 +1154,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -54966,7 +54966,7 @@ index ee6c556..001eb9e 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index d2bd9d7..1ddb53a 100644
+index 1847a7d..7fa9e59 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
@@ -59185,7 +59185,7 @@ index ca0ba15..0fa3257 100644
fd_offset + ex.a_text);
if (error != N_DATADDR(ex)) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index f4d7b2f..97fd3fc 100644
+index 78f4608..0f8b54b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -34,6 +34,7 @@
@@ -59819,10 +59819,15 @@ index f4d7b2f..97fd3fc 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -815,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -816,12 +1253,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
+- total_size = total_mapping_size(elf_phdata,
+- loc->elf_ex.e_phnum);
+- if (!total_size) {
+- error = -EINVAL;
+- goto out_free_dentry;
+
+#ifdef CONFIG_PAX_RANDMMAP
+ /* PaX: randomize base address at the default exe base if requested */
@@ -59834,13 +59839,14 @@ index f4d7b2f..97fd3fc 100644
+#endif
+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
+ elf_flags |= MAP_FIXED;
-+ }
+ }
+#endif
+
++ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -847,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -854,9 +1300,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -59853,7 +59859,7 @@ index f4d7b2f..97fd3fc 100644
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
-@@ -888,17 +1339,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -895,17 +1341,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -59905,7 +59911,7 @@ index f4d7b2f..97fd3fc 100644
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
-@@ -1120,7 +1599,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+@@ -1127,7 +1601,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -59914,7 +59920,7 @@ index f4d7b2f..97fd3fc 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1158,7 +1637,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1165,7 +1639,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -59923,7 +59929,7 @@ index f4d7b2f..97fd3fc 100644
goto whole;
/*
-@@ -1365,9 +1844,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1372,9 +1846,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -59935,7 +59941,7 @@ index f4d7b2f..97fd3fc 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1376,7 +1855,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+@@ -1383,7 +1857,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -59944,7 +59950,7 @@ index f4d7b2f..97fd3fc 100644
set_fs(old_fs);
fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
-@@ -2000,14 +2479,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -2007,14 +2481,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
}
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -59961,7 +59967,7 @@ index f4d7b2f..97fd3fc 100644
return size;
}
-@@ -2098,7 +2577,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2105,7 +2579,7 @@ static int elf_core_dump(struct coredump_params *cprm)
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
@@ -59970,7 +59976,7 @@ index f4d7b2f..97fd3fc 100644
offset += elf_core_extra_data_size();
e_shoff = offset;
-@@ -2126,7 +2605,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2133,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
@@ -59979,7 +59985,7 @@ index f4d7b2f..97fd3fc 100644
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -2159,7 +2638,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2166,7 +2640,7 @@ static int elf_core_dump(struct coredump_params *cprm)
unsigned long addr;
unsigned long end;
@@ -59988,7 +59994,7 @@ index f4d7b2f..97fd3fc 100644
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
-@@ -2200,6 +2679,167 @@ out:
+@@ -2207,6 +2681,167 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -61606,7 +61612,7 @@ index e4141f2..d8263e8 100644
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
diff --git a/fs/exec.c b/fs/exec.c
-index ea4449d..747fc21 100644
+index 05f1942..747fc21 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -56,8 +56,20 @@
@@ -61939,7 +61945,7 @@ index ea4449d..747fc21 100644
if (likely(leader->exit_state))
break;
__set_current_state(TASK_KILLABLE);
-@@ -1261,13 +1344,60 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+@@ -1261,7 +1344,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
@@ -61948,98 +61954,7 @@ index ea4449d..747fc21 100644
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
p->fs->in_exec = 1;
- spin_unlock(&p->fs->lock);
- }
-
-+static void bprm_fill_uid(struct linux_binprm *bprm)
-+{
-+ struct inode *inode;
-+ unsigned int mode;
-+ kuid_t uid;
-+ kgid_t gid;
-+
-+ /* clear any previous set[ug]id data from a previous binary */
-+ bprm->cred->euid = current_euid();
-+ bprm->cred->egid = current_egid();
-+
-+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
-+ return;
-+
-+ if (current->no_new_privs)
-+ return;
-+
-+ inode = file_inode(bprm->file);
-+ mode = ACCESS_ONCE(inode->i_mode);
-+ if (!(mode & (S_ISUID|S_ISGID)))
-+ return;
-+
-+ /* Be careful if suid/sgid is set */
-+ mutex_lock(&inode->i_mutex);
-+
-+ /* reload atomically mode/uid/gid now that lock held */
-+ mode = inode->i_mode;
-+ uid = inode->i_uid;
-+ gid = inode->i_gid;
-+ mutex_unlock(&inode->i_mutex);
-+
-+ /* We ignore suid/sgid if there are no mappings for them in the ns */
-+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
-+ !kgid_has_mapping(bprm->cred->user_ns, gid))
-+ return;
-+
-+ if (mode & S_ISUID) {
-+ bprm->per_clear |= PER_CLEAR_ON_SETID;
-+ bprm->cred->euid = uid;
-+ }
-+
-+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-+ bprm->per_clear |= PER_CLEAR_ON_SETID;
-+ bprm->cred->egid = gid;
-+ }
-+}
-+
- /*
- * Fill the binprm structure from the inode.
- * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
-@@ -1276,36 +1406,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
- */
- int prepare_binprm(struct linux_binprm *bprm)
- {
-- struct inode *inode = file_inode(bprm->file);
-- umode_t mode = inode->i_mode;
- int retval;
-
--
-- /* clear any previous set[ug]id data from a previous binary */
-- bprm->cred->euid = current_euid();
-- bprm->cred->egid = current_egid();
--
-- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
-- !current->no_new_privs &&
-- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
-- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
-- /* Set-uid? */
-- if (mode & S_ISUID) {
-- bprm->per_clear |= PER_CLEAR_ON_SETID;
-- bprm->cred->euid = inode->i_uid;
-- }
--
-- /* Set-gid? */
-- /*
-- * If setgid is set but no group execute bit then this
-- * is a candidate for mandatory locking, not a setgid
-- * executable.
-- */
-- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
-- bprm->per_clear |= PER_CLEAR_ON_SETID;
-- bprm->cred->egid = inode->i_gid;
-- }
-- }
-+ bprm_fill_uid(bprm);
-
- /* fill in binprm security blob */
- retval = security_bprm_set_creds(bprm);
-@@ -1437,6 +1540,31 @@ static int exec_binprm(struct linux_binprm *bprm)
+@@ -1457,6 +1540,31 @@ static int exec_binprm(struct linux_binprm *bprm)
return ret;
}
@@ -62071,7 +61986,7 @@ index ea4449d..747fc21 100644
/*
* sys_execve() executes a new program.
*/
-@@ -1444,6 +1572,11 @@ static int do_execve_common(struct filename *filename,
+@@ -1464,6 +1572,11 @@ static int do_execve_common(struct filename *filename,
struct user_arg_ptr argv,
struct user_arg_ptr envp)
{
@@ -62083,7 +61998,7 @@ index ea4449d..747fc21 100644
struct linux_binprm *bprm;
struct file *file;
struct files_struct *displaced;
-@@ -1452,6 +1585,8 @@ static int do_execve_common(struct filename *filename,
+@@ -1472,6 +1585,8 @@ static int do_execve_common(struct filename *filename,
if (IS_ERR(filename))
return PTR_ERR(filename);
@@ -62092,7 +62007,7 @@ index ea4449d..747fc21 100644
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
-@@ -1489,11 +1624,21 @@ static int do_execve_common(struct filename *filename,
+@@ -1509,11 +1624,21 @@ static int do_execve_common(struct filename *filename,
if (IS_ERR(file))
goto out_unmark;
@@ -62114,7 +62029,7 @@ index ea4449d..747fc21 100644
retval = bprm_mm_init(bprm);
if (retval)
goto out_unmark;
-@@ -1510,24 +1655,70 @@ static int do_execve_common(struct filename *filename,
+@@ -1530,24 +1655,70 @@ static int do_execve_common(struct filename *filename,
if (retval < 0)
goto out;
@@ -62189,7 +62104,7 @@ index ea4449d..747fc21 100644
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
-@@ -1538,6 +1729,14 @@ static int do_execve_common(struct filename *filename,
+@@ -1558,6 +1729,14 @@ static int do_execve_common(struct filename *filename,
put_files_struct(displaced);
return retval;
@@ -62204,7 +62119,7 @@ index ea4449d..747fc21 100644
out:
if (bprm->mm) {
acct_arg_size(bprm, 0);
-@@ -1629,3 +1828,312 @@ asmlinkage long compat_sys_execve(const char __user * filename,
+@@ -1649,3 +1828,312 @@ asmlinkage long compat_sys_execve(const char __user * filename,
return compat_do_execve(getname(filename), argv, envp);
}
#endif
@@ -65034,7 +64949,7 @@ index b29e42f..5ea7fdf 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index 0dd72c8..07c6710 100644
+index ccb8000..ac58c5a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,34 @@ int generic_permission(struct inode *inode, int mask)
@@ -65150,7 +65065,7 @@ index 0dd72c8..07c6710 100644
return 0;
failed:
-@@ -1593,6 +1612,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -1594,6 +1613,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
@@ -65159,7 +65074,7 @@ index 0dd72c8..07c6710 100644
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1665,7 +1686,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1666,7 +1687,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -65168,7 +65083,7 @@ index 0dd72c8..07c6710 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -1894,7 +1915,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+@@ -1895,7 +1916,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
}
nd->inode = nd->path.dentry->d_inode;
@@ -65184,7 +65099,7 @@ index 0dd72c8..07c6710 100644
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
-@@ -1949,6 +1977,8 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1950,6 +1978,8 @@ static int path_lookupat(int dfd, const char *name,
if (err)
break;
err = lookup_last(nd, &path);
@@ -65193,7 +65108,7 @@ index 0dd72c8..07c6710 100644
put_link(nd, &link, cookie);
}
}
-@@ -1956,6 +1986,13 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1957,6 +1987,13 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -65207,7 +65122,7 @@ index 0dd72c8..07c6710 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
-@@ -1983,8 +2020,15 @@ static int filename_lookup(int dfd, struct filename *name,
+@@ -1984,8 +2021,15 @@ static int filename_lookup(int dfd, struct filename *name,
retval = path_lookupat(dfd, name->name,
flags | LOOKUP_REVAL, nd);
@@ -65224,7 +65139,7 @@ index 0dd72c8..07c6710 100644
return retval;
}
-@@ -2559,6 +2603,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2560,6 +2604,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -65238,7 +65153,7 @@ index 0dd72c8..07c6710 100644
return 0;
}
-@@ -2790,7 +2841,7 @@ looked_up:
+@@ -2791,7 +2842,7 @@ looked_up:
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
@@ -65247,7 +65162,7 @@ index 0dd72c8..07c6710 100644
const struct open_flags *op,
bool got_write, int *opened)
{
-@@ -2825,6 +2876,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2826,6 +2877,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -65265,7 +65180,7 @@ index 0dd72c8..07c6710 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2846,6 +2908,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2847,6 +2909,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -65274,7 +65189,7 @@ index 0dd72c8..07c6710 100644
}
out_no_open:
path->dentry = dentry;
-@@ -2860,7 +2924,7 @@ out_dput:
+@@ -2861,7 +2925,7 @@ out_dput:
/*
* Handle the last step of open()
*/
@@ -65283,7 +65198,7 @@ index 0dd72c8..07c6710 100644
struct file *file, const struct open_flags *op,
int *opened, struct filename *name)
{
-@@ -2910,6 +2974,15 @@ static int do_last(struct nameidata *nd, struct path *path,
+@@ -2911,6 +2975,15 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
@@ -65299,7 +65214,7 @@ index 0dd72c8..07c6710 100644
audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
-@@ -2929,7 +3002,7 @@ retry_lookup:
+@@ -2930,7 +3003,7 @@ retry_lookup:
*/
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -65308,7 +65223,7 @@ index 0dd72c8..07c6710 100644
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
-@@ -2953,11 +3026,28 @@ retry_lookup:
+@@ -2954,11 +3027,28 @@ retry_lookup:
goto finish_open_created;
}
@@ -65338,7 +65253,7 @@ index 0dd72c8..07c6710 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -2998,6 +3088,11 @@ finish_lookup:
+@@ -3000,6 +3090,11 @@ finish_lookup:
}
}
BUG_ON(inode != path->dentry->d_inode);
@@ -65350,7 +65265,7 @@ index 0dd72c8..07c6710 100644
return 1;
}
-@@ -3007,7 +3102,6 @@ finish_lookup:
+@@ -3009,7 +3104,6 @@ finish_lookup:
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path->mnt);
nd->path.dentry = path->dentry;
@@ -65358,7 +65273,7 @@ index 0dd72c8..07c6710 100644
}
nd->inode = inode;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
-@@ -3017,7 +3111,18 @@ finish_open:
+@@ -3019,7 +3113,18 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -65377,7 +65292,16 @@ index 0dd72c8..07c6710 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3181,7 +3286,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3171,7 +3276,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+
+ if (unlikely(file->f_flags & __O_TMPFILE)) {
+ error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
+- goto out;
++ goto out2;
+ }
+
+ error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
+@@ -3183,7 +3288,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -65386,7 +65310,7 @@ index 0dd72c8..07c6710 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3199,7 +3304,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3201,7 +3306,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -65395,7 +65319,15 @@ index 0dd72c8..07c6710 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3299,9 +3404,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3209,6 +3314,7 @@ out:
+ path_put(&nd->root);
+ if (base)
+ fput(base);
++out2:
+ if (!(opened & FILE_OPENED)) {
+ BUG_ON(!error);
+ put_filp(file);
+@@ -3301,9 +3407,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -65409,7 +65341,7 @@ index 0dd72c8..07c6710 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3353,6 +3460,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3355,6 +3463,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -65430,7 +65362,7 @@ index 0dd72c8..07c6710 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3415,6 +3536,17 @@ retry:
+@@ -3417,6 +3539,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -65448,7 +65380,7 @@ index 0dd72c8..07c6710 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3431,6 +3563,8 @@ retry:
+@@ -3433,6 +3566,8 @@ retry:
break;
}
out:
@@ -65457,7 +65389,7 @@ index 0dd72c8..07c6710 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3483,9 +3617,16 @@ retry:
+@@ -3485,9 +3620,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -65474,7 +65406,7 @@ index 0dd72c8..07c6710 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3566,6 +3707,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3568,6 +3710,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -65483,7 +65415,7 @@ index 0dd72c8..07c6710 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3598,10 +3741,21 @@ retry:
+@@ -3600,10 +3744,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -65505,7 +65437,7 @@ index 0dd72c8..07c6710 100644
exit3:
dput(dentry);
exit2:
-@@ -3691,6 +3845,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3693,6 +3848,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -65514,7 +65446,7 @@ index 0dd72c8..07c6710 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3717,10 +3873,22 @@ retry_deleg:
+@@ -3719,10 +3876,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -65537,7 +65469,7 @@ index 0dd72c8..07c6710 100644
exit2:
dput(dentry);
}
-@@ -3808,9 +3976,17 @@ retry:
+@@ -3810,9 +3979,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -65555,7 +65487,7 @@ index 0dd72c8..07c6710 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3913,6 +4089,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3915,6 +4092,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -65563,7 +65495,7 @@ index 0dd72c8..07c6710 100644
int how = 0;
int error;
-@@ -3936,7 +4113,7 @@ retry:
+@@ -3938,7 +4116,7 @@ retry:
if (error)
return error;
@@ -65572,7 +65504,7 @@ index 0dd72c8..07c6710 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3948,11 +4125,28 @@ retry:
+@@ -3950,11 +4128,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -65601,7 +65533,7 @@ index 0dd72c8..07c6710 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4239,6 +4433,20 @@ retry_deleg:
+@@ -4241,6 +4436,20 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -65622,7 +65554,7 @@ index 0dd72c8..07c6710 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry);
if (error)
-@@ -4246,6 +4454,9 @@ retry_deleg:
+@@ -4248,6 +4457,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode);
@@ -65632,7 +65564,7 @@ index 0dd72c8..07c6710 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4282,6 +4493,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4284,6 +4496,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -65641,7 +65573,7 @@ index 0dd72c8..07c6710 100644
int len;
len = PTR_ERR(link);
-@@ -4291,7 +4504,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -4293,7 +4507,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -66295,7 +66227,7 @@ index 49d84f8..4807e0b 100644
/* Copy the blockcheck stats from the superblock probe */
osb->osb_ecc_stats = *stats;
diff --git a/fs/open.c b/fs/open.c
-index 2ed7325..4e77ac3 100644
+index 17679f2..85f4981 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -32,6 +32,8 @@
@@ -66396,9 +66328,9 @@ index 2ed7325..4e77ac3 100644
+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
+ return -EACCES;
+
+ retry_deleg:
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
- if (!uid_valid(uid))
@@ -982,6 +1019,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
} else {
fsnotify_open(f);
@@ -66408,9 +66340,18 @@ index 2ed7325..4e77ac3 100644
}
putname(tmp);
diff --git a/fs/pipe.c b/fs/pipe.c
-index 78fd0d0..f71fc09 100644
+index 78fd0d0..6757bcf 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
+@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
+ /*
+ * Minimum pipe size, as required by POSIX
+ */
+-unsigned int pipe_min_size = PAGE_SIZE;
++unsigned int pipe_min_size __read_only = PAGE_SIZE;
+
+ /*
+ * We use a start+len construction, which provides full use of the
@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
@@ -66617,6 +66558,35 @@ index 78fd0d0..f71fc09 100644
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
+@@ -1208,7 +1209,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+-static inline unsigned int round_pipe_size(unsigned int size)
++static inline unsigned long round_pipe_size(unsigned long size)
+ {
+ unsigned long nr_pages;
+
+@@ -1256,13 +1257,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ switch (cmd) {
+ case F_SETPIPE_SZ: {
+- unsigned int size, nr_pages;
++ unsigned long size, nr_pages;
++
++ ret = -EINVAL;
++ if (arg < pipe_min_size)
++ goto out;
+
+ size = round_pipe_size(arg);
+ nr_pages = size >> PAGE_SHIFT;
+
+- ret = -EINVAL;
+- if (!nr_pages)
++ if (size < pipe_min_size)
+ goto out;
+
+ if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 0855f77..6787d50 100644
--- a/fs/posix_acl.c
@@ -68762,7 +68732,7 @@ index 1d641bb..9ca7f61 100644
{
const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
diff --git a/fs/splice.c b/fs/splice.c
-index 12028fa..a6f2619 100644
+index 12028fa..2cde9b2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
@@ -68844,6 +68814,15 @@ index 12028fa..a6f2619 100644
return 0;
if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1171,7 +1171,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ long ret, bytes;
+ umode_t i_mode;
+ size_t len;
+- int i, flags;
++ int i, flags, more;
+
+ /*
+ * We require the input being a regular file, as we don't want to
@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* out of the pipe right after the splice_to_pipe(). So set
* PIPE_READERS appropriately.
@@ -68853,7 +68832,31 @@ index 12028fa..a6f2619 100644
current->splice_pipe = pipe;
}
-@@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+@@ -1214,6 +1214,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ * Don't block on output, we have to drain the direct pipe.
+ */
+ sd->flags &= ~SPLICE_F_NONBLOCK;
++ more = sd->flags & SPLICE_F_MORE;
+
+ while (len) {
+ size_t read_len;
+@@ -1227,6 +1228,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ sd->total_len = read_len;
+
+ /*
++ * If more data is pending, set SPLICE_F_MORE
++ * If this is the last data and SPLICE_F_MORE was not set
++ * initially, clears it.
++ */
++ if (read_len < len)
++ sd->flags |= SPLICE_F_MORE;
++ else if (!more)
++ sd->flags &= ~SPLICE_F_MORE;
++ /*
+ * NOTE: nonblocking mode only applies to the input. We
+ * must not do the output in nonblocking mode as then we
+ * could get stuck data in the internal pipe:
+@@ -1493,6 +1503,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
partial[buffers].offset = off;
partial[buffers].len = plen;
@@ -68861,7 +68864,7 @@ index 12028fa..a6f2619 100644
off = 0;
len -= plen;
-@@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1795,9 +1806,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -68873,7 +68876,7 @@ index 12028fa..a6f2619 100644
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
-@@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1829,7 +1840,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
pipe_lock(pipe);
while (pipe->nrbufs >= pipe->buffers) {
@@ -68882,7 +68885,7 @@ index 12028fa..a6f2619 100644
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
break;
-@@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1842,9 +1853,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -68894,7 +68897,7 @@ index 12028fa..a6f2619 100644
}
pipe_unlock(pipe);
-@@ -1880,14 +1881,14 @@ retry:
+@@ -1880,14 +1891,14 @@ retry:
pipe_double_lock(ipipe, opipe);
do {
@@ -68911,7 +68914,7 @@ index 12028fa..a6f2619 100644
break;
/*
-@@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -1984,7 +1995,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
pipe_double_lock(ipipe, opipe);
do {
@@ -68920,7 +68923,7 @@ index 12028fa..a6f2619 100644
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
-@@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -2029,7 +2040,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* return EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
@@ -82295,7 +82298,7 @@ index 939533d..cf0a57c 100644
/**
* struct clk_init_data - holds init data that's common to all clocks and is
diff --git a/include/linux/compat.h b/include/linux/compat.h
-index 3f448c6..df3ce1d 100644
+index 3f448c6..8dd869d 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
@@ -82307,6 +82310,15 @@ index 3f448c6..df3ce1d 100644
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
+@@ -322,7 +322,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ long compat_sys_msgctl(int first, int second, void __user *uptr);
+ long compat_sys_shmctl(int first, int second, void __user *uptr);
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+- unsigned nsems, const struct compat_timespec __user *timeout);
++ compat_long_t nsems, const struct compat_timespec __user *timeout);
+ asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
@@ -87137,19 +87149,19 @@ index 1e2cd2e..0288750 100644
/* shm_mode upper byte flags */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index ad8f859..e93b2e4 100644
+index ab31337..95e2121 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -662,7 +662,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
- struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+@@ -663,7 +663,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
-static inline struct sk_buff *alloc_skb(unsigned int size,
+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -1768,7 +1768,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -1769,7 +1769,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
return skb->inner_transport_header - skb->inner_network_header;
}
@@ -87158,7 +87170,7 @@ index ad8f859..e93b2e4 100644
{
return skb_network_header(skb) - skb->data;
}
-@@ -1828,7 +1828,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -1829,7 +1829,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
@@ -87167,7 +87179,7 @@ index ad8f859..e93b2e4 100644
#endif
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2427,7 +2427,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2428,7 +2428,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
@@ -87176,7 +87188,7 @@ index ad8f859..e93b2e4 100644
struct iovec *to, int size);
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
struct iovec *iov);
-@@ -2722,6 +2722,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -2723,6 +2723,9 @@ static inline void nf_reset(struct sk_buff *skb)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
@@ -87492,7 +87504,7 @@ index a5ffd32..0935dea 100644
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index a747a77..02cf063 100644
+index a747a77..b44c9b4 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -99,7 +99,12 @@ struct sigaltstack;
@@ -87534,6 +87546,19 @@ index a747a77..02cf063 100644
asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags);
+@@ -646,10 +651,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+
+ asmlinkage long sys_semget(key_t key, int nsems, int semflg);
+ asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
+- unsigned nsops);
++ long nsops);
+ asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
+ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
+- unsigned nsops,
++ long nsops,
+ const struct timespec __user *timeout);
+ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+ asmlinkage long sys_shmget(key_t key, size_t size, int flag);
diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
index 27b3b0b..e093dd9 100644
--- a/include/linux/syscore_ops.h
@@ -87833,10 +87858,10 @@ index 99c1b4d..562e6f3 100644
static inline void put_unaligned_le16(u16 val, void *p)
diff --git a/include/linux/usb.h b/include/linux/usb.h
-index 7f6eb85..656e806 100644
+index 49466be..5954884 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
-@@ -563,7 +563,7 @@ struct usb_device {
+@@ -589,7 +589,7 @@ struct usb_device {
int maxchild;
u32 quirks;
@@ -87845,7 +87870,7 @@ index 7f6eb85..656e806 100644
unsigned long active_duration;
-@@ -1642,7 +1642,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
+@@ -1668,7 +1668,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
@@ -89042,7 +89067,7 @@ index 9a00147..d814573 100644
struct snd_soc_platform {
const char *name;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index 34932540..8d54ec7 100644
+index e4b9e01..bc9fff0 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -754,7 +754,7 @@ struct se_device {
@@ -90014,7 +90039,7 @@ index 58c132d..310b5fa 100644
* Ok, we have completed the initial bootup, and
* we're essentially up and running. Get rid of the
diff --git a/ipc/compat.c b/ipc/compat.c
-index f486b00..442867f 100644
+index f486b00..fbb78e9 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
@@ -90026,6 +90051,15 @@ index f486b00..442867f 100644
}
case SHMDT:
return sys_shmdt(compat_ptr(ptr));
+@@ -750,7 +750,7 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
+ }
+
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+- unsigned nsops, const struct compat_timespec __user *timeout)
++ compat_long_t nsops, const struct compat_timespec __user *timeout)
+ {
+ struct timespec __user *ts64 = NULL;
+ if (timeout) {
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index cadddc8..c263084 100644
--- a/ipc/ipc_sysctl.c
@@ -90139,7 +90173,7 @@ index 6498531..b0ff3c8 100644
msg_params.flg = msgflg;
diff --git a/ipc/sem.c b/ipc/sem.c
-index bee5554..e9af81dd 100644
+index bee5554..ec7d947 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -561,10 +561,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
@@ -90170,6 +90204,24 @@ index bee5554..e9af81dd 100644
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
+@@ -1760,7 +1761,7 @@ static int get_queue_result(struct sem_queue *q)
+ }
+
+ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+- unsigned, nsops, const struct timespec __user *, timeout)
++ long, nsops, const struct timespec __user *, timeout)
+ {
+ int error = -EINVAL;
+ struct sem_array *sma;
+@@ -1996,7 +1997,7 @@ out_free:
+ }
+
+ SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+- unsigned, nsops)
++ long, nsops)
+ {
+ return sys_semtimedop(semid, tsops, nsops, NULL);
+ }
diff --git a/ipc/shm.c b/ipc/shm.c
index 7645961..afc7f02 100644
--- a/ipc/shm.c
@@ -93786,7 +93838,7 @@ index ebdd9c1..612ee05 100644
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 1f4bcb3..99cf7ab 100644
+index be9760f..6ee48dd 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
@@ -93807,7 +93859,7 @@ index 1f4bcb3..99cf7ab 100644
return -EFAULT;
copied += retval;
src += retval;
-@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
+@@ -826,7 +826,7 @@ int ptrace_request(struct task_struct *child, long request,
bool seized = child->ptrace & PT_SEIZED;
int ret = -EIO;
siginfo_t siginfo, *si;
@@ -93816,7 +93868,7 @@ index 1f4bcb3..99cf7ab 100644
unsigned long __user *datalp = datavp;
unsigned long flags;
-@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+@@ -1072,14 +1072,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out;
}
@@ -93839,7 +93891,7 @@ index 1f4bcb3..99cf7ab 100644
goto out_put_task_struct;
}
-@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+@@ -1107,7 +1114,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
return -EIO;
@@ -93848,7 +93900,7 @@ index 1f4bcb3..99cf7ab 100644
}
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
-@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+@@ -1201,7 +1208,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
}
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -93857,7 +93909,7 @@ index 1f4bcb3..99cf7ab 100644
{
struct task_struct *child;
long ret;
-@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+@@ -1217,14 +1224,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out;
}
@@ -95109,7 +95161,7 @@ index 60d35ac5..59d289f 100644
mutex_unlock(&smpboot_threads_lock);
put_online_cpus();
diff --git a/kernel/softirq.c b/kernel/softirq.c
-index 490fcbb..1e502c6 100644
+index 93be750..06335f9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -52,7 +52,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
@@ -95157,7 +95209,7 @@ index 490fcbb..1e502c6 100644
{
struct tasklet_struct *list;
-@@ -742,7 +742,7 @@ static struct notifier_block cpu_nfb = {
+@@ -746,7 +746,7 @@ static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
@@ -95920,7 +95972,7 @@ index e3be87e..abc908f 100644
/* make curr_ret_stack visible before we add the ret_stack */
smp_wmb();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index 774a080..d09b170 100644
+index da41de9..7f9f568 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -352,9 +352,9 @@ struct buffer_data_page {
@@ -96132,7 +96184,7 @@ index 774a080..d09b170 100644
return NULL;
}
#endif
-@@ -2863,7 +2863,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
@@ -96141,7 +96193,7 @@ index 774a080..d09b170 100644
return;
}
-@@ -2875,7 +2875,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2874,7 +2874,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
start = bpage;
do {
if (bpage->page == (void *)addr) {
@@ -96150,7 +96202,7 @@ index 774a080..d09b170 100644
return;
}
rb_inc_page(cpu_buffer, &bpage);
-@@ -3159,7 +3159,7 @@ static inline unsigned long
+@@ -3158,7 +3158,7 @@ static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
return local_read(&cpu_buffer->entries) -
@@ -96159,7 +96211,7 @@ index 774a080..d09b170 100644
}
/**
-@@ -3248,7 +3248,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3247,7 +3247,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -96168,7 +96220,7 @@ index 774a080..d09b170 100644
return ret;
}
-@@ -3271,7 +3271,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3270,7 +3270,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -96177,7 +96229,7 @@ index 774a080..d09b170 100644
return ret;
}
-@@ -3293,7 +3293,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3292,7 +3292,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -96186,7 +96238,7 @@ index 774a080..d09b170 100644
return ret;
}
-@@ -3356,7 +3356,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+@@ -3355,7 +3355,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -96195,7 +96247,7 @@ index 774a080..d09b170 100644
}
return overruns;
-@@ -3527,8 +3527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3526,8 +3526,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Reset the reader page to size zero.
*/
@@ -96206,7 +96258,7 @@ index 774a080..d09b170 100644
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;
-@@ -3562,7 +3562,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3561,7 +3561,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* want to compare with the last_overrun.
*/
smp_mb();
@@ -96215,7 +96267,7 @@ index 774a080..d09b170 100644
/*
* Here's the tricky part.
-@@ -4134,8 +4134,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4133,8 +4133,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -96226,7 +96278,7 @@ index 774a080..d09b170 100644
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
-@@ -4145,18 +4145,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4144,18 +4144,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -96251,7 +96303,7 @@ index 774a080..d09b170 100644
cpu_buffer->read = 0;
cpu_buffer->read_bytes = 0;
-@@ -4557,8 +4557,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+@@ -4556,8 +4556,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
rb_init_page(bpage);
bpage = reader->page;
reader->page = *data_page;
@@ -96715,6 +96767,20 @@ index e0731c3..ad66444 100644
if (is_on_stack == onstack)
return;
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index f504027..97a15c0 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
+
+ /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
+ uncompressed data. Allocate intermediate buffer for block. */
+- bd->dbufSize = 100000*(i-BZh0);
++ i -= BZh0;
++ bd->dbufSize = 100000 * i;
+
+ bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+ if (!bd->dbuf)
diff --git a/lib/devres.c b/lib/devres.c
index 8235331..5881053 100644
--- a/lib/devres.c
@@ -97277,7 +97343,7 @@ index 0922579..9d7adb9 100644
#endif
}
diff --git a/lib/string.c b/lib/string.c
-index 43d0781..64941b2 100644
+index cb9ea21..909c993 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -805,9 +805,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
@@ -104044,10 +104110,10 @@ index b442e7e..6f5b5a2 100644
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index e2b1bba..71bd8fe 100644
+index 69ec61a..61843ef 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -360,18 +360,29 @@ refill:
+@@ -378,18 +378,29 @@ refill:
goto end;
}
nc->frag.size = PAGE_SIZE << order;
@@ -104084,7 +104150,7 @@ index e2b1bba..71bd8fe 100644
}
data = page_address(nc->frag.page) + nc->frag.offset;
-@@ -2004,7 +2015,7 @@ EXPORT_SYMBOL(__skb_checksum);
+@@ -2022,7 +2033,7 @@ EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
{
@@ -104093,7 +104159,7 @@ index e2b1bba..71bd8fe 100644
.update = csum_partial_ext,
.combine = csum_block_add_ext,
};
-@@ -3225,13 +3236,15 @@ void __init skb_init(void)
+@@ -3243,13 +3254,15 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -104694,7 +104760,7 @@ index bf2cb4a..d83ba8a 100644
p->rate_tokens = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
-index ecb34b5..5c5ab40 100644
+index 57075c4..bf162142 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -82,7 +82,7 @@ static int ip_forward_finish_gso(struct sk_buff *skb)
@@ -107805,7 +107871,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 1d52506..b772b22 100644
+index a0b0ea9..62f0bc3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk)
@@ -107817,7 +107883,7 @@ index 1d52506..b772b22 100644
}
static void netlink_rcv_wake(struct sock *sk)
-@@ -2983,7 +2983,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -2981,7 +2981,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
@@ -110564,10 +110630,10 @@ index 8fac3fd..32ff38d 100644
unsigned int secindex_strings;
diff --git a/security/Kconfig b/security/Kconfig
-index beb86b5..86bc440 100644
+index beb86b5..57a1143 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,974 @@
+@@ -4,6 +4,979 @@
menu "Security options"
@@ -110635,6 +110701,11 @@ index beb86b5..86bc440 100644
+ grsecurity and PaX settings manually. Via this method, no options are
+ automatically enabled.
+
++ Take note that if menuconfig is exited with this configuration method
++ chosen, you will not be able to use the automatic configuration methods
++ without starting again with a kernel configuration with no grsecurity
++ or PaX options specified inside.
++
+endchoice
+
+choice
@@ -111542,7 +111613,7 @@ index beb86b5..86bc440 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -103,7 +1071,7 @@ config INTEL_TXT
+@@ -103,7 +1076,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -121159,10 +121230,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..f314f81
+index 0000000..038e79d
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,6058 @@
+@@ -0,0 +1,6059 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
@@ -125366,6 +125437,7 @@ index 0000000..f314f81
+unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
+bscnl_emit_45699 bscnl_emit 2-5-0 45699 &unix_dgram_sendmsg_45699
+sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL
++sk_forced_wmem_schedule_45721 sk_forced_wmem_schedule 2 45721 NULL
+snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL nohasharray
+task_cgroup_path_45734 task_cgroup_path 3 45734 &snd_cs46xx_io_read_45734
+rw_copy_check_uvector_45748 rw_copy_check_uvector 3-0 45748 NULL nohasharray
@@ -128741,7 +128813,7 @@ index 0a578fe..b81f62d 100644
})
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 6611253..eb4bc0f 100644
+index eed250e..df7bf8c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -77,12 +77,17 @@ LIST_HEAD(vm_list);
diff --git a/4.0.1/4425_grsec_remove_EI_PAX.patch b/3.14.41/4425_grsec_remove_EI_PAX.patch
index 86e242a..a80a5d7 100644
--- a/4.0.1/4425_grsec_remove_EI_PAX.patch
+++ b/3.14.41/4425_grsec_remove_EI_PAX.patch
@@ -8,7 +8,7 @@ X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
--- linux-3.7.1-hardened.orig/security/Kconfig 2012-12-26 08:39:29.000000000 -0500
+++ linux-3.7.1-hardened/security/Kconfig 2012-12-26 09:05:44.000000000 -0500
-@@ -273,7 +273,7 @@
+@@ -278,7 +278,7 @@
config PAX_EI_PAX
bool 'Use legacy ELF header marking'
diff --git a/3.14.40/4427_force_XATTR_PAX_tmpfs.patch b/3.14.41/4427_force_XATTR_PAX_tmpfs.patch
index 4c236cc..4c236cc 100644
--- a/3.14.40/4427_force_XATTR_PAX_tmpfs.patch
+++ b/3.14.41/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.14.40/4430_grsec-remove-localversion-grsec.patch b/3.14.41/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.14.40/4430_grsec-remove-localversion-grsec.patch
+++ b/3.14.41/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.14.40/4435_grsec-mute-warnings.patch b/3.14.41/4435_grsec-mute-warnings.patch
index 392cefb..392cefb 100644
--- a/3.14.40/4435_grsec-mute-warnings.patch
+++ b/3.14.41/4435_grsec-mute-warnings.patch
diff --git a/3.14.40/4440_grsec-remove-protected-paths.patch b/3.14.41/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/3.14.40/4440_grsec-remove-protected-paths.patch
+++ b/3.14.41/4440_grsec-remove-protected-paths.patch
diff --git a/3.14.40/4450_grsec-kconfig-default-gids.patch b/3.14.41/4450_grsec-kconfig-default-gids.patch
index 8c878fc..b96defc 100644
--- a/3.14.40/4450_grsec-kconfig-default-gids.patch
+++ b/3.14.41/4450_grsec-kconfig-default-gids.patch
@@ -73,7 +73,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Nuar a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/security/Kconfig 2012-10-13 09:52:59.000000000 -0400
-@@ -201,7 +201,7 @@
+@@ -206,7 +206,7 @@
config GRKERNSEC_PROC_GID
int "GID exempted from /proc restrictions"
@@ -82,7 +82,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group will be exempted from
grsecurity's /proc restrictions, allowing users of the specified
-@@ -212,7 +212,7 @@
+@@ -217,7 +217,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -91,7 +91,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group untrusted users should
be added to. These users will be placed under grsecurity's Trusted Path
-@@ -224,7 +224,7 @@
+@@ -229,7 +229,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -100,7 +100,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -233,7 +233,7 @@
+@@ -238,7 +238,7 @@
config GRKERNSEC_SYMLINKOWN_GID
int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
depends on GRKERNSEC_CONFIG_SERVER
diff --git a/3.14.40/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.41/4465_selinux-avc_audit-log-curr_ip.patch
index bba906e..bba906e 100644
--- a/3.14.40/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.14.41/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.14.40/4470_disable-compat_vdso.patch b/3.14.41/4470_disable-compat_vdso.patch
index 3b3953b..3b3953b 100644
--- a/3.14.40/4470_disable-compat_vdso.patch
+++ b/3.14.41/4470_disable-compat_vdso.patch
diff --git a/4.0.1/4475_emutramp_default_on.patch b/3.14.41/4475_emutramp_default_on.patch
index ad4967a..a128205 100644
--- a/4.0.1/4475_emutramp_default_on.patch
+++ b/3.14.41/4475_emutramp_default_on.patch
@@ -10,7 +10,7 @@ See bug:
diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/security/Kconfig
--- linux-3.9.2-hardened.orig/security/Kconfig 2013-05-18 08:53:41.000000000 -0400
+++ linux-3.9.2-hardened/security/Kconfig 2013-05-18 09:17:57.000000000 -0400
-@@ -433,7 +433,7 @@
+@@ -438,7 +438,7 @@
config PAX_EMUTRAMP
bool "Emulate trampolines"
@@ -19,7 +19,7 @@ diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/secur
depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
help
There are some programs and libraries that for one reason or
-@@ -456,6 +456,12 @@
+@@ -461,6 +461,12 @@
utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
for the affected files.
diff --git a/3.2.68/0000_README b/3.2.68/0000_README
index 6316628..fb78cb8 100644
--- a/3.2.68/0000_README
+++ b/3.2.68/0000_README
@@ -190,7 +190,7 @@ Patch: 1067_linux-3.2.68.patch
From: http://www.kernel.org
Desc: Linux 3.2.68
-Patch: 4420_grsecurity-3.1-3.2.68-201505042051.patch
+Patch: 4420_grsecurity-3.1-3.2.68-201505091720.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.68/4420_grsecurity-3.1-3.2.68-201505042051.patch b/3.2.68/4420_grsecurity-3.1-3.2.68-201505091720.patch
index a45a2c2..eaddb1d 100644
--- a/3.2.68/4420_grsecurity-3.1-3.2.68-201505042051.patch
+++ b/3.2.68/4420_grsecurity-3.1-3.2.68-201505091720.patch
@@ -64660,9 +64660,18 @@ index bd8ae78..539d250 100644
ldm_crit ("Out of memory.");
return false;
diff --git a/fs/pipe.c b/fs/pipe.c
-index 8ca88fc..d1f8b8a 100644
+index 8ca88fc..a2aefd9 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
+@@ -33,7 +33,7 @@ unsigned int pipe_max_size = 1048576;
+ /*
+ * Minimum pipe size, as required by POSIX
+ */
+-unsigned int pipe_min_size = PAGE_SIZE;
++unsigned int pipe_min_size __read_only = PAGE_SIZE;
+
+ /*
+ * We use a start+len construction, which provides full use of the
@@ -437,9 +437,9 @@ redo:
}
if (bufs) /* More to do? */
@@ -64786,6 +64795,35 @@ index 8ca88fc..d1f8b8a 100644
inode->i_fop = &rdwr_pipefifo_fops;
/*
+@@ -1203,7 +1204,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+-static inline unsigned int round_pipe_size(unsigned int size)
++static inline unsigned long round_pipe_size(unsigned long size)
+ {
+ unsigned long nr_pages;
+
+@@ -1253,13 +1254,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ switch (cmd) {
+ case F_SETPIPE_SZ: {
+- unsigned int size, nr_pages;
++ unsigned long size, nr_pages;
++
++ ret = -EINVAL;
++ if (arg < pipe_min_size)
++ goto out;
+
+ size = round_pipe_size(arg);
+ nr_pages = size >> PAGE_SHIFT;
+
+- ret = -EINVAL;
+- if (!nr_pages)
++ if (size < pipe_min_size)
+ goto out;
+
+ if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 6c70ab2..54c5656 100644
--- a/fs/posix_acl.c
@@ -66986,7 +67024,7 @@ index dba43c3..cb3437c 100644
{
const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
diff --git a/fs/splice.c b/fs/splice.c
-index 34c2b2b..2f91055 100644
+index 34c2b2b..cb9a1ed 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
@@ -67050,6 +67088,15 @@ index 34c2b2b..2f91055 100644
return 0;
if (sd->flags & SPLICE_F_NONBLOCK)
+@@ -1187,7 +1187,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ long ret, bytes;
+ umode_t i_mode;
+ size_t len;
+- int i, flags;
++ int i, flags, more;
+
+ /*
+ * We require the input being a regular file, as we don't want to
@@ -1213,7 +1213,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* out of the pipe right after the splice_to_pipe(). So set
* PIPE_READERS appropriately.
@@ -67059,7 +67106,31 @@ index 34c2b2b..2f91055 100644
current->splice_pipe = pipe;
}
-@@ -1481,6 +1481,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+@@ -1230,6 +1230,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ * Don't block on output, we have to drain the direct pipe.
+ */
+ sd->flags &= ~SPLICE_F_NONBLOCK;
++ more = sd->flags & SPLICE_F_MORE;
+
+ while (len) {
+ size_t read_len;
+@@ -1243,6 +1244,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ sd->total_len = read_len;
+
+ /*
++ * If more data is pending, set SPLICE_F_MORE
++ * If this is the last data and SPLICE_F_MORE was not set
++ * initially, clears it.
++ */
++ if (read_len < len)
++ sd->flags |= SPLICE_F_MORE;
++ else if (!more)
++ sd->flags &= ~SPLICE_F_MORE;
++ /*
+ * NOTE: nonblocking mode only applies to the input. We
+ * must not do the output in nonblocking mode as then we
+ * could get stuck data in the internal pipe:
+@@ -1481,6 +1491,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
partial[buffers].offset = off;
partial[buffers].len = plen;
@@ -67067,7 +67138,7 @@ index 34c2b2b..2f91055 100644
off = 0;
len -= plen;
-@@ -1766,9 +1767,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1766,9 +1777,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -67079,7 +67150,7 @@ index 34c2b2b..2f91055 100644
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
-@@ -1800,7 +1801,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1800,7 +1811,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
pipe_lock(pipe);
while (pipe->nrbufs >= pipe->buffers) {
@@ -67088,7 +67159,7 @@ index 34c2b2b..2f91055 100644
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
break;
-@@ -1813,9 +1814,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1813,9 +1824,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -67100,7 +67171,7 @@ index 34c2b2b..2f91055 100644
}
pipe_unlock(pipe);
-@@ -1851,14 +1852,14 @@ retry:
+@@ -1851,14 +1862,14 @@ retry:
pipe_double_lock(ipipe, opipe);
do {
@@ -67117,7 +67188,7 @@ index 34c2b2b..2f91055 100644
break;
/*
-@@ -1955,7 +1956,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -1955,7 +1966,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
pipe_double_lock(ipipe, opipe);
do {
@@ -67126,7 +67197,7 @@ index 34c2b2b..2f91055 100644
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
-@@ -2000,7 +2001,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -2000,7 +2011,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* return EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
@@ -80829,10 +80900,10 @@ index 081147d..da89543 100644
extern void
diff --git a/include/linux/compat.h b/include/linux/compat.h
-index d42bd48..554dcd5 100644
+index d42bd48..a20850d 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
-@@ -240,7 +240,7 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
+@@ -240,10 +240,10 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
int version, void __user *uptr);
long compat_sys_msgctl(int first, int second, void __user *uptr);
long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
@@ -80840,7 +80911,11 @@ index d42bd48..554dcd5 100644
+ void __user *uptr) __intentional_overflow(0);
long compat_sys_shmctl(int first, int second, void __user *uptr);
long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
+- unsigned nsems, const struct compat_timespec __user *timeout);
++ compat_long_t nsems, const struct compat_timespec __user *timeout);
+ asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
@@ -334,7 +334,7 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
@@ -86408,7 +86483,7 @@ index ea0c02f..0eed39d 100644
#ifdef __arch_swab64
return __arch_swab64(val);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 86a24b11..b6048c1 100644
+index 86a24b11..a84e6d1 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -83,12 +83,19 @@ struct file_handle;
@@ -86462,6 +86537,19 @@ index 86a24b11..b6048c1 100644
asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags);
+@@ -667,10 +674,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+
+ asmlinkage long sys_semget(key_t key, int nsems, int semflg);
+ asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
+- unsigned nsops);
++ long nsops);
+ asmlinkage long sys_semctl(int semid, int semnum, int cmd, union semun arg);
+ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
+- unsigned nsops,
++ long nsops,
+ const struct timespec __user *timeout);
+ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+ asmlinkage long sys_shmget(key_t key, size_t size, int flag);
diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
index 27b3b0b..e093dd9 100644
--- a/include/linux/syscore_ops.h
@@ -89095,6 +89183,19 @@ index e937d9b..4700693 100644
/*
* Ok, we have completed the initial bootup, and
* we're essentially up and running. Get rid of the
+diff --git a/ipc/compat.c b/ipc/compat.c
+index 845a287..6a0666b 100644
+--- a/ipc/compat.c
++++ b/ipc/compat.c
+@@ -672,7 +672,7 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
+ }
+
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+- unsigned nsops, const struct compat_timespec __user *timeout)
++ compat_long_t nsops, const struct compat_timespec __user *timeout)
+ {
+ struct timespec __user *ts64 = NULL;
+ if (timeout) {
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 00fba2b..9afd545 100644
--- a/ipc/ipc_sysctl.c
@@ -89208,7 +89309,7 @@ index 25f1a61..58f7ac1 100644
msg_params.flg = msgflg;
diff --git a/ipc/sem.c b/ipc/sem.c
-index 5215a81..cfc0cac 100644
+index 5215a81..bbbca2e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
@@ -89239,6 +89340,24 @@ index 5215a81..cfc0cac 100644
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
+@@ -1328,7 +1329,7 @@ static int get_queue_result(struct sem_queue *q)
+
+
+ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+- unsigned, nsops, const struct timespec __user *, timeout)
++ long, nsops, const struct timespec __user *, timeout)
+ {
+ int error = -EINVAL;
+ struct sem_array *sma;
+@@ -1546,7 +1547,7 @@ out_free:
+ }
+
+ SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+- unsigned, nsops)
++ long, nsops)
+ {
+ return sys_semtimedop(semid, tsops, nsops, NULL);
+ }
diff --git a/ipc/shm.c b/ipc/shm.c
index 326a20b..62e6b7e 100644
--- a/ipc/shm.c
@@ -96546,6 +96665,20 @@ index a78b7c6..2c73084 100644
if (is_on_stack == onstack)
return;
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 6a110e2..799667a 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -666,7 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
+
+ /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
+ uncompressed data. Allocate intermediate buffer for block. */
+- bd->dbufSize = 100000*(i-BZh0);
++ i -= BZh0;
++ bd->dbufSize = 100000 * i;
+
+ bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+ if (!bd->dbuf)
diff --git a/lib/devres.c b/lib/devres.c
index 7c0e953..f642b5c 100644
--- a/lib/devres.c
@@ -112432,10 +112565,10 @@ index 38f6617..e70b72b 100755
exuberant()
diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..60c36a1 100644
+index 51bd5a0..9cb2b83 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,972 @@
+@@ -4,6 +4,977 @@
menu "Security options"
@@ -112502,6 +112635,11 @@ index 51bd5a0..60c36a1 100644
+ grsecurity and PaX settings manually. Via this method, no options are
+ automatically enabled.
+
++ Take note that if menuconfig is exited with this configuration method
++ chosen, you will not be able to use the automatic configuration methods
++ without starting again with a kernel configuration with no grsecurity
++ or PaX options specified inside.
++
+endchoice
+
+choice
@@ -113408,7 +113546,7 @@ index 51bd5a0..60c36a1 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -169,7 +1135,7 @@ config INTEL_TXT
+@@ -169,7 +1140,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
diff --git a/3.2.68/4425_grsec_remove_EI_PAX.patch b/3.2.68/4425_grsec_remove_EI_PAX.patch
index 366baa8..ba92792 100644
--- a/3.2.68/4425_grsec_remove_EI_PAX.patch
+++ b/3.2.68/4425_grsec_remove_EI_PAX.patch
@@ -8,7 +8,7 @@ X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
--- linux-3.7.1-hardened.orig/security/Kconfig 2012-12-26 08:39:29.000000000 -0500
+++ linux-3.7.1-hardened/security/Kconfig 2012-12-26 09:05:44.000000000 -0500
-@@ -272,7 +272,7 @@
+@@ -277,7 +277,7 @@
config PAX_EI_PAX
bool 'Use legacy ELF header marking'
diff --git a/3.2.68/4450_grsec-kconfig-default-gids.patch b/3.2.68/4450_grsec-kconfig-default-gids.patch
index 26dedae..5e1cb04 100644
--- a/3.2.68/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.68/4450_grsec-kconfig-default-gids.patch
@@ -73,7 +73,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Nuar a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/security/Kconfig 2012-10-13 09:52:59.000000000 -0400
-@@ -200,7 +200,7 @@
+@@ -205,7 +205,7 @@
config GRKERNSEC_PROC_GID
int "GID exempted from /proc restrictions"
@@ -82,7 +82,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group will be exempted from
grsecurity's /proc restrictions, allowing users of the specified
-@@ -211,7 +211,7 @@
+@@ -216,7 +216,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -91,7 +91,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group untrusted users should
be added to. These users will be placed under grsecurity's Trusted Path
-@@ -223,7 +223,7 @@
+@@ -228,7 +228,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -100,7 +100,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -232,7 +232,7 @@
+@@ -237,7 +237,7 @@
config GRKERNSEC_SYMLINKOWN_GID
int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
depends on GRKERNSEC_CONFIG_SERVER
diff --git a/3.2.68/4475_emutramp_default_on.patch b/3.2.68/4475_emutramp_default_on.patch
index 1f3d51a..2db58ab 100644
--- a/3.2.68/4475_emutramp_default_on.patch
+++ b/3.2.68/4475_emutramp_default_on.patch
@@ -10,7 +10,7 @@ See bug:
diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/security/Kconfig
--- linux-3.9.2-hardened.orig/security/Kconfig 2013-05-18 08:53:41.000000000 -0400
+++ linux-3.9.2-hardened/security/Kconfig 2013-05-18 09:17:57.000000000 -0400
-@@ -432,7 +432,7 @@
+@@ -437,7 +437,7 @@
config PAX_EMUTRAMP
bool "Emulate trampolines"
@@ -19,7 +19,7 @@ diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/secur
depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
help
There are some programs and libraries that for one reason or
-@@ -455,6 +455,12 @@
+@@ -460,6 +460,12 @@
utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
for the affected files.
diff --git a/4.0.1/0000_README b/4.0.2/0000_README
index 9db745a..f34197d 100644
--- a/4.0.1/0000_README
+++ b/4.0.2/0000_README
@@ -2,7 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.0.1-201505042053.patch
+Patch: 1002_linux-4.0.2.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.2
+
+Patch: 4420_grsecurity-3.1-4.0.2-201505091724.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.0.2/1001_linux-4.0.2.patch b/4.0.2/1001_linux-4.0.2.patch
new file mode 100644
index 0000000..38a75b2
--- /dev/null
+++ b/4.0.2/1001_linux-4.0.2.patch
@@ -0,0 +1,8587 @@
+diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
+index 99ca40e..5c204df 100644
+--- a/Documentation/networking/scaling.txt
++++ b/Documentation/networking/scaling.txt
+@@ -282,7 +282,7 @@ following is true:
+
+ - The current CPU's queue head counter >= the recorded tail counter
+ value in rps_dev_flow[i]
+-- The current CPU is unset (equal to RPS_NO_CPU)
++- The current CPU is unset (>= nr_cpu_ids)
+ - The current CPU is offline
+
+ After this check, the packet is sent to the (possibly updated) current
+diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
+index 4ceef53..d1ad9d5 100644
+--- a/Documentation/virtual/kvm/devices/s390_flic.txt
++++ b/Documentation/virtual/kvm/devices/s390_flic.txt
+@@ -27,6 +27,9 @@ Groups:
+ Copies all floating interrupts into a buffer provided by userspace.
+ When the buffer is too small it returns -ENOMEM, which is the indication
+ for userspace to try again with a bigger buffer.
++ -ENOBUFS is returned when the allocation of a kernelspace buffer has
++ failed.
++ -EFAULT is returned when copying data to userspace failed.
+ All interrupts remain pending, i.e. are not deleted from the list of
+ currently pending interrupts.
+ attr->addr contains the userspace address of the buffer into which all
+diff --git a/Makefile b/Makefile
+index f499cd2..0649a60 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index fec1fca..6c4bc53 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -167,7 +167,13 @@
+
+ macb1: ethernet@f802c000 {
+ phy-mode = "rmii";
++ #address-cells = <1>;
++ #size-cells = <0>;
+ status = "okay";
++
++ ethernet-phy@1 {
++ reg = <0x1>;
++ };
+ };
+
+ dbgu: serial@ffffee00 {
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index a5441d5..3cc8b83 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -154,7 +154,7 @@
+
+ uart2: serial@12200 {
+ compatible = "ns16550a";
+- reg = <0x12000 0x100>;
++ reg = <0x12200 0x100>;
+ reg-shift = <2>;
+ interrupts = <9>;
+ clocks = <&core_clk 0>;
+@@ -163,7 +163,7 @@
+
+ uart3: serial@12300 {
+ compatible = "ns16550a";
+- reg = <0x12100 0x100>;
++ reg = <0x12300 0x100>;
+ reg-shift = <2>;
+ interrupts = <10>;
+ clocks = <&core_clk 0>;
+diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
+index f027754..c41600e 100644
+--- a/arch/arm/boot/dts/exynos5250-spring.dts
++++ b/arch/arm/boot/dts/exynos5250-spring.dts
+@@ -429,7 +429,6 @@
+ &mmc_0 {
+ status = "okay";
+ num-slots = <1>;
+- supports-highspeed;
+ broken-cd;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+@@ -437,11 +436,8 @@
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
+-
+- slot@0 {
+- reg = <0>;
+- bus-width = <8>;
+- };
++ bus-width = <8>;
++ cap-mmc-highspeed;
+ };
+
+ /*
+@@ -451,7 +447,6 @@
+ &mmc_1 {
+ status = "okay";
+ num-slots = <1>;
+- supports-highspeed;
+ broken-cd;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+@@ -459,11 +454,8 @@
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
+-
+- slot@0 {
+- reg = <0>;
+- bus-width = <4>;
+- };
++ bus-width = <4>;
++ cap-sd-highspeed;
+ };
+
+ &pinctrl_0 {
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index afb9caf..674d03f 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
+index 0db25bc..3a42ac6 100644
+--- a/arch/arm/include/uapi/asm/kvm.h
++++ b/arch/arm/include/uapi/asm/kvm.h
+@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ 0
+ #define KVM_ARM_IRQ_CPU_FIQ 1
+
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX 127
++#endif
+
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE 0x95c1ba5e
+diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
+index c4cc50e..cfb354f 100644
+--- a/arch/arm/kernel/hibernate.c
++++ b/arch/arm/kernel/hibernate.c
+@@ -22,6 +22,7 @@
+ #include <asm/suspend.h>
+ #include <asm/memory.h>
+ #include <asm/sections.h>
++#include "reboot.h"
+
+ int pfn_is_nosave(unsigned long pfn)
+ {
+@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
+
+ ret = swsusp_save();
+ if (ret == 0)
+- soft_restart(virt_to_phys(cpu_resume));
++ _soft_restart(virt_to_phys(cpu_resume), false);
+ return ret;
+ }
+
+@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
+ for (pbe = restore_pblist; pbe; pbe = pbe->next)
+ copy_page(pbe->orig_address, pbe->address);
+
+- soft_restart(virt_to_phys(cpu_resume));
++ _soft_restart(virt_to_phys(cpu_resume), false);
+ }
+
+ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index fdfa3a7..2bf1a16 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -41,6 +41,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/mach/time.h>
+ #include <asm/tls.h>
++#include "reboot.h"
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+ #include <linux/stackprotector.h>
+@@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
+ BUG();
+ }
+
+-void soft_restart(unsigned long addr)
++void _soft_restart(unsigned long addr, bool disable_l2)
+ {
+ u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+
+@@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
+ local_fiq_disable();
+
+ /* Disable the L2 if we're the last man standing. */
+- if (num_online_cpus() == 1)
++ if (disable_l2)
+ outer_disable();
+
+ /* Change to the new stack and continue with the reset. */
+@@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
+ BUG();
+ }
+
++void soft_restart(unsigned long addr)
++{
++ _soft_restart(addr, num_online_cpus() == 1);
++}
++
+ /*
+ * Function pointers to optional machine specific functions
+ */
+diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
+new file mode 100644
+index 0000000..c87f058
+--- /dev/null
++++ b/arch/arm/kernel/reboot.h
+@@ -0,0 +1,6 @@
++#ifndef REBOOT_H
++#define REBOOT_H
++
++extern void _soft_restart(unsigned long addr, bool disable_l2);
++
++#endif
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 5560f74..b652af5 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ if (!irqchip_in_kernel(kvm))
+ return -ENXIO;
+
+- if (irq_num < VGIC_NR_PRIVATE_IRQS ||
+- irq_num > KVM_ARM_IRQ_GIC_MAX)
++ if (irq_num < VGIC_NR_PRIVATE_IRQS)
+ return -EINVAL;
+
+ return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
+index 8b9f5e2..4f4e222 100644
+--- a/arch/arm/mach-mvebu/pmsu.c
++++ b/arch/arm/mach-mvebu/pmsu.c
+@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
+ void __iomem *mpsoc_base;
+ u32 reg;
+
++ pr_warn("CPU idle is currently broken on Armada 38x: disabling");
++ return 0;
++
+ np = of_find_compatible_node(NULL, NULL,
+ "marvell,armada-380-coherency-fabric");
+ if (!np)
+@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
+ return 0;
+ of_node_put(np);
+
++ /*
++ * Currently the CPU idle support for Armada 38x is broken, as
++ * the CPU hotplug uses some of the CPU idle functions it is
++ * broken too, so let's disable it
++ */
++ if (of_machine_is_compatible("marvell,armada380")) {
++ cpu_hotplug_disable();
++ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
++ }
++
+ if (of_machine_is_compatible("marvell,armadaxp"))
+ ret = armada_xp_cpuidle_init();
+ else if (of_machine_is_compatible("marvell,armada370"))
+@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
+ return ret;
+
+ mvebu_v7_pmsu_enable_l2_powerdown_onidle();
+- platform_device_register(&mvebu_v7_cpuidle_device);
++ if (mvebu_v7_cpuidle_device.name)
++ platform_device_register(&mvebu_v7_cpuidle_device);
+ cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
+
+ return 0;
+diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
+index 7bc6668..dcbe17f 100644
+--- a/arch/arm/mach-s3c64xx/crag6410.h
++++ b/arch/arm/mach-s3c64xx/crag6410.h
+@@ -14,6 +14,7 @@
+ #include <mach/gpio-samsung.h>
+
+ #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
++#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
+
+ #define PCA935X_GPIO_BASE GPIO_BOARD_START
+ #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index 10b913b..65c426b 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
+
+ static struct wm831x_pdata crag_pmic_pdata = {
+ .wm831x_num = 1,
++ .irq_base = BANFF_PMIC_IRQ_BASE,
+ .gpio_base = BANFF_PMIC_GPIO_BASE,
+ .soft_shutdown = true,
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 1b8e973..a6186c2 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_845719
++ bool "Cortex-A53: 845719: a load might read incorrect data"
++ depends on COMPAT
++ default y
++ help
++ This option adds an alternative code sequence to work around ARM
++ erratum 845719 on Cortex-A53 parts up to r0p4.
++
++ When running a compat (AArch32) userspace on an affected Cortex-A53
++ part, a load at EL0 from a virtual address that matches the bottom 32
++ bits of the virtual address used by a recent load at (AArch64) EL1
++ might return incorrect data.
++
++ The workaround is to write the contextidr_el1 register on exception
++ return to a 32-bit task.
++ Please note that this does not necessarily enable the workaround,
++ as it depends on the alternative framework, which will only patch
++ the kernel if an affected CPU is detected.
++
++ If unsure, say Y.
++
+ endmenu
+
+
+@@ -470,6 +491,10 @@ config HOTPLUG_CPU
+
+ source kernel/Kconfig.preempt
+
++config UP_LATE_INIT
++ def_bool y
++ depends on !SMP
++
+ config HZ
+ int
+ default 100
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 69ceedc..4d2a925 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
+ core-$(CONFIG_XEN) += arch/arm64/xen/
+ core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
+ libs-y := arch/arm64/lib/ $(libs-y)
+-libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
++core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+ # Default target when executing plain make
+ KBUILD_IMAGE := Image.gz
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index b6c16d5..3f0c53c 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -23,8 +23,9 @@
+
+ #define ARM64_WORKAROUND_CLEAN_CACHE 0
+ #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
++#define ARM64_WORKAROUND_845719 2
+
+-#define ARM64_NCAPS 2
++#define ARM64_NCAPS 3
+
+ #ifndef __ASSEMBLY__
+
+diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
+index 59e2823..8dcd61e 100644
+--- a/arch/arm64/include/asm/smp_plat.h
++++ b/arch/arm64/include/asm/smp_plat.h
+@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
+ extern u64 __cpu_logical_map[NR_CPUS];
+ #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
++void __init do_post_cpus_up_work(void);
++
+ #endif /* __ASM_SMP_PLAT_H */
+diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
+index 3ef77a4..bc49a18 100644
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ 0
+ #define KVM_ARM_IRQ_CPU_FIQ 1
+
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX 127
++#endif
+
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE 0x95c1ba5e
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index fa62637..ad6d523 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
+ /* Cortex-A57 r0p0 - r1p2 */
+ .desc = "ARM erratum 832075",
+ .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
++ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
++ (1 << MIDR_VARIANT_SHIFT) | 2),
++ },
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_845719
++ {
++ /* Cortex-A53 r0p[01234] */
++ .desc = "ARM erratum 845719",
++ .capability = ARM64_WORKAROUND_845719,
++ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
+ },
+ #endif
+ {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index cf21bb3..959fe87 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -21,8 +21,10 @@
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+
++#include <asm/alternative-asm.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
++#include <asm/cpufeature.h>
+ #include <asm/errno.h>
+ #include <asm/esr.h>
+ #include <asm/thread_info.h>
+@@ -120,6 +122,24 @@
+ ct_user_enter
+ ldr x23, [sp, #S_SP] // load return stack pointer
+ msr sp_el0, x23
++
++#ifdef CONFIG_ARM64_ERRATUM_845719
++ alternative_insn \
++ "nop", \
++ "tbz x22, #4, 1f", \
++ ARM64_WORKAROUND_845719
++#ifdef CONFIG_PID_IN_CONTEXTIDR
++ alternative_insn \
++ "nop; nop", \
++ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
++ ARM64_WORKAROUND_845719
++#else
++ alternative_insn \
++ "nop", \
++ "msr contextidr_el1, xzr; 1:", \
++ ARM64_WORKAROUND_845719
++#endif
++#endif
+ .endif
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 07f9305..c237ffb 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -426,6 +426,7 @@ __create_page_tables:
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
++ dmb sy
+ bl __inval_cache_range
+
+ mov lr, x27
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index e8420f6..781f469 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void)
+ }
+ #endif
+
++void __init do_post_cpus_up_work(void)
++{
++ apply_alternatives_all();
++}
++
++#ifdef CONFIG_UP_LATE_INIT
++void __init up_late_init(void)
++{
++ do_post_cpus_up_work();
++}
++#endif /* CONFIG_UP_LATE_INIT */
++
+ static void __init setup_processor(void)
+ {
+ struct cpu_info *cpu_info;
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 328b8ce..4257369 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -309,7 +309,7 @@ void cpu_die(void)
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+- apply_alternatives_all();
++ do_post_cpus_up_work();
+ }
+
+ void __init smp_prepare_boot_cpu(void)
+diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
+index 356ee84..04845aa 100644
+--- a/arch/c6x/kernel/time.c
++++ b/arch/c6x/kernel/time.c
+@@ -49,7 +49,7 @@ u64 sched_clock(void)
+ return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+ }
+
+-void time_init(void)
++void __init time_init(void)
+ {
+ u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
+index e41c56e..1e38f0e 100644
+--- a/arch/mips/include/asm/asm-eva.h
++++ b/arch/mips/include/asm/asm-eva.h
+@@ -11,6 +11,36 @@
+ #define __ASM_ASM_EVA_H
+
+ #ifndef __ASSEMBLY__
++
++/* Kernel variants */
++
++#define kernel_cache(op, base) "cache " op ", " base "\n"
++#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
++#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
++#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
++#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
++#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
++#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
++#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
++#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
++#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
++#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
++#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
++#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
++#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr) user_sw(reg, addr)
++#define kernel_ld(reg, addr) user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
++#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+
+ #define __BUILD_EVA_INSN(insn, reg, addr) \
+@@ -41,37 +71,60 @@
+
+ #else
+
+-#define user_cache(op, base) "cache " op ", " base "\n"
+-#define user_ll(reg, addr) "ll " reg ", " addr "\n"
+-#define user_sc(reg, addr) "sc " reg ", " addr "\n"
+-#define user_lw(reg, addr) "lw " reg ", " addr "\n"
+-#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
+-#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
+-#define user_lh(reg, addr) "lh " reg ", " addr "\n"
+-#define user_lb(reg, addr) "lb " reg ", " addr "\n"
+-#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
+-#define user_sw(reg, addr) "sw " reg ", " addr "\n"
+-#define user_swl(reg, addr) "swl " reg ", " addr "\n"
+-#define user_swr(reg, addr) "swr " reg ", " addr "\n"
+-#define user_sh(reg, addr) "sh " reg ", " addr "\n"
+-#define user_sb(reg, addr) "sb " reg ", " addr "\n"
++#define user_cache(op, base) kernel_cache(op, base)
++#define user_ll(reg, addr) kernel_ll(reg, addr)
++#define user_sc(reg, addr) kernel_sc(reg, addr)
++#define user_lw(reg, addr) kernel_lw(reg, addr)
++#define user_lwl(reg, addr) kernel_lwl(reg, addr)
++#define user_lwr(reg, addr) kernel_lwr(reg, addr)
++#define user_lh(reg, addr) kernel_lh(reg, addr)
++#define user_lb(reg, addr) kernel_lb(reg, addr)
++#define user_lbu(reg, addr) kernel_lbu(reg, addr)
++#define user_sw(reg, addr) kernel_sw(reg, addr)
++#define user_swl(reg, addr) kernel_swl(reg, addr)
++#define user_swr(reg, addr) kernel_swr(reg, addr)
++#define user_sh(reg, addr) kernel_sh(reg, addr)
++#define user_sb(reg, addr) kernel_sb(reg, addr)
+
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr) user_sw(reg, addr)
+-#define user_ld(reg, addr) user_lw(reg, addr)
++#define user_sd(reg, addr) kernel_sw(reg, addr)
++#define user_ld(reg, addr) kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr) "sd " reg", " addr "\n"
+-#define user_ld(reg, addr) "ld " reg", " addr "\n"
++#define user_sd(reg, addr) kernel_sd(reg, addr)
++#define user_ld(reg, addr) kernel_ld(reg, addr)
+ #endif /* CONFIG_32BIT */
+
+ #endif /* CONFIG_EVA */
+
+ #else /* __ASSEMBLY__ */
+
++#define kernel_cache(op, base) cache op, base
++#define kernel_ll(reg, addr) ll reg, addr
++#define kernel_sc(reg, addr) sc reg, addr
++#define kernel_lw(reg, addr) lw reg, addr
++#define kernel_lwl(reg, addr) lwl reg, addr
++#define kernel_lwr(reg, addr) lwr reg, addr
++#define kernel_lh(reg, addr) lh reg, addr
++#define kernel_lb(reg, addr) lb reg, addr
++#define kernel_lbu(reg, addr) lbu reg, addr
++#define kernel_sw(reg, addr) sw reg, addr
++#define kernel_swl(reg, addr) swl reg, addr
++#define kernel_swr(reg, addr) swr reg, addr
++#define kernel_sh(reg, addr) sh reg, addr
++#define kernel_sb(reg, addr) sb reg, addr
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr) user_sw(reg, addr)
++#define kernel_ld(reg, addr) user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr) sd reg, addr
++#define kernel_ld(reg, addr) ld reg, addr
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+
+ #define __BUILD_EVA_INSN(insn, reg, addr) \
+@@ -101,31 +154,27 @@
+ #define user_sd(reg, addr) user_sw(reg, addr)
+ #else
+
+-#define user_cache(op, base) cache op, base
+-#define user_ll(reg, addr) ll reg, addr
+-#define user_sc(reg, addr) sc reg, addr
+-#define user_lw(reg, addr) lw reg, addr
+-#define user_lwl(reg, addr) lwl reg, addr
+-#define user_lwr(reg, addr) lwr reg, addr
+-#define user_lh(reg, addr) lh reg, addr
+-#define user_lb(reg, addr) lb reg, addr
+-#define user_lbu(reg, addr) lbu reg, addr
+-#define user_sw(reg, addr) sw reg, addr
+-#define user_swl(reg, addr) swl reg, addr
+-#define user_swr(reg, addr) swr reg, addr
+-#define user_sh(reg, addr) sh reg, addr
+-#define user_sb(reg, addr) sb reg, addr
++#define user_cache(op, base) kernel_cache(op, base)
++#define user_ll(reg, addr) kernel_ll(reg, addr)
++#define user_sc(reg, addr) kernel_sc(reg, addr)
++#define user_lw(reg, addr) kernel_lw(reg, addr)
++#define user_lwl(reg, addr) kernel_lwl(reg, addr)
++#define user_lwr(reg, addr) kernel_lwr(reg, addr)
++#define user_lh(reg, addr) kernel_lh(reg, addr)
++#define user_lb(reg, addr) kernel_lb(reg, addr)
++#define user_lbu(reg, addr) kernel_lbu(reg, addr)
++#define user_sw(reg, addr) kernel_sw(reg, addr)
++#define user_swl(reg, addr) kernel_swl(reg, addr)
++#define user_swr(reg, addr) kernel_swr(reg, addr)
++#define user_sh(reg, addr) kernel_sh(reg, addr)
++#define user_sb(reg, addr) kernel_sb(reg, addr)
+
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr) user_sw(reg, addr)
+-#define user_ld(reg, addr) user_lw(reg, addr)
++#define user_sd(reg, addr) kernel_sw(reg, addr)
++#define user_ld(reg, addr) kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr) sd reg, addr
+-#define user_ld(reg, addr) ld reg, addr
++#define user_sd(reg, addr) kernel_sd(reg, addr)
++#define user_ld(reg, addr) kernel_sd(reg, addr)
+ #endif /* CONFIG_32BIT */
+
+ #endif /* CONFIG_EVA */
+diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
+index dd083e9..9f26b07 100644
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
+ }
+ disable_msa();
+ clear_thread_flag(TIF_USEDMSA);
++ __disable_fpu();
+ } else if (is_fpu_owner()) {
+ if (save)
+ _save_fp(current);
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index ac4fc71..f722b05 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -322,6 +322,7 @@ enum mips_mmu_types {
+ #define T_TRAP 13 /* Trap instruction */
+ #define T_VCEI 14 /* Virtual coherency exception */
+ #define T_FPE 15 /* Floating point exception */
++#define T_MSADIS 21 /* MSA disabled exception */
+ #define T_WATCH 23 /* Watch address reference */
+ #define T_VCED 31 /* Virtual coherency data */
+
+@@ -578,6 +579,7 @@ struct kvm_mips_callbacks {
+ int (*handle_syscall)(struct kvm_vcpu *vcpu);
+ int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+ int (*handle_break)(struct kvm_vcpu *vcpu);
++ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+ int (*vm_init)(struct kvm *kvm);
+ int (*vcpu_init)(struct kvm_vcpu *vcpu);
+ int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index bbb6969..7659da2 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -109,10 +109,11 @@ static u32 unaligned_action;
+ extern void show_registers(struct pt_regs *regs);
+
+ #ifdef __BIG_ENDIAN
+-#define LoadHW(addr, value, res) \
++#define _LoadHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ (".set\tnoat\n" \
+- "1:\t"user_lb("%0", "0(%2)")"\n" \
+- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:\t"type##_lb("%0", "0(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -127,13 +128,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "(%2)")"\n" \
+- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -146,21 +149,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no lwl instruction */
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n" \
+ ".set\tnoat\n\t" \
+- "1:"user_lb("%0", "0(%2)")"\n\t" \
+- "2:"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:"type##_lb("%0", "0(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "2(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "3(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -178,14 +184,17 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+-#define LoadHWU(addr, value, res) \
++#define _LoadHWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_lbu("%0", "0(%2)")"\n" \
+- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -201,13 +210,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "(%2)")"\n" \
+- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+@@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, (%2)\n" \
+ "2:\tldr\t%0, 7(%2)\n\t" \
+@@ -240,21 +253,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_lbu("%0", "0(%2)")"\n\t" \
+- "2:"user_lbu("$1", "1(%2)")"\n\t" \
++ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "2(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "3(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -319,16 +337,19 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+
+-#define StoreHW(addr, value, res) \
++#define _StoreHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_sb("%1", "1(%2)")"\n" \
++ "1:\t"type##_sb("%1", "1(%2)")"\n" \
+ "srl\t$1, %1, 0x8\n" \
+- "2:\t"user_sb("$1", "0(%2)")"\n" \
++ "2:\t"type##_sb("$1", "0(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+@@ -342,13 +363,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT));\
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_swl("%1", "(%2)")"\n" \
+- "2:\t"user_swr("%1", "3(%2)")"\n\t" \
++ "1:\t"type##_swl("%1", "(%2)")"\n" \
++ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define StoreDW(addr, value, res) \
++#define _StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1,(%2)\n" \
+ "2:\tsdr\t%1, 7(%2)\n\t" \
+@@ -379,20 +404,23 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_sb("%1", "3(%2)")"\n\t" \
++ "1:"type##_sb("%1", "3(%2)")"\n\t" \
+ "srl\t$1, %1, 0x8\n\t" \
+- "2:"user_sb("$1", "2(%2)")"\n\t" \
++ "2:"type##_sb("$1", "2(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "3:"user_sb("$1", "1(%2)")"\n\t" \
++ "3:"type##_sb("$1", "1(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "4:"user_sb("$1", "0(%2)")"\n\t" \
++ "4:"type##_sb("$1", "0(%2)")"\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+@@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
+
+ #define StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -451,15 +481,18 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+ #else /* __BIG_ENDIAN */
+
+-#define LoadHW(addr, value, res) \
++#define _LoadHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ (".set\tnoat\n" \
+- "1:\t"user_lb("%0", "1(%2)")"\n" \
+- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
++ "1:\t"type##_lb("%0", "1(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -474,13 +507,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "3(%2)")"\n" \
+- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -493,21 +528,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no lwl instruction */
+-#define LoadW(addr, value, res) \
++#define _LoadW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n" \
+ ".set\tnoat\n\t" \
+- "1:"user_lb("%0", "3(%2)")"\n\t" \
+- "2:"user_lbu("$1", "2(%2)")"\n\t" \
++ "1:"type##_lb("%0", "3(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "1(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "0(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -525,15 +563,18 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+
+-#define LoadHWU(addr, value, res) \
++#define _LoadHWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_lbu("%0", "1(%2)")"\n" \
+- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
++ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
++ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -549,13 +590,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+ #ifndef CONFIG_CPU_MIPSR6
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_lwl("%0", "3(%2)")"\n" \
+- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
++ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
++ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+@@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, 7(%2)\n" \
+ "2:\tldr\t%0, (%2)\n\t" \
+@@ -588,21 +633,24 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has not lwl and ldl instructions */
+-#define LoadWU(addr, value, res) \
++#define _LoadWU(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_lbu("%0", "3(%2)")"\n\t" \
+- "2:"user_lbu("$1", "2(%2)")"\n\t" \
++ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
++ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "3:"user_lbu("$1", "1(%2)")"\n\t" \
++ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+- "4:"user_lbu("$1", "0(%2)")"\n\t" \
++ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+@@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define LoadDW(addr, value, res) \
++#define _LoadDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -667,15 +717,17 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+- : "r" (addr), "i" (-EFAULT));
++ : "r" (addr), "i" (-EFAULT)); \
++} while(0)
+ #endif /* CONFIG_CPU_MIPSR6 */
+
+-#define StoreHW(addr, value, res) \
++#define _StoreHW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+- "1:\t"user_sb("%1", "0(%2)")"\n" \
++ "1:\t"type##_sb("%1", "0(%2)")"\n" \
+ "srl\t$1,%1, 0x8\n" \
+- "2:\t"user_sb("$1", "1(%2)")"\n" \
++ "2:\t"type##_sb("$1", "1(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+@@ -689,12 +741,15 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT));\
++} while(0)
++
+ #ifndef CONFIG_CPU_MIPSR6
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+- "1:\t"user_swl("%1", "3(%2)")"\n" \
+- "2:\t"user_swr("%1", "(%2)")"\n\t" \
++ "1:\t"type##_swl("%1", "3(%2)")"\n" \
++ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+@@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
+
+-#define StoreDW(addr, value, res) \
++#define _StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1, 7(%2)\n" \
+ "2:\tsdr\t%1, (%2)\n\t" \
+@@ -725,20 +782,23 @@ extern void show_registers(struct pt_regs *regs);
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+- : "r" (value), "r" (addr), "i" (-EFAULT));
++ : "r" (value), "r" (addr), "i" (-EFAULT)); \
++} while(0)
++
+ #else
+ /* MIPSR6 has no swl and sdl instructions */
+-#define StoreW(addr, value, res) \
++#define _StoreW(addr, value, res, type) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+- "1:"user_sb("%1", "0(%2)")"\n\t" \
++ "1:"type##_sb("%1", "0(%2)")"\n\t" \
+ "srl\t$1, %1, 0x8\n\t" \
+- "2:"user_sb("$1", "1(%2)")"\n\t" \
++ "2:"type##_sb("$1", "1(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "3:"user_sb("$1", "2(%2)")"\n\t" \
++ "3:"type##_sb("$1", "2(%2)")"\n\t" \
+ "srl\t$1, $1, 0x8\n\t" \
+- "4:"user_sb("$1", "3(%2)")"\n\t" \
++ "4:"type##_sb("$1", "3(%2)")"\n\t" \
+ ".set\tpop\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+@@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
+
+-#define StoreDW(addr, value, res) \
++#define _StoreDW(addr, value, res) \
++do { \
+ __asm__ __volatile__ ( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+@@ -797,10 +859,28 @@ extern void show_registers(struct pt_regs *regs);
+ ".previous" \
+ : "=&r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+- : "memory");
++ : "memory"); \
++} while(0)
++
+ #endif /* CONFIG_CPU_MIPSR6 */
+ #endif
+
++#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
++#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
++#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
++#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
++#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
++#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
++#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
++#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
++#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
++
++#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
++#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
++#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
++#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
++#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
++
+ static void emulate_load_store_insn(struct pt_regs *regs,
+ void __user *addr, unsigned int __user *pc)
+ {
+@@ -872,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ set_fs(seg);
+ goto sigbus;
+ }
+- LoadHW(addr, value, res);
++ LoadHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -885,7 +965,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ set_fs(seg);
+ goto sigbus;
+ }
+- LoadW(addr, value, res);
++ LoadWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -898,7 +978,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ set_fs(seg);
+ goto sigbus;
+ }
+- LoadHWU(addr, value, res);
++ LoadHWUE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -913,7 +993,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+- StoreHW(addr, value, res);
++ StoreHWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -926,7 +1006,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+- StoreW(addr, value, res);
++ StoreWE(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+@@ -943,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+- LoadHW(addr, value, res);
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ LoadHW(addr, value, res);
++ else
++ LoadHWE(addr, value, res);
++ } else {
++ LoadHW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+@@ -954,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+- LoadW(addr, value, res);
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ LoadW(addr, value, res);
++ else
++ LoadWE(addr, value, res);
++ } else {
++ LoadW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+@@ -965,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+- LoadHWU(addr, value, res);
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ LoadHWU(addr, value, res);
++ else
++ LoadHWUE(addr, value, res);
++ } else {
++ LoadHWU(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+@@ -1024,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+- StoreHW(addr, value, res);
++
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ StoreHW(addr, value, res);
++ else
++ StoreHWE(addr, value, res);
++ } else {
++ StoreHW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ break;
+@@ -1035,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+- StoreW(addr, value, res);
++
++ if (config_enabled(CONFIG_EVA)) {
++ if (segment_eq(get_fs(), get_ds()))
++ StoreW(addr, value, res);
++ else
++ StoreWE(addr, value, res);
++ } else {
++ StoreW(addr, value, res);
++ }
++
+ if (res)
+ goto fault;
+ break;
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index fb3e8df..838d3a6 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+ case T_SYSCALL:
+ case T_BREAK:
+ case T_RES_INST:
++ case T_MSADIS:
+ break;
+
+ case T_COP_UNUSABLE:
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index c9eccf5..f5e7dda 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ ret = kvm_mips_callbacks->handle_break(vcpu);
+ break;
+
++ case T_MSADIS:
++ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
++ break;
++
+ default:
+ kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
+index fd7257b..4372cc8 100644
+--- a/arch/mips/kvm/trap_emul.c
++++ b/arch/mips/kvm/trap_emul.c
+@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+ return ret;
+ }
+
++static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
++{
++ struct kvm_run *run = vcpu->run;
++ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
++ unsigned long cause = vcpu->arch.host_cp0_cause;
++ enum emulation_result er = EMULATE_DONE;
++ int ret = RESUME_GUEST;
++
++ /* No MSA supported in guest, guest reserved instruction exception */
++ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
++
++ switch (er) {
++ case EMULATE_DONE:
++ ret = RESUME_GUEST;
++ break;
++
++ case EMULATE_FAIL:
++ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++ ret = RESUME_HOST;
++ break;
++
++ default:
++ BUG();
++ }
++ return ret;
++}
++
+ static int kvm_trap_emul_vm_init(struct kvm *kvm)
+ {
+ return 0;
+@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+ .handle_syscall = kvm_trap_emul_handle_syscall,
+ .handle_res_inst = kvm_trap_emul_handle_res_inst,
+ .handle_break = kvm_trap_emul_handle_break,
++ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+
+ .vm_init = kvm_trap_emul_vm_init,
+ .vcpu_init = kvm_trap_emul_vcpu_init,
+diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
+index 21221ed..0f75b6b 100644
+--- a/arch/mips/loongson/loongson-3/irq.c
++++ b/arch/mips/loongson/loongson-3/irq.c
+@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
+
+ static struct irqaction cascade_irqaction = {
+ .handler = no_action,
++ .flags = IRQF_NO_SUSPEND,
+ .name = "cascade",
+ };
+
+diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
+index 8fddd2cd..efe366d 100644
+--- a/arch/mips/mti-malta/malta-memory.c
++++ b/arch/mips/mti-malta/malta-memory.c
+@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
+ pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
+ physical_memsize = 0x02000000;
+ } else {
++ if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
++ pr_warn("Unsupported memsize value (0x%lx) detected! "
++ "Using 0x10000000 (256M) instead\n",
++ memsize);
++ memsize = 256 << 20;
++ }
+ /* If ememsize is set, then set physical_memsize to that */
+ physical_memsize = ememsize ? : memsize;
+ }
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 32a7c82..e7567c8 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+
+ LEAF(swsusp_arch_resume)
++ /* Avoid TLB mismatch during and after kernel resume */
++ jal local_flush_tlb_all
+ PTR_L t0, restore_pblist
+ 0:
+ PTR_L t1, PBE_ADDRESS(t0) /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
+- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index ae77b7e..c641983 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -61,12 +61,22 @@ struct cache_type_info {
+ };
+
+ /* These are used to index the cache_type_info array. */
+-#define CACHE_TYPE_UNIFIED 0
+-#define CACHE_TYPE_INSTRUCTION 1
+-#define CACHE_TYPE_DATA 2
++#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
++#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
++#define CACHE_TYPE_INSTRUCTION 2
++#define CACHE_TYPE_DATA 3
+
+ static const struct cache_type_info cache_type_info[] = {
+ {
++ /* Embedded systems that use cache-size, cache-block-size,
++ * etc. for the Unified (typically L2) cache. */
++ .name = "Unified",
++ .size_prop = "cache-size",
++ .line_size_props = { "cache-line-size",
++ "cache-block-size", },
++ .nr_sets_prop = "cache-sets",
++ },
++ {
+ /* PowerPC Processor binding says the [di]-cache-*
+ * must be equal on unified caches, so just use
+ * d-cache properties. */
+@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
+ {
+ struct cache *iter;
+
+- if (cache->type == CACHE_TYPE_UNIFIED)
++ if (cache->type == CACHE_TYPE_UNIFIED ||
++ cache->type == CACHE_TYPE_UNIFIED_D)
+ return cache;
+
+ list_for_each_entry(iter, &cache_list, list)
+@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
+ return of_get_property(np, "cache-unified", NULL);
+ }
+
+-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+- int level)
++/*
++ * Unified caches can have two different sets of tags. Most embedded
++ * use cache-size, etc. for the unified cache size, but open firmware systems
++ * use d-cache-size, etc. Check on initialization for which type we have, and
++ * return the appropriate structure type. Assume it's embedded if it isn't
++ * open firmware. If it's yet a 3rd type, then there will be missing entries
++ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
++ * to be extended further.
++ */
++static int cache_is_unified_d(const struct device_node *np)
+ {
+- struct cache *cache;
++ return of_get_property(np,
++ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
++ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
++}
+
++/*
++ */
++static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
++{
+ pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+-
+- return cache;
++ return new_cache(cache_is_unified_d(node), level, node);
+ }
+
+ static struct cache *cache_do_one_devnode_split(struct device_node *node,
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 7e408bf..cecbe00 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+ pmd = pmd_offset(pud, start);
+ pud_clear(pud);
+ pmd_free_tlb(tlb, pmd, start);
++ mm_dec_nr_pmds(tlb->mm);
+ }
+
+ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 2396dda..ead5535 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ sp = regs->gpr[1];
+ perf_callchain_store(entry, next_ip);
+
+- for (;;) {
++ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ fp = (unsigned long __user *) sp;
+ if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ return;
+diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
+index 4c11421..3af8324 100644
+--- a/arch/powerpc/platforms/cell/interrupt.c
++++ b/arch/powerpc/platforms/cell/interrupt.c
+@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
+
+ void iic_setup_cpu(void)
+ {
+- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
++ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
+ }
+
+ u8 iic_get_target_id(int cpu)
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index c7c8720..63db1b0 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
+
+ io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+
+- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
++ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
+
+ mb();
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 6c9ff2b..1d9369e 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
+ region.start += phb->ioda.io_segsize;
+ index++;
+ }
+- } else if (res->flags & IORESOURCE_MEM) {
++ } else if ((res->flags & IORESOURCE_MEM) &&
++ !pnv_pci_is_mem_pref_64(res->flags)) {
+ region.start = res->start -
+ hose->mem_offset[0] -
+ phb->ioda.m32_pci_base;
+diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
+index 1c4c5ac..d3236c9 100644
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ /* Always save lowcore pages (LC protection might be enabled). */
+ if (pfn <= LC_PAGES)
+@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++ if (pfn >= stext_pfn && pfn <= eshared_pfn)
++ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ if (tprot(PFN_PHYS(pfn)))
+ return 1;
+ return 0;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 073b5f3..e7bc2fd 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -17,6 +17,7 @@
+ #include <linux/signal.h>
+ #include <linux/slab.h>
+ #include <linux/bitmap.h>
++#include <linux/vmalloc.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/uaccess.h>
+ #include <asm/sclp.h>
+@@ -1332,10 +1333,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ return rc;
+ }
+
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
++int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti)
+ {
+- __inject_vm(kvm, inti);
++ return __inject_vm(kvm, inti);
+ }
+
+ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+@@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
+ spin_unlock(&fi->lock);
+ }
+
+-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
+- u8 *addr)
++static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
++ struct kvm_s390_irq *irq)
+ {
+- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+- struct kvm_s390_irq irq = {0};
+-
+- irq.type = inti->type;
++ irq->type = inti->type;
+ switch (inti->type) {
+ case KVM_S390_INT_PFAULT_INIT:
+ case KVM_S390_INT_PFAULT_DONE:
+ case KVM_S390_INT_VIRTIO:
+ case KVM_S390_INT_SERVICE:
+- irq.u.ext = inti->ext;
++ irq->u.ext = inti->ext;
+ break;
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+- irq.u.io = inti->io;
++ irq->u.io = inti->io;
+ break;
+ case KVM_S390_MCHK:
+- irq.u.mchk = inti->mchk;
++ irq->u.mchk = inti->mchk;
+ break;
+- default:
+- return -EINVAL;
+ }
+-
+- if (copy_to_user(uptr, &irq, sizeof(irq)))
+- return -EFAULT;
+-
+- return 0;
+ }
+
+-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
++static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
+ {
+ struct kvm_s390_interrupt_info *inti;
+ struct kvm_s390_float_interrupt *fi;
++ struct kvm_s390_irq *buf;
++ int max_irqs;
+ int ret = 0;
+ int n = 0;
+
++ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
++ return -EINVAL;
++
++ /*
++ * We are already using -ENOMEM to signal
++ * userspace it may retry with a bigger buffer,
++ * so we need to use something else for this case
++ */
++ buf = vzalloc(len);
++ if (!buf)
++ return -ENOBUFS;
++
++ max_irqs = len / sizeof(struct kvm_s390_irq);
++
+ fi = &kvm->arch.float_int;
+ spin_lock(&fi->lock);
+-
+ list_for_each_entry(inti, &fi->list, list) {
+- if (len < sizeof(struct kvm_s390_irq)) {
++ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ break;
+ }
+- ret = copy_irq_to_user(inti, buf);
+- if (ret)
+- break;
+- buf += sizeof(struct kvm_s390_irq);
+- len -= sizeof(struct kvm_s390_irq);
++ inti_to_irq(inti, &buf[n]);
+ n++;
+ }
+-
+ spin_unlock(&fi->lock);
++ if (!ret && n > 0) {
++ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
++ ret = -EFAULT;
++ }
++ vfree(buf);
+
+ return ret < 0 ? ret : n;
+ }
+@@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+
+ switch (attr->group) {
+ case KVM_DEV_FLIC_GET_ALL_IRQS:
+- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
++ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
+ attr->attr);
+ break;
+ default:
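The rewritten get_all_floating_irqs() above gathers the pending interrupts into a vzalloc()ed buffer while holding fi->lock and only calls copy_to_user() after the lock is dropped (copy_to_user() can fault and therefore must not run under a spinlock); -ENOBUFS is used for the allocation failure because -ENOMEM already means "retry with a larger buffer". A rough userspace sketch of that snapshot-then-copy pattern, with invented names and a pthread mutex standing in for the spinlock, could look like this:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct irq_info { int type; unsigned long parm; };

struct irq_node {
	struct irq_info info;
	struct irq_node *next;
};

static struct irq_node *pending_list;	/* guarded by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Returns the number of entries copied, -ENOMEM if the caller's buffer is
 * too small (retry with a bigger one), or -ENOBUFS if the temporary buffer
 * cannot be allocated -- mirroring the error convention in the hunk above.
 */
static int snapshot_irqs(struct irq_info *out, size_t max_irqs)
{
	struct irq_info *buf;
	struct irq_node *it;
	size_t n = 0;
	int ret = 0;

	buf = calloc(max_irqs, sizeof(*buf));
	if (!buf)
		return -ENOBUFS;

	pthread_mutex_lock(&list_lock);
	for (it = pending_list; it; it = it->next) {
		if (n == max_irqs) {
			ret = -ENOMEM;	/* signal the caller to retry */
			break;
		}
		buf[n++] = it->info;
	}
	pthread_mutex_unlock(&list_lock);

	/* the potentially faulting copy happens with the lock dropped */
	if (!ret && n > 0)
		memcpy(out, buf, n * sizeof(*buf));
	free(buf);

	return ret ? ret : (int)n;
}

int main(void)
{
	struct irq_node a = { { 1, 0x1234 }, NULL };
	struct irq_info out[4];
	int n;

	pending_list = &a;
	n = snapshot_irqs(out, 4);
	printf("copied %d entr%s\n", n, n == 1 ? "y" : "ies");
	return 0;
}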
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index c34109a..6995a30 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ u64 cr6, u64 schid);
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
+- struct kvm_s390_interrupt_info *inti);
++int kvm_s390_reinject_io_int(struct kvm *kvm,
++ struct kvm_s390_interrupt_info *inti);
+ int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
+
+ /* implemented in intercept.c */
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 3511169..b982fbc 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
+ struct kvm_s390_interrupt_info *inti;
+ unsigned long len;
+ u32 tpi_data[3];
+- int cc, rc;
++ int rc;
+ u64 addr;
+
+- rc = 0;
+ addr = kvm_s390_get_base_disp_s(vcpu);
+ if (addr & 3)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+- cc = 0;
++
+ inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
+- if (!inti)
+- goto no_interrupt;
+- cc = 1;
++ if (!inti) {
++ kvm_s390_set_psw_cc(vcpu, 0);
++ return 0;
++ }
++
+ tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
+ tpi_data[1] = inti->io.io_int_parm;
+ tpi_data[2] = inti->io.io_int_word;
+@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
+ */
+ len = sizeof(tpi_data) - 4;
+ rc = write_guest(vcpu, addr, &tpi_data, len);
+- if (rc)
+- return kvm_s390_inject_prog_cond(vcpu, rc);
++ if (rc) {
++ rc = kvm_s390_inject_prog_cond(vcpu, rc);
++ goto reinject_interrupt;
++ }
+ } else {
+ /*
+ * Store the three-word I/O interruption code into
+ * the appropriate lowcore area.
+ */
+ len = sizeof(tpi_data);
+- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
++ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
++ /* failed writes to the low core are not recoverable */
+ rc = -EFAULT;
++ goto reinject_interrupt;
++ }
+ }
++
++ /* irq was successfully handed to the guest */
++ kfree(inti);
++ kvm_s390_set_psw_cc(vcpu, 1);
++ return 0;
++reinject_interrupt:
+ /*
+ * If we encounter a problem storing the interruption code, the
+ * instruction is suppressed from the guest's view: reinject the
+ * interrupt.
+ */
+- if (!rc)
++ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
+ kfree(inti);
+- else
+- kvm_s390_reinject_io_int(vcpu->kvm, inti);
+-no_interrupt:
+- /* Set condition code and we're done. */
+- if (!rc)
+- kvm_s390_set_psw_cc(vcpu, cc);
++ rc = -EFAULT;
++ }
++ /* don't set the cc, a pgm irq was injected or we drop to user space */
+ return rc ? -EFAULT : 0;
+ }
+
+@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+ for (n = mem->count - 1; n > 0 ; n--)
+ memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
++ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+ mem->vm[0].cpus_total = cpus;
+ mem->vm[0].cpus_configured = cpus;
+ mem->vm[0].cpus_standby = 0;
+diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
+index 47f29b1..e7814b7 100644
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -69,7 +69,7 @@ struct insn {
+ const insn_byte_t *next_byte;
+ };
+
+-#define MAX_INSN_SIZE 16
++#define MAX_INSN_SIZE 15
+
+ #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+ #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index a1410db..653dfa7 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ :: "a" (eax), "c" (ecx));
+ }
+
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++ trace_hardirqs_on();
++ /* "mwait %eax, %ecx;" */
++ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
++ :: "a" (eax), "c" (ecx));
++}
++
+ /*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
+index d6b078e..25b1cc0 100644
+--- a/arch/x86/include/asm/pvclock.h
++++ b/arch/x86/include/asm/pvclock.h
+@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+
+ struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
++ u32 migrate_count;
+ } __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+ #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 0739833..666bcf1 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
++ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 046e2d6..a388bb8 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -24,6 +24,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/idle.h>
+ #include <asm/uaccess.h>
++#include <asm/mwait.h>
+ #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
+ #include <asm/debugreg.h>
+@@ -399,6 +400,53 @@ static void amd_e400_idle(void)
+ default_idle();
+ }
+
++/*
++ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
++ * We can't rely on cpuidle installing MWAIT, because it will not load
++ * on systems that support only C1 -- so the boot default must be MWAIT.
++ *
++ * Some AMD machines are the opposite, they depend on using HALT.
++ *
++ * So for default C1, which is used during boot until cpuidle loads,
++ * use MWAIT-C1 on Intel HW that has it, else use HALT.
++ */
++static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
++{
++ if (c->x86_vendor != X86_VENDOR_INTEL)
++ return 0;
++
++ if (!cpu_has(c, X86_FEATURE_MWAIT))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * MONITOR/MWAIT with no hints, used for the default C1 state.
++ * This invokes MWAIT with interrupts enabled and no flags,
++ * which is backwards compatible with the original MWAIT implementation.
++ */
++
++static void mwait_idle(void)
++{
++ if (!current_set_polling_and_test()) {
++ if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
++ smp_mb(); /* quirk */
++ clflush((void *)&current_thread_info()->flags);
++ smp_mb(); /* quirk */
++ }
++
++ __monitor((void *)&current_thread_info()->flags, 0, 0);
++ if (!need_resched())
++ __sti_mwait(0, 0);
++ else
++ local_irq_enable();
++ } else {
++ local_irq_enable();
++ }
++ __current_clr_polling();
++}
++
+ void select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+@@ -412,6 +460,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
+ /* E400: APIC timer interrupt does not wake up CPU from C1e */
+ pr_info("using AMD E400 aware idle routine\n");
+ x86_idle = amd_e400_idle;
++ } else if (prefer_mwait_c1_over_halt(c)) {
++ pr_info("using mwait in idle threads\n");
++ x86_idle = mwait_idle;
+ } else
+ x86_idle = default_idle;
+ }
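The mwait_idle() backport above is only installed when prefer_mwait_c1_over_halt() sees an Intel CPU that advertises MWAIT. A standalone sketch of that vendor-plus-feature check, reading CPUID from userspace instead of cpuinfo_x86 (illustration only, not kernel code):

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

static int prefer_mwait_c1_over_halt(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 0;
	memcpy(vendor + 0, &ebx, 4);	/* vendor string is EBX, EDX, ECX */
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	if (strcmp(vendor, "GenuineIntel"))
		return 0;			/* not Intel: keep HALT */

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 0;
	return !!(ecx & (1u << 3));		/* CPUID.1:ECX.MONITOR (bit 3) */
}

int main(void)
{
	printf("would use %s for C1\n",
	       prefer_mwait_c1_over_halt() ? "mwait" : "halt");
	return 0;
}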
+diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
+index 2f355d2..e5ecd20 100644
+--- a/arch/x86/kernel/pvclock.c
++++ b/arch/x86/kernel/pvclock.c
+@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
+ set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+ }
+
++static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
++
++static struct pvclock_vsyscall_time_info *
++pvclock_get_vsyscall_user_time_info(int cpu)
++{
++ if (!pvclock_vdso_info) {
++ BUG();
++ return NULL;
++ }
++
++ return &pvclock_vdso_info[cpu];
++}
++
++struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
++{
++ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
++}
++
+ #ifdef CONFIG_X86_64
++static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
++ void *v)
++{
++ struct task_migration_notifier *mn = v;
++ struct pvclock_vsyscall_time_info *pvti;
++
++ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
++
++ /* this is NULL when pvclock vsyscall is not initialized */
++ if (unlikely(pvti == NULL))
++ return NOTIFY_DONE;
++
++ pvti->migrate_count++;
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block pvclock_migrate = {
++ .notifier_call = pvclock_task_migrate,
++};
++
+ /*
+ * Initialize the generic pvclock vsyscall state. This will allocate
+ * a/some page(s) for the per-vcpu pvclock information, set up a
+@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+
+ WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
+
++ pvclock_vdso_info = i;
++
+ for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
+ __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
+ __pa(i) + (idx*PAGE_SIZE),
+ PAGE_KERNEL_VVAR);
+ }
+
++
++ register_task_migration_notifier(&pvclock_migrate);
++
+ return 0;
+ }
+ #endif
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ae4f6d3..a60bd3a 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+
+ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+- unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
+- KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
++ /*
++ * Pass through host's Machine Check Enable value to hw_cr4, which
++ * is in force while we are in guest mode. Do not let guests control
++ * this bit, even if host CR4.MCE == 0.
++ */
++ unsigned long hw_cr4 =
++ (cr4_read_shadow() & X86_CR4_MCE) |
++ (cr4 & ~X86_CR4_MCE) |
++ (to_vmx(vcpu)->rmode.vm86_active ?
++ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+
+ if (cr4 & X86_CR4_VMXE) {
+ /*
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 32bf19e..e222ba5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5775,7 +5775,6 @@ int kvm_arch_init(void *opaque)
+ kvm_set_mmio_spte_mask();
+
+ kvm_x86_ops = ops;
+- kvm_init_msr_list();
+
+ kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+ PT_DIRTY_MASK, PT64_NX_MASK, 0);
+@@ -7209,7 +7208,14 @@ void kvm_arch_hardware_disable(void)
+
+ int kvm_arch_hardware_setup(void)
+ {
+- return kvm_x86_ops->hardware_setup();
++ int r;
++
++ r = kvm_x86_ops->hardware_setup();
++ if (r != 0)
++ return r;
++
++ kvm_init_msr_list();
++ return 0;
+ }
+
+ void kvm_arch_hardware_unsetup(void)
+diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
+index 1313ae6..85994f5 100644
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -52,6 +52,13 @@
+ */
+ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+ {
++ /*
++ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
++ * even if the input buffer is long enough to hold them.
++ */
++ if (buf_len > MAX_INSN_SIZE)
++ buf_len = MAX_INSN_SIZE;
++
+ memset(insn, 0, sizeof(*insn));
+ insn->kaddr = kaddr;
+ insn->end_kaddr = kaddr + buf_len;
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 1f33b3d..0a42327 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
+ clac();
+
+ /* If the destination is a kernel buffer, we always clear the end */
+- if ((unsigned long)to >= TASK_SIZE_MAX)
++ if (!__addr_ok(to))
+ memset(to, 0, len);
+ return len;
+ }
+diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
+index 9793322..40d2473 100644
+--- a/arch/x86/vdso/vclock_gettime.c
++++ b/arch/x86/vdso/vclock_gettime.c
+@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
+ cycle_t ret;
+ u64 last;
+ u32 version;
++ u32 migrate_count;
+ u8 flags;
+ unsigned cpu, cpu1;
+
+
+ /*
+- * Note: hypervisor must guarantee that:
+- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
+- * 2. that per-CPU pvclock time info is updated if the
+- * underlying CPU changes.
+- * 3. that version is increased whenever underlying CPU
+- * changes.
+- *
++ * When looping to get a consistent (time-info, tsc) pair, we
++ * also need to deal with the possibility we can switch vcpus,
++ * so make sure we always re-fetch time-info for the current vcpu.
+ */
+ do {
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
+ * __getcpu() calls (Gleb).
+ */
+
+- pvti = get_pvti(cpu);
++ /* Make sure migrate_count will change if we leave the VCPU. */
++ do {
++ pvti = get_pvti(cpu);
++ migrate_count = pvti->migrate_count;
++
++ cpu1 = cpu;
++ cpu = __getcpu() & VGETCPU_CPU_MASK;
++ } while (unlikely(cpu != cpu1));
+
+ version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
+
+ /*
+ * Test we're still on the cpu as well as the version.
+- * We could have been migrated just after the first
+- * vgetcpu but before fetching the version, so we
+- * wouldn't notice a version change.
++ * - We must read TSC of pvti's VCPU.
++ * - KVM doesn't follow the versioning protocol, so data could
++ * change before version if we left the VCPU.
+ */
+- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+- } while (unlikely(cpu != cpu1 ||
+- (pvti->pvti.version & 1) ||
+- pvti->pvti.version != version));
++ smp_rmb();
++ } while (unlikely((pvti->pvti.version & 1) ||
++ pvti->pvti.version != version ||
++ pvti->migrate_count != migrate_count));
+
+ if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
+ *mode = VCLOCK_NONE;
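The reworked vread_pvclock() loop above keeps re-reading until it holds a snapshot whose version is even and whose version and migrate_count both still match what it saw before reading the payload. Stripped of the pvclock specifics, the retry shape is a seqcount-style reader; a minimal sketch with invented field names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct time_info {
	_Atomic uint32_t version;	/* odd while the producer is updating */
	_Atomic uint32_t migrate_count;	/* bumped when the task leaves the vCPU */
	_Atomic uint64_t tsc_to_ns;	/* stand-in for the real payload */
};

static uint64_t read_time(struct time_info *ti)
{
	uint32_t ver, mig;
	uint64_t val;

	do {
		ver = atomic_load_explicit(&ti->version, memory_order_acquire);
		mig = atomic_load_explicit(&ti->migrate_count,
					   memory_order_acquire);
		val = atomic_load_explicit(&ti->tsc_to_ns,
					   memory_order_relaxed);
		/* order the payload read before the re-check, like smp_rmb() */
		atomic_thread_fence(memory_order_acquire);
	} while ((ver & 1) ||
		 ver != atomic_load_explicit(&ti->version,
					     memory_order_relaxed) ||
		 mig != atomic_load_explicit(&ti->migrate_count,
					     memory_order_relaxed));

	return val;
}

int main(void)
{
	struct time_info ti = { 2, 0, 42 };

	printf("payload: %llu\n", (unsigned long long)read_time(&ti));
	return 0;
}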
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index e31d494..87be10e 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE
+
+ If unsure, leave the default value here.
+
++config XTFPGA_LCD
++ bool "Enable XTFPGA LCD driver"
++ depends on XTENSA_PLATFORM_XTFPGA
++ default n
++ help
++	  There's a 2x16 LCD on most XTFPGA boards; the kernel may output
++	  progress messages there during bootup/shutdown. It may be useful
++	  during board bringup.
++
++ If unsure, say N.
++
++config XTFPGA_LCD_BASE_ADDR
++ hex "XTFPGA LCD base address"
++ depends on XTFPGA_LCD
++ default "0x0d0c0000"
++ help
++	  Base address of the LCD controller inside the KIO region.
++	  Different boards in the XTFPGA family have the LCD controller at
++	  different addresses. Please consult the prototyping user guide for
++	  your board for the correct address. A wrong address here may lead to a hardware lockup.
++
++config XTFPGA_LCD_8BIT_ACCESS
++ bool "Use 8-bit access to XTFPGA LCD"
++ depends on XTFPGA_LCD
++ default n
++ help
++	  The LCD may be connected with a 4- or 8-bit interface; 8-bit access
++	  may only be used with the 8-bit interface. Please consult the
++	  prototyping user guide for your board for the correct interface width.
++
+ endmenu
+
+ menu "Executable file formats"
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index db5bb72..62d8465 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
+ __SYSCALL(324, sys_name_to_handle_at, 5)
+ #define __NR_open_by_handle_at 325
+ __SYSCALL(325, sys_open_by_handle_at, 3)
+-#define __NR_sync_file_range 326
++#define __NR_sync_file_range2 326
+ __SYSCALL(326, sys_sync_file_range2, 6)
+ #define __NR_perf_event_open 327
+ __SYSCALL(327, sys_perf_event_open, 5)
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index d05f8fe..17b1ef3 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
+ {
+ struct iss_net_private *lp = (struct iss_net_private *)priv;
+
+- spin_lock(&lp->lock);
+ iss_net_poll();
++ spin_lock(&lp->lock);
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+ spin_unlock(&lp->lock);
+ }
+@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
+ struct iss_net_private *lp = netdev_priv(dev);
+ int err;
+
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ err = lp->tp.open(lp);
+ if (err < 0)
+@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
+ while ((err = iss_net_rx(dev)) > 0)
+ ;
+
+- spin_lock(&opened_lock);
++ spin_unlock_bh(&lp->lock);
++ spin_lock_bh(&opened_lock);
+ list_add(&lp->opened_list, &opened);
+- spin_unlock(&opened_lock);
++ spin_unlock_bh(&opened_lock);
++ spin_lock_bh(&lp->lock);
+
+ init_timer(&lp->timer);
+ lp->timer_val = ISS_NET_TIMER_VALUE;
+@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+ out:
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return err;
+ }
+
+@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ spin_lock(&opened_lock);
+ list_del(&opened);
+@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
+
+ lp->tp.close(lp);
+
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+- unsigned long flags;
+ int len;
+
+ netif_stop_queue(dev);
+- spin_lock_irqsave(&lp->lock, flags);
++ spin_lock_bh(&lp->lock);
+
+ len = lp->tp.write(lp, &skb);
+
+@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
+ }
+
+- spin_unlock_irqrestore(&lp->lock, flags);
++ spin_unlock_bh(&lp->lock);
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
+ *lp = (struct iss_net_private) {
+ .device_list = LIST_HEAD_INIT(lp->device_list),
+ .opened_list = LIST_HEAD_INIT(lp->opened_list),
+- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
+ .dev = dev,
+ .index = index,
+- };
++ };
+
++ spin_lock_init(&lp->lock);
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
+index b9ae206..7839d38 100644
+--- a/arch/xtensa/platforms/xtfpga/Makefile
++++ b/arch/xtensa/platforms/xtfpga/Makefile
+@@ -6,4 +6,5 @@
+ #
+ # Note 2! The CFLAGS definitions are in the main makefile...
+
+-obj-y = setup.o lcd.o
++obj-y += setup.o
++obj-$(CONFIG_XTFPGA_LCD) += lcd.o
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+index 6edd20b..4e0af26 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+@@ -40,9 +40,6 @@
+
+ /* UART */
+ #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+-/* LCD instruction and data addresses. */
+-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
+-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
+
+ /* Misc. */
+ #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+index 0e43564..4c8541e 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+@@ -11,10 +11,25 @@
+ #ifndef __XTENSA_XTAVNET_LCD_H
+ #define __XTENSA_XTAVNET_LCD_H
+
++#ifdef CONFIG_XTFPGA_LCD
+ /* Display string STR at position POS on the LCD. */
+ void lcd_disp_at_pos(char *str, unsigned char pos);
+
+ /* Shift the contents of the LCD display left or right. */
+ void lcd_shiftleft(void);
+ void lcd_shiftright(void);
++#else
++static inline void lcd_disp_at_pos(char *str, unsigned char pos)
++{
++}
++
++static inline void lcd_shiftleft(void)
++{
++}
++
++static inline void lcd_shiftright(void)
++{
++}
++#endif
++
+ #endif
+diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
+index 2872301..4dc0c1b 100644
+--- a/arch/xtensa/platforms/xtfpga/lcd.c
++++ b/arch/xtensa/platforms/xtfpga/lcd.c
+@@ -1,50 +1,63 @@
+ /*
+- * Driver for the LCD display on the Tensilica LX60 Board.
++ * Driver for the LCD display on the Tensilica XTFPGA board family.
++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
++ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+-/*
+- *
+- * FIXME: this code is from the examples from the LX60 user guide.
+- *
+- * The lcd_pause function does busy waiting, which is probably not
+- * great. Maybe the code could be changed to use kernel timers, or
+- * change the hardware to not need to wait.
+- */
+-
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+
+ #include <platform/hardware.h>
+ #include <platform/lcd.h>
+-#include <linux/delay.h>
+
+-#define LCD_PAUSE_ITERATIONS 4000
++/* LCD instruction and data addresses. */
++#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
++#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
++
+ #define LCD_CLEAR 0x1
+ #define LCD_DISPLAY_ON 0xc
+
+ /* 8bit and 2 lines display */
+ #define LCD_DISPLAY_MODE8BIT 0x38
++#define LCD_DISPLAY_MODE4BIT 0x28
+ #define LCD_DISPLAY_POS 0x80
+ #define LCD_SHIFT_LEFT 0x18
+ #define LCD_SHIFT_RIGHT 0x1c
+
++static void lcd_put_byte(u8 *addr, u8 data)
++{
++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*addr) = data;
++#else
++ ACCESS_ONCE(*addr) = data & 0xf0;
++ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
++#endif
++}
++
+ static int __init lcd_init(void)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
++ udelay(50);
++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
++ udelay(50);
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
++#endif
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_CLEAR;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+@@ -52,10 +65,10 @@ static int __init lcd_init(void)
+
+ void lcd_disp_at_pos(char *str, unsigned char pos)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ udelay(100);
+ while (*str != 0) {
+- *LCD_DATA_ADDR = *str;
++ lcd_put_byte(LCD_DATA_ADDR, *str);
+ udelay(200);
+ str++;
+ }
+@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
+
+ void lcd_shiftleft(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ udelay(50);
+ }
+
+ void lcd_shiftright(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ udelay(50);
+ }
+
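The new lcd_put_byte() above hides the interface width: with CONFIG_XTFPGA_LCD_8BIT_ACCESS the byte goes out in a single access, otherwise it is split into two writes with each nibble presented on the upper four data lines. A toy, host-side sketch of that split (the "bus" is just an array; names are made up):

#include <stdint.h>
#include <stdio.h>

#define LCD_4BIT_MODE 1	/* set to 0 to model CONFIG_XTFPGA_LCD_8BIT_ACCESS */

static uint8_t bus_trace[2];
static int bus_writes;

/* stands in for "ACCESS_ONCE(*addr) = data" in the hunk above */
static void bus_write(uint8_t v)
{
	if (bus_writes < 2)
		bus_trace[bus_writes++] = v;
}

static void lcd_put_byte(uint8_t data)
{
#if LCD_4BIT_MODE
	bus_write(data & 0xf0);		/* high nibble on the upper data lines */
	bus_write((data << 4) & 0xf0);	/* low nibble, shifted up */
#else
	bus_write(data);
#endif
}

int main(void)
{
	int i;

	lcd_put_byte(0x38);	/* LCD_DISPLAY_MODE8BIT from the patch above */
	for (i = 0; i < bus_writes; i++)
		printf("write %d: 0x%02x\n", i, bus_trace[i]);
	return 0;
}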
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index 5ed064e..ccf7932 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
+ ACPI_SET_BIT(gpe_register_info->enable_for_run,
+ (u8)register_bit);
+ }
++ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
+
+ return_ACPI_STATUS(AE_OK);
+ }
+@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+
+ /* Enable the requested GPE */
+
+- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
++ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ return_ACPI_STATUS(status);
+ }
+
+@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+ if (ACPI_SUCCESS(status)) {
+ status =
+ acpi_hw_low_set_gpe(gpe_event_info,
+- ACPI_GPE_DISABLE_SAVE);
++ ACPI_GPE_DISABLE);
+ }
+
+ if (ACPI_FAILURE(status)) {
+diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
+index 84bc550..af6514e 100644
+--- a/drivers/acpi/acpica/hwgpe.c
++++ b/drivers/acpi/acpica/hwgpe.c
+@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
++ * The enable_mask field of the involved GPE register must be
++ * updated by the caller if necessary.
+ *
+ ******************************************************************************/
+
+@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
+ /* Set or clear just the bit that corresponds to this GPE */
+
+ register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
+- switch (action & ~ACPI_GPE_SAVE_MASK) {
++ switch (action) {
+ case ACPI_GPE_CONDITIONAL_ENABLE:
+
+ /* Only enable if the corresponding enable_mask bit is set */
+@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
+ /* Write the updated enable mask */
+
+ status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
+- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
+- gpe_register_info->enable_mask = (u8)enable_mask;
+- }
+ return (status);
+ }
+
+@@ -286,10 +285,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
+ {
+ acpi_status status;
+
++ gpe_register_info->enable_mask = enable_mask;
+ status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
+- if (ACPI_SUCCESS(status)) {
+- gpe_register_info->enable_mask = enable_mask;
+- }
+ return (status);
+ }
+
+diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
+index 9bad45e..7fbc2b9 100644
+--- a/drivers/acpi/acpica/tbinstal.c
++++ b/drivers/acpi/acpica/tbinstal.c
+@@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
+ */
+ acpi_tb_uninstall_table(&new_table_desc);
+ *table_index = i;
+- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(AE_OK);
+ }
+ }
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index bbca783..349f4fd 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+ struct acpi_device_physical_node *pn;
+ bool offline = true;
+
+- mutex_lock(&adev->physical_node_lock);
++ /*
++ * acpi_container_offline() calls this for all of the container's
++ * children under the container's physical_node_lock lock.
++ */
++ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ if (device_supports_offline(pn->dev) && !pn->dev->offline) {
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 876bae5..79bc203 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
+ goto out_put;
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+- goto out_groups;
++ goto out_id;
+ error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+- goto out_id;
++ goto out_groups;
+ error = sysfs_create_link(&dev->kobj,
+ &dev->bus->p->subsys.kobj, "subsystem");
+ if (error)
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 6e64563..9c2ba1c 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu)
+ return -ENOENT;
+ }
+
+- while (np && index < cache_leaves(cpu)) {
++ while (index < cache_leaves(cpu)) {
+ this_leaf = this_cpu_ci->info_list + index;
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
++ if (!np)
++ break;
+ this_leaf->of_node = np;
+ index++;
+ }
++
++ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
++ return -ENOENT;
++
+ return 0;
+ }
+
+@@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu)
+ * will be set up here only if they are not populated already
+ */
+ ret = cache_shared_cpu_map_setup(cpu);
+- if (ret)
++ if (ret) {
++		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
++ cpu);
+ goto free_ci;
++ }
+ return 0;
+
+ free_ci:
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 9421fed..e68ab79 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
++ /*
++ * The resources may pass trigger flags to the irqs that need
++ * to be set up. It so happens that the trigger flags for
++ * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
++ * settings.
++ */
++ if (r && r->flags & IORESOURCE_BITS)
++ irqd_set_trigger_type(irq_get_irq_data(r->start),
++ r->flags & IORESOURCE_BITS);
+
+ return r ? r->start : -ENXIO;
+ #endif
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index de4c849..288547a 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
+ /* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xE027) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x04F2, 0xAFF1) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x0CF3, 0xE019) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 8bfc4c2..2c527da 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index e096e9c..283f00a 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
+ device_unregister(&chip->dev);
+ }
+
++static int tpm1_chip_register(struct tpm_chip *chip)
++{
++ int rc;
++
++ if (chip->flags & TPM_CHIP_FLAG_TPM2)
++ return 0;
++
++ rc = tpm_sysfs_add_device(chip);
++ if (rc)
++ return rc;
++
++ rc = tpm_add_ppi(chip);
++ if (rc) {
++ tpm_sysfs_del_device(chip);
++ return rc;
++ }
++
++ chip->bios_dir = tpm_bios_log_setup(chip->devname);
++
++ return 0;
++}
++
++static void tpm1_chip_unregister(struct tpm_chip *chip)
++{
++ if (chip->flags & TPM_CHIP_FLAG_TPM2)
++ return;
++
++ if (chip->bios_dir)
++ tpm_bios_log_teardown(chip->bios_dir);
++
++ tpm_remove_ppi(chip);
++
++ tpm_sysfs_del_device(chip);
++}
++
+ /*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+@@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip)
+ {
+ int rc;
+
+- /* Populate sysfs for TPM1 devices. */
+- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+- rc = tpm_sysfs_add_device(chip);
+- if (rc)
+- goto del_misc;
+-
+- rc = tpm_add_ppi(chip);
+- if (rc)
+- goto del_sysfs;
+-
+- chip->bios_dir = tpm_bios_log_setup(chip->devname);
+- }
++ rc = tpm1_chip_register(chip);
++ if (rc)
++ return rc;
+
+ rc = tpm_dev_add_device(chip);
+ if (rc)
+- return rc;
++ goto out_err;
+
+ /* Make the chip available. */
+ spin_lock(&driver_lock);
+@@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip)
+ chip->flags |= TPM_CHIP_FLAG_REGISTERED;
+
+ return 0;
+-del_sysfs:
+- tpm_sysfs_del_device(chip);
+-del_misc:
+- tpm_dev_del_device(chip);
++out_err:
++ tpm1_chip_unregister(chip);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_register);
+@@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
+ spin_unlock(&driver_lock);
+ synchronize_rcu();
+
+- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+- if (chip->bios_dir)
+- tpm_bios_log_teardown(chip->bios_dir);
+- tpm_remove_ppi(chip);
+- tpm_sysfs_del_device(chip);
+- }
+-
++ tpm1_chip_unregister(chip);
+ tpm_dev_del_device(chip);
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_unregister);
+diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
+index a23ac0c..0b7c3e8 100644
+--- a/drivers/clk/at91/clk-usb.c
++++ b/drivers/clk/at91/clk-usb.c
+@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
+ }
+
+-static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+- unsigned long *parent_rate)
++static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
++ unsigned long rate,
++ unsigned long min_rate,
++ unsigned long max_rate,
++ unsigned long *best_parent_rate,
++ struct clk_hw **best_parent_hw)
+ {
+- unsigned long div;
++ struct clk *parent = NULL;
++ long best_rate = -EINVAL;
++ unsigned long tmp_rate;
++ int best_diff = -1;
++ int tmp_diff;
++ int i;
+
+- if (!rate)
+- return -EINVAL;
++ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
++ int div;
+
+- if (rate >= *parent_rate)
+- return *parent_rate;
++ parent = clk_get_parent_by_index(hw->clk, i);
++ if (!parent)
++ continue;
++
++ for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
++ unsigned long tmp_parent_rate;
++
++ tmp_parent_rate = rate * div;
++ tmp_parent_rate = __clk_round_rate(parent,
++ tmp_parent_rate);
++ tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
++ if (tmp_rate < rate)
++ tmp_diff = rate - tmp_rate;
++ else
++ tmp_diff = tmp_rate - rate;
++
++ if (best_diff < 0 || best_diff > tmp_diff) {
++ best_rate = tmp_rate;
++ best_diff = tmp_diff;
++ *best_parent_rate = tmp_parent_rate;
++ *best_parent_hw = __clk_get_hw(parent);
++ }
++
++ if (!best_diff || tmp_rate < rate)
++ break;
++ }
+
+- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+- if (div > SAM9X5_USB_MAX_DIV + 1)
+- div = SAM9X5_USB_MAX_DIV + 1;
++ if (!best_diff)
++ break;
++ }
+
+- return DIV_ROUND_CLOSEST(*parent_rate, div);
++ return best_rate;
+ }
+
+ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+
+ static const struct clk_ops at91sam9x5_usb_ops = {
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+- .round_rate = at91sam9x5_clk_usb_round_rate,
++ .determine_rate = at91sam9x5_clk_usb_determine_rate,
+ .get_parent = at91sam9x5_clk_usb_get_parent,
+ .set_parent = at91sam9x5_clk_usb_set_parent,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
+ .disable = at91sam9n12_clk_usb_disable,
+ .is_enabled = at91sam9n12_clk_usb_is_enabled,
+ .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+- .round_rate = at91sam9x5_clk_usb_round_rate,
++ .determine_rate = at91sam9x5_clk_usb_determine_rate,
+ .set_rate = at91sam9x5_clk_usb_set_rate,
+ };
+
+@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ init.ops = &at91sam9x5_usb_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
++ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
++ CLK_SET_RATE_PARENT;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
+@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ init.ops = &at91sam9n12_usb_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+- init.flags = CLK_SET_RATE_GATE;
++ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+
+ usb->hw.init = &init;
+ usb->pmc = pmc;
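The new determine_rate callback above tries every parent and every divider up to SAM9X5_USB_MAX_DIV + 1, asks each parent to round the rate it would need, and keeps the combination that lands closest to the request, so that CLK_SET_RATE_PARENT can then propagate the chosen parent rate. A simplified, self-contained version of that search, with the parent rounding replaced by a fixed rate table and the divider limit assumed for illustration:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define SAM9X5_USB_MAX_DIV 15	/* value assumed here for illustration */

static const unsigned long parent_rates[] = { 480000000, 96000000 };

static unsigned long best_usb_rate(unsigned long rate,
				   unsigned long *best_parent, int *best_div)
{
	unsigned long best_rate = 0, best_diff = ULONG_MAX;
	size_t i;
	int div;

	for (i = 0; i < sizeof(parent_rates) / sizeof(parent_rates[0]); i++) {
		for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
			/* candidate output; the real code rounds the parent */
			unsigned long tmp = parent_rates[i] / div;
			unsigned long diff = tmp > rate ? tmp - rate
							: rate - tmp;

			if (diff < best_diff) {
				best_diff = diff;
				best_rate = tmp;
				*best_parent = parent_rates[i];
				*best_div = div;
			}
			if (!diff)	/* exact match, stop searching */
				return best_rate;
		}
	}
	return best_rate;
}

int main(void)
{
	unsigned long parent;
	int div;
	unsigned long out = best_usb_rate(48000000, &parent, &div);

	printf("%lu Hz from parent %lu / %d\n", out, parent, div);
	return 0;
}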
+diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
+index 0039bd7..466f30c 100644
+--- a/drivers/clk/qcom/clk-rcg.c
++++ b/drivers/clk/qcom/clk-rcg.c
+@@ -495,6 +495,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
+ return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
+ }
+
++/*
++ * This type of clock has a glitch-free mux that switches between the output of
++ * the M/N counter and an always on clock source (XO). When clk_set_rate() is
++ * called we need to make sure that we don't switch to the M/N counter if it
++ * isn't clocking because the mux will get stuck and the clock will stop
++ * outputting a clock. This can happen if the framework isn't aware that this
++ * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
++ * this we switch the mux in the enable/disable ops and reprogram the M/N
++ * counter in the set_rate op. We also make sure to switch away from the M/N
++ * counter in set_rate if software thinks the clock is off.
++ */
++static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_rate)
++{
++ struct clk_rcg *rcg = to_clk_rcg(hw);
++ const struct freq_tbl *f;
++ int ret;
++ u32 gfm = BIT(10);
++
++ f = qcom_find_freq(rcg->freq_tbl, rate);
++ if (!f)
++ return -EINVAL;
++
++ /* Switch to XO to avoid glitches */
++ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
++ ret = __clk_rcg_set_rate(rcg, f);
++ /* Switch back to M/N if it's clocking */
++ if (__clk_is_enabled(hw->clk))
++ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
++
++ return ret;
++}
++
++static int clk_rcg_lcc_enable(struct clk_hw *hw)
++{
++ struct clk_rcg *rcg = to_clk_rcg(hw);
++ u32 gfm = BIT(10);
++
++ /* Use M/N */
++ return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
++}
++
++static void clk_rcg_lcc_disable(struct clk_hw *hw)
++{
++ struct clk_rcg *rcg = to_clk_rcg(hw);
++ u32 gfm = BIT(10);
++
++ /* Use XO */
++ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
++}
++
+ static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
+ {
+ struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
+@@ -543,6 +594,17 @@ const struct clk_ops clk_rcg_bypass_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
+
++const struct clk_ops clk_rcg_lcc_ops = {
++ .enable = clk_rcg_lcc_enable,
++ .disable = clk_rcg_lcc_disable,
++ .get_parent = clk_rcg_get_parent,
++ .set_parent = clk_rcg_set_parent,
++ .recalc_rate = clk_rcg_recalc_rate,
++ .determine_rate = clk_rcg_determine_rate,
++ .set_rate = clk_rcg_lcc_set_rate,
++};
++EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
++
+ const struct clk_ops clk_dyn_rcg_ops = {
+ .enable = clk_enable_regmap,
+ .is_enabled = clk_is_enabled_regmap,
+diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
+index 687e41f..d09d06b 100644
+--- a/drivers/clk/qcom/clk-rcg.h
++++ b/drivers/clk/qcom/clk-rcg.h
+@@ -96,6 +96,7 @@ struct clk_rcg {
+
+ extern const struct clk_ops clk_rcg_ops;
+ extern const struct clk_ops clk_rcg_bypass_ops;
++extern const struct clk_ops clk_rcg_lcc_ops;
+
+ #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
+
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 742acfa..381f274 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -243,7 +243,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+- if (rcg->mnd_width && f->n)
++ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+ ret = regmap_update_bits(rcg->clkr.regmap,
+ rcg->cmd_rcgr + CFG_REG, mask, cfg);
+diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
+index cbdc31d..a015bb0 100644
+--- a/drivers/clk/qcom/gcc-ipq806x.c
++++ b/drivers/clk/qcom/gcc-ipq806x.c
+@@ -525,8 +525,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ { 10800000, P_PXO, 1, 2, 5 },
+ { 15060000, P_PLL8, 1, 2, 51 },
+ { 24000000, P_PLL8, 4, 1, 4 },
++ { 25000000, P_PXO, 1, 0, 0 },
+ { 25600000, P_PLL8, 1, 1, 15 },
+- { 27000000, P_PXO, 1, 0, 0 },
+ { 48000000, P_PLL8, 4, 1, 2 },
+ { 51200000, P_PLL8, 1, 2, 15 },
+ { }
+diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
+index c9ff27b..a6d3a67 100644
+--- a/drivers/clk/qcom/lcc-ipq806x.c
++++ b/drivers/clk/qcom/lcc-ipq806x.c
+@@ -294,14 +294,14 @@ static struct clk_regmap_mux pcm_clk = {
+ };
+
+ static struct freq_tbl clk_tbl_aif_osr[] = {
+- { 22050, P_PLL4, 1, 147, 20480 },
+- { 32000, P_PLL4, 1, 1, 96 },
+- { 44100, P_PLL4, 1, 147, 10240 },
+- { 48000, P_PLL4, 1, 1, 64 },
+- { 88200, P_PLL4, 1, 147, 5120 },
+- { 96000, P_PLL4, 1, 1, 32 },
+- { 176400, P_PLL4, 1, 147, 2560 },
+- { 192000, P_PLL4, 1, 1, 16 },
++ { 2822400, P_PLL4, 1, 147, 20480 },
++ { 4096000, P_PLL4, 1, 1, 96 },
++ { 5644800, P_PLL4, 1, 147, 10240 },
++ { 6144000, P_PLL4, 1, 1, 64 },
++ { 11289600, P_PLL4, 1, 147, 5120 },
++ { 12288000, P_PLL4, 1, 1, 32 },
++ { 22579200, P_PLL4, 1, 147, 2560 },
++ { 24576000, P_PLL4, 1, 1, 16 },
+ { },
+ };
+
+@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
+ };
+
+ static struct freq_tbl clk_tbl_ahbix[] = {
+- { 131072, P_PLL4, 1, 1, 3 },
++ { 131072000, P_PLL4, 1, 1, 3 },
+ { },
+ };
+
+@@ -386,13 +386,12 @@ static struct clk_rcg ahbix_clk = {
+ .freq_tbl = clk_tbl_ahbix,
+ .clkr = {
+ .enable_reg = 0x38,
+- .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
++ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "ahbix",
+ .parent_names = lcc_pxo_pll4,
+ .num_parents = 2,
+- .ops = &clk_rcg_ops,
+- .flags = CLK_SET_RATE_GATE,
++ .ops = &clk_rcg_lcc_ops,
+ },
+ },
+ };
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index 51462e8..714d6ba 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
+ VPLL_LOCK, VPLL_CON0, NULL),
+ };
+
+-static void __init exynos4_core_down_clock(enum exynos4_soc soc)
++static void __init exynos4x12_core_down_clock(void)
+ {
+ unsigned int tmp;
+
+@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+ __raw_writel(tmp, reg_base + PWR_CTRL1);
+
+ /*
+- * Disable the clock up feature on Exynos4x12, in case it was
+- * enabled by bootloader.
++ * Disable the clock up feature in case it was enabled by bootloader.
+ */
+- if (exynos4_soc == EXYNOS4X12)
+- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
++ __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+ }
+
+ /* register exynos4 clocks */
+@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
+ samsung_clk_register_alias(ctx, exynos4_aliases,
+ ARRAY_SIZE(exynos4_aliases));
+
+- exynos4_core_down_clock(soc);
++ if (soc == EXYNOS4X12)
++ exynos4x12_core_down_clock();
+ exynos4_clk_sleep_init();
+
+ samsung_clk_of_add_provider(np, ctx);
+diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
+index 9a893f2..23ce0af 100644
+--- a/drivers/clk/tegra/clk-tegra124.c
++++ b/drivers/clk/tegra/clk-tegra124.c
+@@ -1110,16 +1110,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
+ 1, 2);
+ clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
+
+- clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
++ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
+ clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
+- clks[TEGRA124_CLK_PLLD_DSI] = clk;
++ clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;
+
+- clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
+- 0, 48, periph_clk_enb_refcnt);
++ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0,
++ clk_base, 0, 48,
++ periph_clk_enb_refcnt);
+ clks[TEGRA124_CLK_DSIA] = clk;
+
+- clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
+- 0, 82, periph_clk_enb_refcnt);
++ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0,
++ clk_base, 0, 82,
++ periph_clk_enb_refcnt);
+ clks[TEGRA124_CLK_DSIB] = clk;
+
+ /* emc mux */
+diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
+index 9ddb754..7a1df61 100644
+--- a/drivers/clk/tegra/clk.c
++++ b/drivers/clk/tegra/clk.c
+@@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ rst_ctlr.of_node = np;
+- rst_ctlr.nr_resets = clk_num * 32;
++ rst_ctlr.nr_resets = periph_banks * 32;
+ reset_controller_register(&rst_ctlr);
+ }
+
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index 42f95a4..9a28b7e 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+ return err;
+ }
+
+-static int omap_aes_check_aligned(struct scatterlist *sg)
++static int omap_aes_check_aligned(struct scatterlist *sg, int total)
+ {
++ int len = 0;
++
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return -1;
+ if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ return -1;
++
++ len += sg->length;
+ sg = sg_next(sg);
+ }
++
++ if (len != total)
++ return -1;
++
+ return 0;
+ }
+
+@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ dd->in_sg = req->src;
+ dd->out_sg = req->dst;
+
+- if (omap_aes_check_aligned(dd->in_sg) ||
+- omap_aes_check_aligned(dd->out_sg)) {
++ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
++ omap_aes_check_aligned(dd->out_sg, dd->total)) {
+ if (omap_aes_copy_sgs(dd))
+ pr_err("Failed to copy SGs for unaligned cases\n");
+ dd->sgs_copied = 1;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index d0bc123..1a54205 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
+index bf17a60..1dbfba5 100644
+--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
++++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
+@@ -32,10 +32,16 @@
+ #include <drm/bridge/ptn3460.h>
+
+ #include "exynos_dp_core.h"
++#include "exynos_drm_fimd.h"
+
+ #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
+ connector)
+
++static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
++{
++ return to_exynos_crtc(dp->encoder->crtc);
++}
++
+ static inline struct exynos_dp_device *
+ display_to_dp(struct exynos_drm_display *d)
+ {
+@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
+ }
+ }
+
++ fimd_dp_clock_enable(dp_to_crtc(dp), true);
++
+ clk_prepare_enable(dp->clock);
+ exynos_dp_phy_init(dp);
+ exynos_dp_init_dp(dp);
+@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+ exynos_dp_phy_exit(dp);
+ clk_disable_unprepare(dp->clock);
+
++ fimd_dp_clock_enable(dp_to_crtc(dp), false);
++
+ if (dp->panel) {
+ if (drm_panel_unprepare(dp->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index 33a10ce..5d58f6c 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -32,6 +32,7 @@
+ #include "exynos_drm_fbdev.h"
+ #include "exynos_drm_crtc.h"
+ #include "exynos_drm_iommu.h"
++#include "exynos_drm_fimd.h"
+
+ /*
+ * FIMD stands for Fully Interactive Mobile Display and
+@@ -1233,6 +1234,24 @@ static int fimd_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
++{
++ struct fimd_context *ctx = crtc->ctx;
++ u32 val;
++
++	 * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
++	 * clock. On these SoCs the bootloader may enable it, but any
++	 * power domain off/on will reset it to the disabled state.
++ * power domain off/on will reset it to disable state.
++ */
++ if (ctx->driver_data != &exynos5_fimd_driver_data)
++ return;
++
++ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
++	writel(val, ctx->regs + DP_MIE_CLKCON);
++}
++EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
++
+ struct platform_driver fimd_driver = {
+ .probe = fimd_probe,
+ .remove = fimd_remove,
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
+new file mode 100644
+index 0000000..b4fcaa5
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
+@@ -0,0 +1,15 @@
++/*
++ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#ifndef _EXYNOS_DRM_FIMD_H_
++#define _EXYNOS_DRM_FIMD_H_
++
++extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
++
++#endif /* _EXYNOS_DRM_FIMD_H_ */
+diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
+index fa140e0..60ab1f7 100644
+--- a/drivers/gpu/drm/i2c/adv7511.c
++++ b/drivers/gpu/drm/i2c/adv7511.c
+@@ -33,6 +33,7 @@ struct adv7511 {
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
++ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+@@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
+ return false;
+ }
+
+-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+-{
+- struct adv7511 *adv7511 = devid;
+-
+- if (adv7511_hpd(adv7511))
+- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+-
+- wake_up_all(&adv7511->wq);
+-
+- return IRQ_HANDLED;
+-}
+-
+-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
+- unsigned int irq)
++static int adv7511_irq_process(struct adv7511 *adv7511)
+ {
+ unsigned int irq0, irq1;
+- unsigned int pending;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+- return 0;
++ return ret;
++
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+- return 0;
++ return ret;
++
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
++
++ if (irq0 & ADV7511_INT0_HDP)
++ drm_helper_hpd_irq_event(adv7511->encoder->dev);
++
++ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
++ adv7511->edid_read = true;
++
++ if (adv7511->i2c_main->irq)
++ wake_up_all(&adv7511->wq);
++ }
++
++ return 0;
++}
+
+- pending = (irq1 << 8) | irq0;
++static irqreturn_t adv7511_irq_handler(int irq, void *devid)
++{
++ struct adv7511 *adv7511 = devid;
++ int ret;
+
+- return pending & irq;
++ ret = adv7511_irq_process(adv7511);
++ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+ }
+
+-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
+- int timeout)
++/* -----------------------------------------------------------------------------
++ * EDID retrieval
++ */
++
++static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+ {
+- unsigned int pending;
+ int ret;
+
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+- adv7511_is_interrupt_pending(adv7511, irq),
+- msecs_to_jiffies(timeout));
+- if (ret <= 0)
+- return 0;
+- pending = adv7511_is_interrupt_pending(adv7511, irq);
++ adv7511->edid_read, msecs_to_jiffies(timeout));
+ } else {
+- if (timeout < 25)
+- timeout = 25;
+- do {
+- pending = adv7511_is_interrupt_pending(adv7511, irq);
+- if (pending)
++ for (; timeout > 0; timeout -= 25) {
++ ret = adv7511_irq_process(adv7511);
++ if (ret < 0)
+ break;
++
++ if (adv7511->edid_read)
++ break;
++
+ msleep(25);
+- timeout -= 25;
+- } while (timeout >= 25);
++ }
+ }
+
+- return pending;
++ return adv7511->edid_read ? 0 : -EIO;
+ }
+
+-/* -----------------------------------------------------------------------------
+- * EDID retrieval
+- */
+-
+ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+ {
+@@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ return ret;
+
+ if (status != 2) {
++ adv7511->edid_read = false;
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+- ret = adv7511_wait_for_interrupt(adv7511,
+- ADV7511_INT0_EDID_READY |
+- ADV7511_INT1_DDC_ERROR, 200);
+-
+- if (!(ret & ADV7511_INT0_EDID_READY))
+- return -EIO;
++ ret = adv7511_wait_for_edid(adv7511, 200);
++ if (ret < 0)
++ return ret;
+ }
+
+- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+-
+ /* Break this apart, hopefully more I2C controllers will
+ * support 64 byte transfers than 256 byte transfers
+ */
+@@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
+ /* Reading the EDID only works if the device is powered */
+ if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
++ ADV7511_INT0_EDID_READY);
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
++ ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ adv7511->current_edid_segment = -1;
+@@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+ adv7511->current_edid_segment = -1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
++ ADV7511_INT0_EDID_READY);
++ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
++ ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ /*
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 5c66b56..ec4d932 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1042,7 +1042,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+
+ s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
++ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
+
+ s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
+ s->ecochk = I915_READ(GAM_ECOCHK);
+@@ -1124,7 +1124,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+
+ I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
++ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ I915_WRITE(GAM_ECOCHK, s->ecochk);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index ede5bbb..07320cb 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -3718,14 +3718,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
++ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+@@ -3887,14 +3885,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
++ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 33b3d0a2..f536ff2 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1740,6 +1740,7 @@ enum punit_power_well {
+ #define GMBUS_CYCLE_INDEX (2<<25)
+ #define GMBUS_CYCLE_STOP (4<<25)
+ #define GMBUS_BYTE_COUNT_SHIFT 16
++#define GMBUS_BYTE_COUNT_MAX 256U
+ #define GMBUS_SLAVE_INDEX_SHIFT 8
+ #define GMBUS_SLAVE_ADDR_SHIFT 1
+ #define GMBUS_SLAVE_READ (1<<0)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index b31088a..56e437e 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
+ }
+
+ static int
+-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+- u32 gmbus1_index)
++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len,
++ u32 gmbus1_index)
+ {
+ int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+- u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ }
+
+ static int
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
++ u32 gmbus1_index)
+ {
+- int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+ u8 *buf = msg->buf;
++ unsigned int rx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
++ buf, len, gmbus1_index);
++ if (ret)
++ return ret;
++
++ rx_size -= len;
++ buf += len;
++ } while (rx_size != 0);
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len)
++{
++ int reg_offset = dev_priv->gpio_mmio_base;
++ unsigned int chunk_size = len;
+ u32 val, loop;
+
+ val = loop = 0;
+@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ if (ret)
+ return ret;
+ }
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++{
++ u8 *buf = msg->buf;
++ unsigned int tx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
++ if (ret)
++ return ret;
++
++ buf += len;
++ tx_size -= len;
++ } while (tx_size != 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 86807ee..9bd5611 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9c47867..7fe5590 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -459,6 +459,10 @@
+ #define USB_DEVICE_ID_UGCI_FLYING 0x0020
+ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
+
++#define USB_VENDOR_ID_HP 0x03f0
++#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE 0x0a4a
++#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
++
+ #define USB_VENDOR_ID_HUION 0x256c
+ #define USB_DEVICE_ID_HUION_TABLET 0x006e
+
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index a821277..4e3ae9f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -78,6 +78,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 2978f5e..00bc30e 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+@@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ if (userdatalen)
+@@ -195,6 +195,9 @@ error1:
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
++error_gpadl:
++ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
++
+ error0:
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 5f96b1b..019d542 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+ clk_disable(i2c->clk);
+ spin_unlock_irqrestore(&i2c->lock, flags);
+
+- return ret;
++ return ret < 0 ? ret : num;
+ }
+
+ static u32 rk3x_i2c_func(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index edf274c..8143162 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+ adap->bus_recovery_info->set_scl(adap, 1);
+ return i2c_generic_recovery(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
+
+ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ {
+@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
+
+ int i2c_recover_bus(struct i2c_adapter *adap)
+ {
+@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
+ dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ return adap->bus_recovery_info->recover_bus(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_recover_bus);
+
+ static int i2c_device_probe(struct device *dev)
+ {
+@@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+
+ dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+
++ pm_runtime_no_callbacks(&adap->dev);
++
+ #ifdef CONFIG_I2C_COMPAT
+ res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
+ adap->dev.parent);
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 593f7ca..06cc1ff 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -32,8 +32,9 @@ struct i2c_mux_priv {
+ struct i2c_algorithm algo;
+
+ struct i2c_adapter *parent;
+- void *mux_priv; /* the mux chip/device */
+- u32 chan_id; /* the channel id */
++ struct device *mux_dev;
++ void *mux_priv;
++ u32 chan_id;
+
+ int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+ int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
+
+ /* Set up private adapter data */
+ priv->parent = parent;
++ priv->mux_dev = mux_dev;
+ priv->mux_priv = mux_priv;
+ priv->chan_id = chan_id;
+ priv->select = select;
+@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
+ char symlink_name[20];
+
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
+- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
++ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
+
+ sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
+ i2c_del_adapter(adap);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index b0e5852..44d1d79 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = {
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+- .name = "C1E-BYT",
+- .desc = "MWAIT 0x01",
+- .flags = MWAIT2flg(0x01),
+- .exit_latency = 15,
+- .target_residency = 30,
+- .enter = &intel_idle,
+- .enter_freeze = intel_idle_freeze, },
+- {
+ .name = "C6N-BYT",
+ .desc = "MWAIT 0x58",
+ .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
+- .exit_latency = 40,
++ .exit_latency = 300,
+ .target_residency = 275,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
+ .name = "C6S-BYT",
+ .desc = "MWAIT 0x52",
+ .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+- .exit_latency = 140,
++ .exit_latency = 500,
+ .target_residency = 560,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1200,
+- .target_residency = 1500,
++ .target_residency = 4000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 8c014b5..38acb3c 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
++ if (!size)
++ return ERR_PTR(-EINVAL);
++
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+- if ((PAGE_ALIGN(addr + size) <= size) ||
+- (PAGE_ALIGN(addr + size) <= addr))
++ if (((addr + size) < addr) ||
++ PAGE_ALIGN(addr + size) < (addr + size))
+ return ERR_PTR(-EINVAL);
+
+ if (!can_do_mlock())
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index ed2bd67..fbde33a 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+
+ memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+- wr->wr.ud.hlen);
++ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ *lso_seg_len = halign;
+ return 0;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 20e859a..76eb57b 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
+ if (scsi_prot_sg_count(sc)) {
+ prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->size = scsi_prot_sg_count(sc);
+- prot_buf->data_len = data_buf->data_len >>
+- ilog2(sc->device->sector_size) * 8;
++ prot_buf->data_len = (data_buf->data_len >>
++ ilog2(sc->device->sector_size)) * 8;
+ }
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 075b19c..147029a 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -222,7 +222,7 @@ fail:
+ static void
+ isert_free_rx_descriptors(struct isert_conn *isert_conn)
+ {
+- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++ struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
+ struct iser_rx_desc *rx_desc;
+ int i;
+
+@@ -719,8 +719,8 @@ out:
+ static void
+ isert_connect_release(struct isert_conn *isert_conn)
+ {
+- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->conn_device;
++ struct ib_device *ib_dev = device->ib_device;
+
+ isert_dbg("conn %p\n", isert_conn);
+
+@@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn)
+ isert_conn_free_fastreg_pool(isert_conn);
+
+ isert_free_rx_descriptors(isert_conn);
+- rdma_destroy_id(isert_conn->conn_cm_id);
++ if (isert_conn->conn_cm_id)
++ rdma_destroy_id(isert_conn->conn_cm_id);
+
+ if (isert_conn->conn_qp) {
+ struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+@@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ return 0;
+ }
+
+-static void
++static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
++ isert_conn->conn_cm_id = NULL;
+ isert_put_conn(isert_conn);
++
++ return -1;
+ }
+
+ static int
+@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+- isert_connect_error(cma_id);
++ ret = isert_connect_error(cma_id);
+ break;
+ default:
+ isert_err("Unhandled RDMA CMA event: %d\n", event->event);
+@@ -1861,11 +1865,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+
+- if (ret)
++ if (ret) {
++ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->pi_err, 0);
+- else
++ } else {
+ target_execute_cmd(se_cmd);
++ }
+ }
+
+ static void
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 27bcdbc..ea6cb64 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1159,13 +1159,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ bool report_buttons)
+ {
+ struct alps_data *priv = psmouse->private;
+- struct input_dev *dev;
++ struct input_dev *dev, *dev2 = NULL;
+
+ /* Figure out which device to use to report the bare packet */
+ if (priv->proto_version == ALPS_PROTO_V2 &&
+ (priv->flags & ALPS_DUALPOINT)) {
+ /* On V2 devices the DualPoint Stick reports bare packets */
+ dev = priv->dev2;
++ dev2 = psmouse->dev;
+ } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
+ /* Register dev3 mouse if we received PS/2 packet first time */
+ if (!IS_ERR(priv->dev3))
+@@ -1177,7 +1178,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ }
+
+ if (report_buttons)
+- alps_report_buttons(dev, NULL,
++ alps_report_buttons(dev, dev2,
+ packet[0] & 1, packet[0] & 2, packet[0] & 4);
+
+ input_report_rel(dev, REL_X,
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 6e22682..991dc6b 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
+ }
+
+ /*
++ * This writes the reg_07 value again to the hardware at the end of every
++ * set_rate call because the register loses its value. reg_07 allows setting
++ * absolute mode on v4 hardware
++ */
++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
++ unsigned int rate)
++{
++ struct elantech_data *etd = psmouse->private;
++
++ etd->original_set_rate(psmouse, rate);
++ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
++ psmouse_err(psmouse, "restoring reg_07 failed\n");
++}
++
++/*
+ * Put the touchpad into absolute mode
+ */
+ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
++ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
++ * Asus X750JN 0x381f17 10, 14, 0e clickpad
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
+ goto init_fail;
+ }
+
++ if (etd->fw_version == 0x381f17) {
++ etd->original_set_rate = psmouse->set_rate;
++ psmouse->set_rate = elantech_set_rate_restore_reg_07;
++ }
++
+ if (elantech_set_input_params(psmouse)) {
+ psmouse_err(psmouse, "failed to query touchpad range.\n");
+ goto init_fail;
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 6f3afec..f965d15 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -142,6 +142,7 @@ struct elantech_data {
+ struct finger_pos mt[ETP_MAX_FINGERS];
+ unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
++ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
+ };
+
+ #ifdef CONFIG_MOUSE_PS2_ELANTECH
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 713a962..41473929 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc,
+
+ switch (r) {
+ /* async */
++ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+- /* fall through*/
+- case -EINPROGRESS:
+ ctx->req = NULL;
+ ctx->cc_sector++;
+ continue;
+@@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+ struct crypt_config *cc = io->cc;
+
+- if (error == -EINPROGRESS) {
+- complete(&ctx->restart);
++ if (error == -EINPROGRESS)
+ return;
+- }
+
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+@@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
+
+ if (!atomic_dec_and_test(&ctx->cc_pending))
+- return;
++ goto done;
+
+ if (bio_data_dir(io->base_bio) == READ)
+ kcryptd_crypt_read_done(io);
+ else
+ kcryptd_crypt_write_io_submit(io, 1);
++done:
++ if (!completion_done(&ctx->restart))
++ complete(&ctx->restart);
+ }
+
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 717daad..e617878 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ const int rw = bio_data_dir(bio);
+ struct mddev *mddev = q->queuedata;
+ unsigned int sectors;
++ int cpu;
+
+ if (mddev == NULL || mddev->pers == NULL
+ || !mddev->ready) {
+@@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ sectors = bio_sectors(bio);
+ mddev->pers->make_request(mddev, bio);
+
+- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
++ cpu = part_stat_lock();
++ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
++ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
++ part_stat_unlock();
+
+ if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+ wake_up(&mddev->sb_wait);
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 3ed9f42..3b5d7f7 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
+
+ /*
+ * remaps the bio to the target device. we separate two flows.
+- * power 2 flow and a general flow for the sake of perfromance
++ * power 2 flow and a general flow for the sake of performance
+ */
+ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_t sector, sector_t *sector_offset)
+@@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
++ sector = bio->bi_iter.bi_sector;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
+index 77c78de..7020659 100644
+--- a/drivers/media/rc/img-ir/img-ir-core.c
++++ b/drivers/media/rc/img-ir/img-ir-core.c
+@@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
+ {
+ struct img_ir_priv *priv = platform_get_drvdata(pdev);
+
+- free_irq(priv->irq, img_ir_isr);
++ free_irq(priv->irq, priv);
+ img_ir_remove_hw(priv);
+ img_ir_remove_raw(priv);
+
+diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
+index 65a326c..749ad56 100644
+--- a/drivers/media/usb/stk1160/stk1160-v4l.c
++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
+@@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
+ if (mutex_lock_interruptible(&dev->v4l_lock))
+ return -ERESTARTSYS;
+
++ /*
++ * Once URBs are cancelled, the URB complete handler
++ * won't be running. This is required to safely release the
++ * current buffer (dev->isoc_ctl.buf).
++ */
+ stk1160_cancel_isoc(dev);
+
+ /*
+@@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
+- /* It's important to clear current buffer */
+- dev->isoc_ctl.buf = NULL;
++
++ /* It's important to release the current buffer */
++ if (dev->isoc_ctl.buf) {
++ buf = dev->isoc_ctl.buf;
++ dev->isoc_ctl.buf = NULL;
++
++ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
++ stk1160_info("buffer [%p/%d] aborted\n",
++ buf, buf->vb.v4l2_buf.index);
++ }
+ spin_unlock_irqrestore(&dev->buf_lock, flags);
+ }
+
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index fc145d2..922a750 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+
+ if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ if (msb->data_dir == READ) {
+- for (cnt = 0; cnt < msb->current_seg; cnt++)
++ for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ t_len += msb->req_sg[cnt].length
+ / msb->page_size;
+
+@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ t_len += msb->current_page - 1;
+
+ t_len *= msb->page_size;
++ }
+ }
+ } else
+ t_len = blk_rq_bytes(msb->block_req);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 2a87f69..1aed3b7 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
+ int platform_id;
+ int r;
+
+- if (id < 0)
++ if (id == PLATFORM_DEVID_AUTO)
+ platform_id = id;
+ else
+ platform_id = id + cell->id;
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index e8a4218..459ed1b 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -930,7 +930,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
+ return PTR_ERR(host->clk_sample);
+ }
+
+- host->reset = devm_reset_control_get(&pdev->dev, "ahb");
++ host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
++ if (PTR_ERR(host->reset) == -EPROBE_DEFER)
++ return PTR_ERR(host->reset);
+
+ ret = clk_prepare_enable(host->clk_ahb);
+ if (ret) {
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index a31c357..dba7e1c 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -1073,8 +1073,6 @@ EXPORT_SYMBOL(tmio_mmc_host_alloc);
+ void tmio_mmc_host_free(struct tmio_mmc_host *host)
+ {
+ mmc_free_host(host->mmc);
+-
+- host->mmc = NULL;
+ }
+ EXPORT_SYMBOL(tmio_mmc_host_free);
+
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index 9d2e16f..b5e1548 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+- bitflips = !!err;
++ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index d647e50..d16fccf 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ /* Validate the request */
+ err = -EINVAL;
+ if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
++ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+
+ err = get_exclusive(desc);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 16e34b3..8c9a710 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1419,7 +1419,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+- vol->eba_tbl[aeb->lnum] = aeb->pnum;
++ else
++ vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ }
+ }
+
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 8f7bde6..0bd92d8 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1002,7 +1002,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int shutdown)
+ {
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+- int vol_id = -1, uninitialized_var(lnum);
++ int vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+ #endif
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index 81d4153..77bf133 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -2165,7 +2165,7 @@ static void macb_configure_caps(struct macb *bp)
+ }
+ }
+
+- if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
++ if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) >= 0x2)
+ bp->caps |= MACB_CAPS_MACB_IS_GEM;
+
+ if (macb_is_gem(bp)) {
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 7f997d3..a71c446 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
++ struct e1000_rx_ring *rx_ring,
++ int cleaned_count)
++{
++}
+ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ msleep(1);
+ /* e1000_down has a dependency on max_frame_size */
+ hw->max_frame_size = max_frame;
+- if (netif_running(netdev))
++ if (netif_running(netdev)) {
++ /* prevent buffers from being reallocated */
++ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
+ e1000_down(adapter);
++ }
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index af829c5..7ace07d 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "missing phy-handle\n");
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_netdev;
+ }
+ of_property_read_u32(np, "reg", &pep->phy_addr);
+ pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ pep->smi_bus = mdiobus_alloc();
+ if (pep->smi_bus == NULL) {
+ err = -ENOMEM;
+- goto err_base;
++ goto err_netdev;
+ }
+ pep->smi_bus->priv = pep;
+ pep->smi_bus->name = "pxa168_eth smi";
+@@ -1551,13 +1552,10 @@ err_mdiobus:
+ mdiobus_unregister(pep->smi_bus);
+ err_free_mdio:
+ mdiobus_free(pep->smi_bus);
+-err_base:
+- iounmap(pep->base);
+ err_netdev:
+ free_netdev(dev);
+ err_clk:
+- clk_disable(clk);
+- clk_put(clk);
++ clk_disable_unprepare(clk);
+ return err;
+ }
+
+@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
+ if (pep->phy)
+ phy_disconnect(pep->phy);
+ if (pep->clk) {
+- clk_disable(pep->clk);
+- clk_put(pep->clk);
+- pep->clk = NULL;
++ clk_disable_unprepare(pep->clk);
+ }
+
+- iounmap(pep->base);
+- pep->base = NULL;
+ mdiobus_unregister(pep->smi_bus);
+ mdiobus_free(pep->smi_bus);
+ unregister_netdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index a7b58ba..3dccf01 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -981,20 +981,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ /* check if requested function is supported by the device */
+- if ((hfunc == ETH_RSS_HASH_TOP &&
+- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
+- (hfunc == ETH_RSS_HASH_XOR &&
+- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
+- return -EINVAL;
++ if (hfunc == ETH_RSS_HASH_TOP) {
++ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
++ return -EINVAL;
++ if (!(dev->features & NETIF_F_RXHASH))
++ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
++ return 0;
++ } else if (hfunc == ETH_RSS_HASH_XOR) {
++ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
++ return -EINVAL;
++ if (dev->features & NETIF_F_RXHASH)
++ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
++ return 0;
++ }
+
+- priv->rss_hash_fn = hfunc;
+- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
+- en_warn(priv,
+- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
+- en_warn(priv,
+- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+- return 0;
++ return -EINVAL;
+ }
+
+ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+@@ -1068,6 +1069,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ priv->prof->rss_rings = rss_rings;
+ if (key)
+ memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
++ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
++ priv->rss_hash_fn = hfunc;
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index af034db..9d15566 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+ {
+ /* note: a 0-length skb is used as an error indication */
+ if (skb->len > 0) {
++ skb_checksum_complete_unset(skb);
+ #ifdef CONFIG_PPP_MULTILINK
+ /* XXX do channel-level decompression here */
+ if (PPP_PROTO(skb) == PPP_MP)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 90a714c..23806c2 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
++ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
+index c93fae9..5fbd223 100644
+--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
+@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+
+-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
+
+ WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index 0f2cfb0..bf14676 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -26,8 +26,8 @@
+
+ #include "wlcore.h"
+
+-int wl1271_format_buffer(char __user *userbuf, size_t count,
+- loff_t *ppos, char *fmt, ...);
++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
++ loff_t *ppos, char *fmt, ...);
+
+ int wl1271_debugfs_init(struct wl1271 *wl);
+ void wl1271_debugfs_exit(struct wl1271 *wl);
+diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
+index eb88693..7b53a5c 100644
+--- a/drivers/nfc/st21nfcb/i2c.c
++++ b/drivers/nfc/st21nfcb/i2c.c
+@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+ return phy->ndlc->hard_fault;
+
+ r = i2c_master_send(client, skb->data, skb->len);
+- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
++ if (r < 0) { /* Retry, chip was in standby */
+ usleep_range(1000, 4000);
+ r = i2c_master_send(client, skb->data, skb->len);
+ }
+@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
+ struct i2c_client *client = phy->i2c_dev;
+
+ r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
++ if (r < 0) { /* Retry, chip was in standby */
+ usleep_range(1000, 4000);
+ r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ }
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index 15c0fab..bceb30b 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev)
+ if (err)
+ return err;
+
+- hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+- "compal", data,
+- compal_hwmon_groups);
++ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
++ "compal", data,
++ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
+@@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev)
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+- power_supply_register(&compal_device->dev, &data->psy);
++ err = power_supply_register(&compal_device->dev, &data->psy);
++ if (err < 0)
++ goto remove;
+
+ platform_set_drvdata(pdev, data);
+
+diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
+index 9d69460..96b15e0 100644
+--- a/drivers/power/ipaq_micro_battery.c
++++ b/drivers/power/ipaq_micro_battery.c
+@@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
+ static int micro_batt_probe(struct platform_device *pdev)
+ {
+ struct micro_battery *mb;
++ int ret;
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+@@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)
+
+ mb->micro = dev_get_drvdata(pdev->dev.parent);
+ mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
++ if (!mb->wq)
++ return -ENOMEM;
++
+ INIT_DELAYED_WORK(&mb->update, micro_battery_work);
+ platform_set_drvdata(pdev, mb);
+ queue_delayed_work(mb->wq, &mb->update, 1);
+- power_supply_register(&pdev->dev, &micro_batt_power);
+- power_supply_register(&pdev->dev, &micro_ac_power);
++
++ ret = power_supply_register(&pdev->dev, &micro_batt_power);
++ if (ret < 0)
++ goto batt_err;
++
++ ret = power_supply_register(&pdev->dev, &micro_ac_power);
++ if (ret < 0)
++ goto ac_err;
+
+ dev_info(&pdev->dev, "iPAQ micro battery driver\n");
+ return 0;
++
++ac_err:
++ power_supply_unregister(&micro_ac_power);
++batt_err:
++ cancel_delayed_work_sync(&mb->update);
++ destroy_workqueue(mb->wq);
++ return ret;
+ }
+
+ static int micro_batt_remove(struct platform_device *pdev)
+@@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
+ power_supply_unregister(&micro_ac_power);
+ power_supply_unregister(&micro_batt_power);
+ cancel_delayed_work_sync(&mb->update);
++ destroy_workqueue(mb->wq);
+
+ return 0;
+ }
+diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
+index 21fc233..176dab2 100644
+--- a/drivers/power/lp8788-charger.c
++++ b/drivers/power/lp8788-charger.c
+@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
+ pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
+ pchg->battery.get_property = lp8788_battery_get_property;
+
+- if (power_supply_register(&pdev->dev, &pchg->battery))
++ if (power_supply_register(&pdev->dev, &pchg->battery)) {
++ power_supply_unregister(&pchg->charger);
+ return -EPERM;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
+index 7ef445a..cf90760 100644
+--- a/drivers/power/twl4030_madc_battery.c
++++ b/drivers/power/twl4030_madc_battery.c
+@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ {
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
++ int ret = 0;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ if (ret < 0)
++ kfree(twl4030_madc_bat);
+
+- return 0;
++ return ret;
+ }
+
+ static int twl4030_madc_battery_remove(struct platform_device *pdev)
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 675b5e7..5a0800d 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
+ fp_possible = io_info.fpOkForIo;
+ }
+
+- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
++ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
+ id by default, not CPU group id, otherwise all MSI-X queues won't
+ be utilized */
+ cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
+- smp_processor_id() % instance->msix_vectors : 0;
++ raw_smp_processor_id() % instance->msix_vectors : 0;
+
+ if (fp_possible) {
+ megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+@@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+- instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
++ instance->msix_vectors ?
++ raw_smp_processor_id() %
++ instance->msix_vectors :
++ 0;
+ os_timeout_value = scmd->request->timeout / HZ;
+
+ if (instance->secure_jbod_support &&
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 2d5ab6d..454536c 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+ {
+- struct sas_ha_struct *sha = mvi->sas;
+ struct sas_task *task = tei->task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct asd_sas_port *sas_port = dev->port;
+- struct sas_phy *sphy = dev->phy;
+- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ u32 tag = tei->tag, hdr_tag;
+@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ slot->tx = mvi->tx_prod;
+ del_q = TXQ_MODE_I | tag |
+ (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
++ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6b78476..3290a3e 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev)
+ ida_remove(&sd_index_ida, sdkp->index);
+ spin_unlock(&sd_index_lock);
+
++ blk_integrity_unregister(disk);
+ disk->private_data = NULL;
+ put_disk(disk);
+ put_device(&sdkp->device->sdev_gendev);
+diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
+index 14c7d42..5c06d29 100644
+--- a/drivers/scsi/sd_dif.c
++++ b/drivers/scsi/sd_dif.c
+@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
+
+ disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+
+- if (!sdkp)
++ if (!sdkp->ATO)
+ return;
+
+ if (type == SD_DIF_TYPE3_PROTECTION)
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index efc6e44..bf8c5c1 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -746,21 +746,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ sg_kunmap_atomic(bounce_addr);
++ bounce_addr = 0;
+ j++;
++ }
+
+- /* if we need to use another bounce buffer */
+- if (srclen || i != orig_sgl_count - 1)
+- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
++ /* if we need to use another bounce buffer */
++ if (srclen && bounce_addr == 0)
++ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+- } else if (srclen == 0 && i == orig_sgl_count - 1) {
+- /* unmap the last bounce that is < PAGE_SIZE */
+- sg_kunmap_atomic(bounce_addr);
+- }
+ }
+
+ sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ }
+
++ if (bounce_addr)
++ sg_kunmap_atomic(bounce_addr);
++
+ local_irq_restore(flags);
+
+ return total_copied;
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 6fea4af..aea3a67 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
+ if (spi_imx->dma_is_inited) {
+ dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+
+- spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+- spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
+ tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
+@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
+ master->max_dma_len = MAX_SDMA_BD_BYTES;
+ spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
+ SPI_MASTER_MUST_TX;
++ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
++ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->dma_is_inited = 1;
+
+ return 0;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 4eb7a98..7bf5186 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -245,7 +245,10 @@ static int spidev_message(struct spidev_data *spidev,
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+- if (total > bufsiz) {
++ /* Check total length of transfers. Also check each
++ * transfer length to avoid arithmetic overflow.
++ */
++ if (total > bufsiz || k_tmp->len > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
+index 7bdb62b..f83e00c 100644
+--- a/drivers/staging/android/sync.c
++++ b/drivers/staging/android/sync.c
+@@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
+ list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ active_list) {
+ if (fence_is_signaled_locked(&pt->base))
+- list_del(&pt->active_list);
++ list_del_init(&pt->active_list);
+ }
+
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index 6ed35b6..04fc217 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -335,11 +335,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
+ * LCD types
+ */
+ #define LCD_TYPE_NONE 0
+-#define LCD_TYPE_OLD 1
+-#define LCD_TYPE_KS0074 2
+-#define LCD_TYPE_HANTRONIX 3
+-#define LCD_TYPE_NEXCOM 4
+-#define LCD_TYPE_CUSTOM 5
++#define LCD_TYPE_CUSTOM 1
++#define LCD_TYPE_OLD 2
++#define LCD_TYPE_KS0074 3
++#define LCD_TYPE_HANTRONIX 4
++#define LCD_TYPE_NEXCOM 5
+
+ /*
+ * keypad types
+@@ -502,7 +502,7 @@ MODULE_PARM_DESC(keypad_type,
+ static int lcd_type = NOT_SET;
+ module_param(lcd_type, int, 0000);
+ MODULE_PARM_DESC(lcd_type,
+- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
++ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
+
+ static int lcd_height = NOT_SET;
+ module_param(lcd_height, int, 0000);
+diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
+index 07ce3fd..fdf5c56 100644
+--- a/drivers/staging/vt6655/rxtx.c
++++ b/drivers/staging/vt6655/rxtx.c
+@@ -1308,10 +1308,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
+ priv->hw->conf.chandef.chan->hw_value);
+ }
+
+- if (current_rate > RATE_11M)
+- pkt_type = (u8)priv->byPacketType;
+- else
++ if (current_rate > RATE_11M) {
++ if (info->band == IEEE80211_BAND_5GHZ) {
++ pkt_type = PK_TYPE_11A;
++ } else {
++ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
++ pkt_type = PK_TYPE_11GB;
++ else
++ pkt_type = PK_TYPE_11GA;
++ }
++ } else {
+ pkt_type = PK_TYPE_11B;
++ }
+
+ /*Set fifo controls */
+ if (pkt_type == PK_TYPE_11A)
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 77d6425..5e35612 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -537,7 +537,7 @@ static struct iscsit_transport iscsi_target_transport = {
+
+ static int __init iscsi_target_init_module(void)
+ {
+- int ret = 0;
++ int ret = 0, size;
+
+ pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+@@ -546,6 +546,7 @@ static int __init iscsi_target_init_module(void)
+ pr_err("Unable to allocate memory for iscsit_global\n");
+ return -1;
+ }
++ spin_lock_init(&iscsit_global->ts_bitmap_lock);
+ mutex_init(&auth_id_lock);
+ spin_lock_init(&sess_idr_lock);
+ idr_init(&tiqn_idr);
+@@ -555,15 +556,11 @@ static int __init iscsi_target_init_module(void)
+ if (ret < 0)
+ goto out;
+
+- ret = iscsi_thread_set_init();
+- if (ret < 0)
++ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
++ iscsit_global->ts_bitmap = vzalloc(size);
++ if (!iscsit_global->ts_bitmap) {
++ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+ goto configfs_out;
+-
+- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+- TARGET_THREAD_SET_COUNT) {
+- pr_err("iscsi_allocate_thread_sets() returned"
+- " unexpected value!\n");
+- goto ts_out1;
+ }
+
+ lio_qr_cache = kmem_cache_create("lio_qr_cache",
+@@ -572,7 +569,7 @@ static int __init iscsi_target_init_module(void)
+ if (!lio_qr_cache) {
+ pr_err("nable to kmem_cache_create() for"
+ " lio_qr_cache\n");
+- goto ts_out2;
++ goto bitmap_out;
+ }
+
+ lio_dr_cache = kmem_cache_create("lio_dr_cache",
+@@ -617,10 +614,8 @@ dr_out:
+ kmem_cache_destroy(lio_dr_cache);
+ qr_out:
+ kmem_cache_destroy(lio_qr_cache);
+-ts_out2:
+- iscsi_deallocate_thread_sets();
+-ts_out1:
+- iscsi_thread_set_free();
++bitmap_out:
++ vfree(iscsit_global->ts_bitmap);
+ configfs_out:
+ iscsi_target_deregister_configfs();
+ out:
+@@ -630,8 +625,6 @@ out:
+
+ static void __exit iscsi_target_cleanup_module(void)
+ {
+- iscsi_deallocate_thread_sets();
+- iscsi_thread_set_free();
+ iscsit_release_discovery_tpg();
+ iscsit_unregister_transport(&iscsi_target_transport);
+ kmem_cache_destroy(lio_qr_cache);
+@@ -641,6 +634,7 @@ static void __exit iscsi_target_cleanup_module(void)
+
+ iscsi_target_deregister_configfs();
+
++ vfree(iscsit_global->ts_bitmap);
+ kfree(iscsit_global);
+ }
+
+@@ -3715,17 +3709,16 @@ static int iscsit_send_reject(
+
+ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+ {
+- struct iscsi_thread_set *ts = conn->thread_set;
+ int ord, cpu;
+ /*
+- * thread_id is assigned from iscsit_global->ts_bitmap from
+- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
++ * bitmap_id is assigned from iscsit_global->ts_bitmap from
++ * within iscsit_start_kthreads()
+ *
+- * Here we use thread_id to determine which CPU that this
+- * iSCSI connection's iscsi_thread_set will be scheduled to
++ * Here we use bitmap_id to determine which CPU that this
++ * iSCSI connection's RX/TX threads will be scheduled to
+ * execute upon.
+ */
+- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
++ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+@@ -3914,7 +3907,7 @@ check_rsp_state:
+ switch (state) {
+ case ISTATE_SEND_LOGOUTRSP:
+ if (!iscsit_logout_post_handler(cmd, conn))
+- goto restart;
++ return -ECONNRESET;
+ /* fall through */
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_ASYNCMSG:
+@@ -3942,8 +3935,6 @@ check_rsp_state:
+
+ err:
+ return -1;
+-restart:
+- return -EAGAIN;
+ }
+
+ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+@@ -3970,21 +3961,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+ int iscsi_target_tx_thread(void *arg)
+ {
+ int ret = 0;
+- struct iscsi_conn *conn;
+- struct iscsi_thread_set *ts = arg;
++ struct iscsi_conn *conn = arg;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+-restart:
+- conn = iscsi_tx_thread_pre_handler(ts);
+- if (!conn)
+- goto out;
+-
+- ret = 0;
+-
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+@@ -3993,11 +3976,9 @@ restart:
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+ wait_event_interruptible(conn->queues_wq,
+- !iscsit_conn_all_queues_empty(conn) ||
+- ts->status == ISCSI_THREAD_SET_RESET);
++ !iscsit_conn_all_queues_empty(conn));
+
+- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+- signal_pending(current))
++ if (signal_pending(current))
+ goto transport_err;
+
+ get_immediate:
+@@ -4008,15 +3989,14 @@ get_immediate:
+ ret = iscsit_handle_response_queue(conn);
+ if (ret == 1)
+ goto get_immediate;
+- else if (ret == -EAGAIN)
+- goto restart;
++ else if (ret == -ECONNRESET)
++ goto out;
+ else if (ret < 0)
+ goto transport_err;
+ }
+
+ transport_err:
+ iscsit_take_action_for_connection_exit(conn);
+- goto restart;
+ out:
+ return 0;
+ }
+@@ -4111,8 +4091,7 @@ int iscsi_target_rx_thread(void *arg)
+ int ret;
+ u8 buffer[ISCSI_HDR_LEN], opcode;
+ u32 checksum = 0, digest = 0;
+- struct iscsi_conn *conn = NULL;
+- struct iscsi_thread_set *ts = arg;
++ struct iscsi_conn *conn = arg;
+ struct kvec iov;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+@@ -4120,11 +4099,6 @@ int iscsi_target_rx_thread(void *arg)
+ */
+ allow_signal(SIGINT);
+
+-restart:
+- conn = iscsi_rx_thread_pre_handler(ts);
+- if (!conn)
+- goto out;
+-
+ if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ struct completion comp;
+ int rc;
+@@ -4134,7 +4108,7 @@ restart:
+ if (rc < 0)
+ goto transport_err;
+
+- goto out;
++ goto transport_err;
+ }
+
+ while (!kthread_should_stop()) {
+@@ -4210,8 +4184,6 @@ transport_err:
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+ iscsit_take_action_for_connection_exit(conn);
+- goto restart;
+-out:
+ return 0;
+ }
+
+@@ -4273,7 +4245,24 @@ int iscsit_close_connection(
+ if (conn->conn_transport->transport_type == ISCSI_TCP)
+ complete(&conn->conn_logout_comp);
+
+- iscsi_release_thread_set(conn);
++ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
++ if (conn->tx_thread &&
++ cmpxchg(&conn->tx_thread_active, true, false)) {
++ send_sig(SIGINT, conn->tx_thread, 1);
++ kthread_stop(conn->tx_thread);
++ }
++ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
++ if (conn->rx_thread &&
++ cmpxchg(&conn->rx_thread_active, true, false)) {
++ send_sig(SIGINT, conn->rx_thread, 1);
++ kthread_stop(conn->rx_thread);
++ }
++ }
++
++ spin_lock(&iscsit_global->ts_bitmap_lock);
++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++ get_order(1));
++ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ iscsit_stop_timers_for_cmds(conn);
+ iscsit_stop_nopin_response_timer(conn);
+@@ -4551,15 +4540,13 @@ static void iscsit_logout_post_handler_closesession(
+ struct iscsi_conn *conn)
+ {
+ struct iscsi_session *sess = conn->sess;
+-
+- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
++ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
+- iscsit_stop_session(sess, 1, 1);
++ iscsit_stop_session(sess, sleep, sleep);
+ iscsit_dec_session_usage_count(sess);
+ target_put_session(sess->se_sess);
+ }
+@@ -4567,13 +4554,12 @@ static void iscsit_logout_post_handler_closesession(
+ static void iscsit_logout_post_handler_samecid(
+ struct iscsi_conn *conn)
+ {
+- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
++ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+- iscsit_cause_connection_reinstatement(conn, 1);
++ iscsit_cause_connection_reinstatement(conn, sleep);
+ iscsit_dec_conn_usage_count(conn);
+ }
+
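The iscsi_target.c rewrite above retires the iscsi_thread_set pool: each connection now takes a small ID from iscsit_global->ts_bitmap under ts_bitmap_lock and releases it again in iscsit_close_connection(). A simplified userspace analog of that locked bitmap allocator is sketched below; a mutex stands in for the kernel spinlock, and id_alloc/id_free are illustrative names.

#include <stdio.h>
#include <pthread.h>

#define MAX_IDS 64

static unsigned long long bitmap;          /* one bit per ID */
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Grab the lowest free ID, or -1 if the map is full. */
static int id_alloc(void)
{
	int id = -1;

	pthread_mutex_lock(&bitmap_lock);
	for (int i = 0; i < MAX_IDS; i++) {
		if (!(bitmap & (1ULL << i))) {
			bitmap |= 1ULL << i;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&bitmap_lock);
	return id;
}

/* Return an ID to the pool. */
static void id_free(int id)
{
	pthread_mutex_lock(&bitmap_lock);
	bitmap &= ~(1ULL << id);
	pthread_mutex_unlock(&bitmap_lock);
}

int main(void)
{
	int a = id_alloc(), b = id_alloc();

	printf("allocated %d and %d\n", a, b);
	id_free(a);
	printf("after free, next is %d\n", id_alloc());
	id_free(b);
	return 0;
}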
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index bdd8731..e008ed2 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -860,7 +860,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+ }
+ spin_unlock_bh(&conn->state_lock);
+
+- iscsi_thread_set_force_reinstatement(conn);
++ if (conn->tx_thread && conn->tx_thread_active)
++ send_sig(SIGINT, conn->tx_thread, 1);
++ if (conn->rx_thread && conn->rx_thread_active)
++ send_sig(SIGINT, conn->rx_thread, 1);
+
+ sleep:
+ wait_for_completion(&conn->conn_wait_rcfr_comp);
+@@ -885,10 +888,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+ return;
+ }
+
+- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+- spin_unlock_bh(&conn->state_lock);
+- return;
+- }
++ if (conn->tx_thread && conn->tx_thread_active)
++ send_sig(SIGINT, conn->tx_thread, 1);
++ if (conn->rx_thread && conn->rx_thread_active)
++ send_sig(SIGINT, conn->rx_thread, 1);
+
+ atomic_set(&conn->connection_reinstatement, 1);
+ if (!sleep) {
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 153fb66..345f073 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -699,6 +699,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+ iscsit_start_nopin_timer(conn);
+ }
+
++int iscsit_start_kthreads(struct iscsi_conn *conn)
++{
++ int ret = 0;
++
++ spin_lock(&iscsit_global->ts_bitmap_lock);
++ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
++ ISCSIT_BITMAP_BITS, get_order(1));
++ spin_unlock(&iscsit_global->ts_bitmap_lock);
++
++ if (conn->bitmap_id < 0) {
++ pr_err("bitmap_find_free_region() failed for"
++ " iscsit_start_kthreads()\n");
++ return -ENOMEM;
++ }
++
++ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
++ "%s", ISCSI_TX_THREAD_NAME);
++ if (IS_ERR(conn->tx_thread)) {
++ pr_err("Unable to start iscsi_target_tx_thread\n");
++ ret = PTR_ERR(conn->tx_thread);
++ goto out_bitmap;
++ }
++ conn->tx_thread_active = true;
++
++ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
++ "%s", ISCSI_RX_THREAD_NAME);
++ if (IS_ERR(conn->rx_thread)) {
++ pr_err("Unable to start iscsi_target_rx_thread\n");
++ ret = PTR_ERR(conn->rx_thread);
++ goto out_tx;
++ }
++ conn->rx_thread_active = true;
++
++ return 0;
++out_tx:
++ kthread_stop(conn->tx_thread);
++ conn->tx_thread_active = false;
++out_bitmap:
++ spin_lock(&iscsit_global->ts_bitmap_lock);
++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++ get_order(1));
++ spin_unlock(&iscsit_global->ts_bitmap_lock);
++ return ret;
++}
++
+ int iscsi_post_login_handler(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+@@ -709,7 +754,7 @@ int iscsi_post_login_handler(
+ struct se_session *se_sess = sess->se_sess;
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+- struct iscsi_thread_set *ts;
++ int rc;
+
+ iscsit_inc_conn_usage_count(conn);
+
+@@ -724,7 +769,6 @@ int iscsi_post_login_handler(
+ /*
+ * SCSI Initiator -> SCSI Target Port Mapping
+ */
+- ts = iscsi_get_thread_set();
+ if (!zero_tsih) {
+ iscsi_set_session_parameters(sess->sess_ops,
+ conn->param_list, 0);
+@@ -751,9 +795,11 @@ int iscsi_post_login_handler(
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+- iscsi_post_login_start_timers(conn);
++ rc = iscsit_start_kthreads(conn);
++ if (rc)
++ return rc;
+
+- iscsi_activate_thread_set(conn, ts);
++ iscsi_post_login_start_timers(conn);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+@@ -810,8 +856,11 @@ int iscsi_post_login_handler(
+ " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ spin_unlock_bh(&se_tpg->session_lock);
+
++ rc = iscsit_start_kthreads(conn);
++ if (rc)
++ return rc;
++
+ iscsi_post_login_start_timers(conn);
+- iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
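iscsit_start_kthreads() above follows the usual acquire-then-unwind pattern: claim a bitmap slot, start the TX thread, start the RX thread, and on failure fall through labels that undo the earlier steps in reverse order. A compact pthread-based sketch of that goto unwind; start_workers, worker_fn and struct conn are illustrative, not the kernel API.

#include <stdio.h>
#include <pthread.h>

static void *worker_fn(void *arg)
{
	/* a real worker would loop on a queue here */
	return arg;
}

struct conn {
	pthread_t tx_thread;
	pthread_t rx_thread;
};

/* Start both workers; on failure, tear down whatever already succeeded,
 * in reverse order, mirroring the out_tx/out_bitmap labels above. */
static int start_workers(struct conn *c)
{
	int ret;

	ret = pthread_create(&c->tx_thread, NULL, worker_fn, c);
	if (ret)
		goto out;

	ret = pthread_create(&c->rx_thread, NULL, worker_fn, c);
	if (ret)
		goto out_tx;

	return 0;

out_tx:
	pthread_join(c->tx_thread, NULL);   /* undo the first step */
out:
	return ret;
}

int main(void)
{
	struct conn c;

	if (start_workers(&c) == 0) {
		pthread_join(c.tx_thread, NULL);
		pthread_join(c.rx_thread, NULL);
		puts("both workers ran");
	}
	return 0;
}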
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 44620fb..cbb0cc2 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+- struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+- u32 prot_size, len, size;
+- int rc, ret = 1, i;
++ u32 prot_size;
++ int rc, ret = 1;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+- fd_prot->prot_buf = vzalloc(prot_size);
++ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+- fd_prot->prot_sg_nents = cmd->t_prot_nents;
+- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+- fd_prot->prot_sg_nents, GFP_KERNEL);
++ fd_prot->prot_sg_nents = 1;
++ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
++ GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+- size = prot_size;
+-
+- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+-
+- len = min_t(u32, PAGE_SIZE, size);
+- sg_set_buf(sg, buf, len);
+- size -= len;
+- buf += len;
+- }
++ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
++ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
+ }
+
+ if (is_write) {
+@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ }
+
+ return ret;
+@@ -549,6 +541,56 @@ fd_execute_write_same(struct se_cmd *cmd)
+ return 0;
+ }
+
++static int
++fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
++ void *buf, size_t bufsize)
++{
++ struct fd_dev *fd_dev = FD_DEV(se_dev);
++ struct file *prot_fd = fd_dev->fd_prot_file;
++ sector_t prot_length, prot;
++ loff_t pos = lba * se_dev->prot_length;
++
++ if (!prot_fd) {
++ pr_err("Unable to locate fd_dev->fd_prot_file\n");
++ return -ENODEV;
++ }
++
++ prot_length = nolb * se_dev->prot_length;
++
++ for (prot = 0; prot < prot_length;) {
++ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
++ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
++
++ if (ret != len) {
++ pr_err("vfs_write to prot file failed: %zd\n", ret);
++ return ret < 0 ? ret : -ENODEV;
++ }
++ prot += ret;
++ }
++
++ return 0;
++}
++
++static int
++fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
++{
++ void *buf;
++ int rc;
++
++ buf = (void *)__get_free_page(GFP_KERNEL);
++ if (!buf) {
++ pr_err("Unable to allocate FILEIO prot buf\n");
++ return -ENOMEM;
++ }
++ memset(buf, 0xff, PAGE_SIZE);
++
++ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
++
++ free_page((unsigned long)buf);
++
++ return rc;
++}
++
+ static sense_reason_t
+ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ {
+@@ -556,6 +598,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+
++ if (cmd->se_dev->dev_attrib.pi_prot_type) {
++ ret = fd_do_prot_unmap(cmd, lba, nolb);
++ if (ret)
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ }
++
+ if (S_ISBLK(inode->i_mode)) {
+ /* The backend is block device, use discard */
+ struct block_device *bdev = inode->i_bdev;
+@@ -658,11 +706,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ }
+ } else {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+@@ -678,7 +726,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+@@ -714,7 +762,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+
+ if (ret < 0) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+@@ -878,48 +926,28 @@ static int fd_init_prot(struct se_device *dev)
+
+ static int fd_format_prot(struct se_device *dev)
+ {
+- struct fd_dev *fd_dev = FD_DEV(dev);
+- struct file *prot_fd = fd_dev->fd_prot_file;
+- sector_t prot_length, prot;
+ unsigned char *buf;
+- loff_t pos = 0;
+ int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+- int rc, ret = 0, size, len;
++ int ret;
+
+ if (!dev->dev_attrib.pi_prot_type) {
+ pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ return -ENODEV;
+ }
+- if (!prot_fd) {
+- pr_err("Unable to locate fd_dev->fd_prot_file\n");
+- return -ENODEV;
+- }
+
+ buf = vzalloc(unit_size);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+- size = prot_length;
+
+ pr_debug("Using FILEIO prot_length: %llu\n",
+- (unsigned long long)prot_length);
++ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
++ dev->prot_length);
+
+ memset(buf, 0xff, unit_size);
+- for (prot = 0; prot < prot_length; prot += unit_size) {
+- len = min(unit_size, size);
+- rc = kernel_write(prot_fd, buf, len, pos);
+- if (rc != len) {
+- pr_err("vfs_write to prot file failed: %d\n", rc);
+- ret = -ENODEV;
+- goto out;
+- }
+- pos += len;
+- size -= len;
+- }
+-
+-out:
++ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
++ buf, unit_size);
+ vfree(buf);
+ return ret;
+ }
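fd_do_prot_fill() consolidates two open-coded loops into one helper that writes the protection data in fixed-size chunks, each iteration sized min(bufsize, remaining). A userspace sketch of that chunked-fill loop, using stdio in place of kernel_write(); fill_region and CHUNK are illustrative.

#include <stdio.h>
#include <string.h>

#define CHUNK 4096

/* Write 'total' bytes of 'fill' starting at 'offset', CHUNK bytes at a time. */
static int fill_region(FILE *f, long offset, size_t total, unsigned char fill)
{
	unsigned char buf[CHUNK];
	size_t done = 0;

	memset(buf, fill, sizeof(buf));
	if (fseek(f, offset, SEEK_SET) != 0)
		return -1;

	while (done < total) {
		size_t len = total - done < CHUNK ? total - done : CHUNK;

		if (fwrite(buf, 1, len, f) != len)
			return -1;        /* short write: report failure */
		done += len;
	}
	return 0;
}

int main(void)
{
	FILE *f = tmpfile();

	if (!f)
		return 1;
	if (fill_region(f, 0, 10000, 0xff) == 0)
		puts("filled 10000 bytes in 4096-byte chunks");
	fclose(f);
	return 0;
}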
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 3e72974..755bd9b3 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -312,7 +312,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ return 0;
+ }
+
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+ {
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+@@ -376,7 +376,7 @@ sbc_execute_rw(struct se_cmd *cmd)
+ cmd->data_direction);
+ }
+
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+
+@@ -399,7 +399,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+ return TCM_NO_SENSE;
+ }
+
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+@@ -414,11 +414,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+- * which will not have taken ->caw_mutex yet..
++ * which will not have taken ->caw_sem yet..
+ */
+- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
++ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+ return TCM_NO_SENSE;
+ /*
++ * Handle special case for zero-length COMPARE_AND_WRITE
++ */
++ if (!cmd->data_length)
++ goto out;
++ /*
+ * Immediately exit + release dev->caw_sem if command has already
+ * been failed with a non-zero SCSI status.
+ */
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index ac3cbab..f786de0 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ transport_complete_task_attr(cmd);
+ /*
+ * Handle special case for COMPARE_AND_WRITE failure, where the
+- * callback is expected to drop the per device ->caw_mutex.
++ * callback is expected to drop the per device ->caw_sem.
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+- cmd->transport_complete_callback(cmd);
++ cmd->transport_complete_callback(cmd, false);
+
+ switch (sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+@@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+
+- rc = cmd->transport_complete_callback(cmd);
++ rc = cmd->transport_complete_callback(cmd, true);
+ if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
++ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ !cmd->data_length)
++ goto queue_rsp;
++
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+@@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ }
+ }
+
++queue_rsp:
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+@@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+ static inline void transport_free_pages(struct se_cmd *cmd)
+ {
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++ /*
++ * Release special case READ buffer payload required for
++ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
++ */
++ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++ transport_free_sgl(cmd->t_bidi_data_sg,
++ cmd->t_bidi_data_nents);
++ cmd->t_bidi_data_sg = NULL;
++ cmd->t_bidi_data_nents = 0;
++ }
+ transport_reset_sgl_orig(cmd);
+ return;
+ }
+@@ -2246,6 +2261,7 @@ sense_reason_t
+ transport_generic_new_cmd(struct se_cmd *cmd)
+ {
+ int ret = 0;
++ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ /*
+ * Determine is the TCM fabric module has already allocated physical
+@@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+@@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ cmd->data_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ cmd->data_length) {
++ /*
++ * Special case for COMPARE_AND_WRITE with fabrics
++ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
++ */
++ u32 caw_length = cmd->t_task_nolb *
++ cmd->se_dev->dev_attrib.block_size;
++
++ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
++ &cmd->t_bidi_data_nents,
++ caw_length, zero_flag);
++ if (ret < 0)
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * If this command is not a write we can execute it right here,
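The target-core change threads a bool success argument through transport_complete_callback so the COMPARE_AND_WRITE handler can tell the failure path (which still has to drop caw_sem) apart from normal completion. A minimal sketch of that callback-signature style; struct cmd, complete_cb and caw_complete are illustrative names.

#include <stdio.h>
#include <stdbool.h>

struct cmd;
typedef int (*complete_cb)(struct cmd *cmd, bool success);

struct cmd {
	const char *name;
	complete_cb on_complete;
};

/* On failure we only release resources; on success we also post results. */
static int caw_complete(struct cmd *cmd, bool success)
{
	if (!success) {
		printf("%s: releasing lock after failure\n", cmd->name);
		return 0;
	}
	printf("%s: comparing and writing back data\n", cmd->name);
	return 0;
}

int main(void)
{
	struct cmd c = { "compare_and_write", caw_complete };

	c.on_complete(&c, true);    /* normal completion path */
	c.on_complete(&c, false);   /* error path, cleanup only */
	return 0;
}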
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index deae122..d465ace 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -3444,7 +3444,8 @@ void serial8250_suspend_port(int line)
+ port->type != PORT_8250) {
+ unsigned char canary = 0xa5;
+ serial_out(up, UART_SCR, canary);
+- up->canary = canary;
++ if (serial_in(up, UART_SCR) == canary)
++ up->canary = canary;
+ }
+
+ uart_suspend_port(&serial8250_reg, port);
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 6ae5b85..7a80250 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -629,6 +629,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
+ { "80860F0A", 0 },
+ { "8086228A", 0 },
+ { "APMC0D08", 0},
++ { "AMD0020", 0 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 0eb29b1..2306191 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -818,7 +818,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
+ if (sts2 & USR2_ORE) {
+ dev_err(sport->port.dev, "Rx FIFO overrun\n");
+ sport->port.icount.overrun++;
+- writel(sts2 | USR2_ORE, sport->port.membase + USR2);
++ writel(USR2_ORE, sport->port.membase + USR2);
+ }
+
+ return IRQ_HANDLED;
+@@ -1181,10 +1181,12 @@ static int imx_startup(struct uart_port *port)
+ imx_uart_dma_init(sport);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
++
+ /*
+ * Finally, clear and enable interrupts
+ */
+ writel(USR1_RTSD, sport->port.membase + USR1);
++ writel(USR2_ORE, sport->port.membase + USR2);
+
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
+@@ -1199,10 +1201,6 @@ static int imx_startup(struct uart_port *port)
+
+ writel(temp, sport->port.membase + UCR1);
+
+- /* Clear any pending ORE flag before enabling interrupt */
+- temp = readl(sport->port.membase + USR2);
+- writel(temp | USR2_ORE, sport->port.membase + USR2);
+-
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_OREN;
+ writel(temp, sport->port.membase + UCR4);
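The imx fix writes only USR2_ORE back to the status register rather than readl(...) | USR2_ORE: USR2 is a write-one-to-clear register, so echoing the whole value would also acknowledge every other pending bit. A tiny model of w1c semantics; the register variable and bit positions below are illustrative.

#include <stdio.h>
#include <stdint.h>

#define ORE  (1u << 1)    /* overrun flag (illustrative layout) */
#define RTSD (1u << 12)   /* RTS delta flag */

/* Model a write-one-to-clear status register. */
static uint32_t usr2 = ORE | RTSD;

static void usr2_write(uint32_t val)
{
	usr2 &= ~val;        /* only the bits written as 1 are cleared */
}

int main(void)
{
	printf("before: %#x\n", (unsigned)usr2);

	/* wrong: usr2_write(usr2 | ORE) would acknowledge RTSD as well;
	 * right: clear only the overrun bit we just handled */
	usr2_write(ORE);

	printf("after:  %#x (RTSD still pending)\n", (unsigned)usr2);
	return 0;
}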
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index a051a7a..a81f9dd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ dev_dbg(&desc->intf->dev,
+ "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+- dr->wIndex, dr->wLength);
++ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
+ break;
+
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
+ clear_bit(WDM_POLL_RUNNING, &desc->flags);
+ dev_err(&desc->intf->dev,
+ "unknown notification %d received: index %d len %d\n",
+- dr->bNotificationType, dr->wIndex, dr->wLength);
++ dr->bNotificationType,
++ le16_to_cpu(dr->wIndex),
++ le16_to_cpu(dr->wLength));
+ goto exit;
+ }
+
+@@ -408,7 +410,7 @@ static ssize_t wdm_write
+ USB_RECIP_INTERFACE);
+ req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ req->wValue = 0;
+- req->wIndex = desc->inum;
++ req->wIndex = desc->inum; /* already converted */
+ req->wLength = cpu_to_le16(count);
+ set_bit(WDM_IN_USE, &desc->flags);
+ desc->outbuf = buf;
+@@ -422,7 +424,7 @@ static ssize_t wdm_write
+ rv = usb_translate_errors(rv);
+ } else {
+ dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+- req->wIndex);
++ le16_to_cpu(req->wIndex));
+ }
+ out:
+ usb_autopm_put_interface(desc->intf);
+@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+ desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
+ desc->irq->wValue = 0;
+- desc->irq->wIndex = desc->inum;
++ desc->irq->wIndex = desc->inum; /* already converted */
+ desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
+
+ usb_fill_control_urb(
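The cdc-wdm change converts the little-endian wIndex/wLength fields with le16_to_cpu() before logging them and annotates the already-converted inum assignments so they are not converted twice. A userspace sketch of the conversion itself; le16_to_host is an illustrative helper, the kernel uses le16_to_cpu.

#include <stdio.h>
#include <stdint.h>

/* Interpret two raw wire bytes as a little-endian 16-bit value,
 * independent of host byte order. */
static uint16_t le16_to_host(const uint8_t raw[2])
{
	return (uint16_t)(raw[0] | (raw[1] << 8));
}

int main(void)
{
	/* wLength = 0x0102 on the wire (low byte first) */
	uint8_t wire[2] = { 0x02, 0x01 };

	printf("wLength = %u\n", le16_to_host(wire));  /* 258 */
	return 0;
}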
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d7c3d5a..3b71516 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3406,10 +3406,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ if (status) {
+ dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
+ } else {
+- /* drive resume for at least 20 msec */
++ /* drive resume for USB_RESUME_TIMEOUT msec */
+ dev_dbg(&udev->dev, "usb %sresume\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
+- msleep(25);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Virtual root hubs can trigger on GET_PORT_STATUS to
+ * stop resume signaling. Then finish the resume
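This hunk and the host-controller hunks that follow all replace scattered 20/25/50 ms literals with the shared USB_RESUME_TIMEOUT constant, so every driver drives resume signalling for the same centrally defined period. Reduced to a sketch: the 40 ms value and msecs_to_ticks below are assumptions for illustration, the real constant lives in the USB core headers.

#include <stdio.h>

/* Illustrative value; the kernel defines the real USB_RESUME_TIMEOUT. */
#define USB_RESUME_TIMEOUT_MS 40

/* Convert a millisecond timeout into scheduler ticks, as the drivers
 * above do with msecs_to_jiffies(). */
static unsigned long msecs_to_ticks(unsigned int ms, unsigned int hz)
{
	return (unsigned long)ms * hz / 1000;
}

int main(void)
{
	printf("drive resume for %u ms = %lu ticks at HZ=250\n",
	       USB_RESUME_TIMEOUT_MS,
	       msecs_to_ticks(USB_RESUME_TIMEOUT_MS, 250));
	return 0;
}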
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index c78c874..758b7e0 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1521,7 +1521,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ writel(0, hsotg->regs + PCGCTL);
+- usleep_range(20000, 40000);
++ msleep(USB_RESUME_TIMEOUT);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
+index 9054598..6385c19 100644
+--- a/drivers/usb/gadget/legacy/printer.c
++++ b/drivers/usb/gadget/legacy/printer.c
+@@ -1031,6 +1031,15 @@ unknown:
+ break;
+ }
+ /* host either stalls (value < 0) or reports success */
++ if (value >= 0) {
++ req->length = value;
++ req->zero = value < wLength;
++ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
++ if (value < 0) {
++ ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
++ req->status = 0;
++ }
++ }
+ return value;
+ }
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 85e56d1..f4d88df 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ ehci->reset_done[i] == 0))
+ continue;
+
+- /* start 20 msec resume signaling from this port,
+- * and make hub_wq collect PORT_STAT_C_SUSPEND to
+- * stop that signaling. Use 5 ms extra for safety,
+- * like usb_port_resume() does.
++ /* start USB_RESUME_TIMEOUT msec resume signaling from
++ * this port, and make hub_wq collect
++ * PORT_STAT_C_SUSPEND to stop that signaling.
+ */
+- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
++ ehci->reset_done[i] = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(i, &ehci->resuming_ports);
+ ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ usb_hcd_start_port_resume(&hcd->self, i);
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 87cf86f..7354d01 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
+ ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
+ }
+
+- /* msleep for 20ms only if code is trying to resume port */
++ /*
++ * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
++ * port
++ */
+ if (resume_needed) {
+ spin_unlock_irq(&ehci->lock);
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
+@@ -942,7 +945,7 @@ int ehci_hub_control(
+ temp &= ~PORT_WAKE_BITS;
+ ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ ehci->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(wIndex, &ehci->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
+ break;
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 475b21f..7a6681f 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
+index a83eefe..ba77e2e 100644
+--- a/drivers/usb/host/fusbh200-hcd.c
++++ b/drivers/usb/host/fusbh200-hcd.c
+@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+- /* resume signaling for 20 msec */
+ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ fusbh200->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fusbh200->port_c_suspend);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 113d0cc..9ef5644 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1490,7 +1490,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
+ spin_unlock_irq(&isp116x->lock);
+
+ hcd->state = HC_STATE_RESUMING;
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Go operational */
+ spin_lock_irq(&isp116x->lock);
+diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
+index ef7efb2..28a2866 100644
+--- a/drivers/usb/host/oxu210hp-hcd.c
++++ b/drivers/usb/host/oxu210hp-hcd.c
+@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+ || oxu->reset_done[i] != 0)
+ continue;
+
+- /* start 20 msec resume signaling from this port,
+- * and make hub_wq collect PORT_STAT_C_SUSPEND to
++ /* start USB_RESUME_TIMEOUT resume signaling from this
++ * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
+ * stop that signaling.
+ */
+- oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
++ oxu->reset_done[i] = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+ mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+ }
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index bdc82fe..54a4170 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
+ rh->port &= ~USB_PORT_STAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+ r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+- msleep(50);
++ msleep(USB_RESUME_TIMEOUT);
+ r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+ }
+
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index 4f4ba1e..9118cd8 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1259,7 +1259,7 @@ sl811h_hub_control(
+ sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+ mod_timer(&sl811->timer, jiffies
+- + msecs_to_jiffies(20));
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case USB_PORT_FEAT_POWER:
+ port_power(sl811, 0);
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 19ba5ea..7b3d1af 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -166,7 +166,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->ports_timeout = jiffies +
+- msecs_to_jiffies(25);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ usb_hcd_start_port_resume(
+ &uhci_to_hcd(uhci)->self, port);
+
+@@ -338,7 +338,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ uhci_finish_suspend(uhci, port, port_addr);
+
+ /* USB v2.0 7.1.7.5 */
+- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
++ uhci->ports_timeout = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_POWER:
+ /* UHCI has no power switching */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 73485fa..eeedde8 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ } else {
+ xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ bus_state->resume_done[faked_port_index] = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(faked_port_index, &bus_state->resuming_ports);
+ mod_timer(&hcd->rh_timer,
+ bus_state->resume_done[faked_port_index]);
+diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
+index 3cb98b1..7911b6b 100644
+--- a/drivers/usb/isp1760/isp1760-hcd.c
++++ b/drivers/usb/isp1760/isp1760-hcd.c
+@@ -1869,7 +1869,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
+ reg_write32(hcd->regs, HC_PORTSC1,
+ temp | PORT_RESUME);
+ priv->reset_done = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ }
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 067920f..ec0ee3b 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -99,6 +99,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
++#include <linux/usb.h>
+
+ #include "musb_core.h"
+
+@@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ (USB_PORT_STAT_C_SUSPEND << 16)
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ musb->need_finish_resume = 1;
+
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+@@ -1597,16 +1598,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ is_host_active(musb) ? "host" : "peripheral",
+ musb->int_usb, musb->int_tx, musb->int_rx);
+
+- /* the core can interrupt us for multiple reasons; docs have
+- * a generic interrupt flowchart to follow
++ /**
++ * According to Mentor Graphics' documentation, flowchart on page 98,
++ * IRQ should be handled as follows:
++ *
++ * . Resume IRQ
++ * . Session Request IRQ
++ * . VBUS Error IRQ
++ * . Suspend IRQ
++ * . Connect IRQ
++ * . Disconnect IRQ
++ * . Reset/Babble IRQ
++ * . SOF IRQ (we're not using this one)
++ * . Endpoint 0 IRQ
++ * . TX Endpoints
++ * . RX Endpoints
++ *
++ * We will be following that flowchart in order to avoid any problems
++ * that might arise with internal Finite State Machine.
+ */
++
+ if (musb->int_usb)
+ retval |= musb_stage0_irq(musb, musb->int_usb,
+ devctl);
+
+- /* "stage 1" is handling endpoint irqs */
+-
+- /* handle endpoint 0 first */
+ if (musb->int_tx & 1) {
+ if (is_host_active(musb))
+ retval |= musb_h_ep0_irq(musb);
+@@ -1614,37 +1629,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ retval |= musb_g_ep0_irq(musb);
+ }
+
+- /* RX on endpoints 1-15 */
+- reg = musb->int_rx >> 1;
++ reg = musb->int_tx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+- /* musb_ep_select(musb->mregs, ep_num); */
+- /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (is_host_active(musb))
+- musb_host_rx(musb, ep_num);
++ musb_host_tx(musb, ep_num);
+ else
+- musb_g_rx(musb, ep_num);
++ musb_g_tx(musb, ep_num);
+ }
+-
+ reg >>= 1;
+ ep_num++;
+ }
+
+- /* TX on endpoints 1-15 */
+- reg = musb->int_tx >> 1;
++ reg = musb->int_rx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+- /* musb_ep_select(musb->mregs, ep_num); */
+- /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (is_host_active(musb))
+- musb_host_tx(musb, ep_num);
++ musb_host_rx(musb, ep_num);
+ else
+- musb_g_tx(musb, ep_num);
++ musb_g_rx(musb, ep_num);
+ }
++
+ reg >>= 1;
+ ep_num++;
+ }
+@@ -2463,7 +2472,7 @@ static int musb_resume(struct device *dev)
+ if (musb->need_finish_resume) {
+ musb->need_finish_resume = 0;
+ schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(20));
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+
+ /*
+@@ -2506,7 +2515,7 @@ static int musb_runtime_resume(struct device *dev)
+ if (musb->need_finish_resume) {
+ musb->need_finish_resume = 0;
+ schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(20));
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+
+ return 0;
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index 294e159..5428ed1 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
+ /* later, GetPortStatus will stop RESUME signaling */
+ musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(20));
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ }
+ }
+
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 2f9735b..d1cd6b5 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
+
+ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
+ {
+- return res == match_data;
++ struct usb_phy **phy = res;
++
++ return *phy == match_data;
+ }
+
+ /**
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 995986b..d925f55 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ int elf_prot = 0, elf_flags;
+ unsigned long k, vaddr;
++ unsigned long total_size = 0;
+
+ if (elf_ppnt->p_type != PT_LOAD)
+ continue;
+@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++ total_size = total_mapping_size(elf_phdata,
++ loc->elf_ex.e_phnum);
++ if (!total_size) {
++ error = -EINVAL;
++ goto out_free_dentry;
++ }
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+- elf_prot, elf_flags, 0);
++ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(error)) {
+ retval = IS_ERR((void *)error) ?
+ PTR_ERR((void*)error) : -EINVAL;
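For ET_DYN binaries the loader now computes total_mapping_size() over all PT_LOAD headers and reserves that span with the first elf_map() call, so later segments cannot collide with unrelated mappings. A sketch of how such a span can be derived from the program headers; struct phdr and the PAGE macros are simplified stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

#define PT_LOAD   1
#define PAGE_SIZE 4096ULL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PAGE_START(x) ((x) & ~(PAGE_SIZE - 1))

struct phdr {
	uint32_t p_type;
	uint64_t p_vaddr;
	uint64_t p_memsz;
};

/* Size of the address range covered by all PT_LOAD segments,
 * from the lowest page start to the highest page end. */
static uint64_t total_mapping_size(const struct phdr *ph, int n)
{
	uint64_t lo = 0, hi = 0;
	int seen = 0;

	for (int i = 0; i < n; i++) {
		if (ph[i].p_type != PT_LOAD)
			continue;
		if (!seen || PAGE_START(ph[i].p_vaddr) < lo)
			lo = PAGE_START(ph[i].p_vaddr);
		if (PAGE_ALIGN(ph[i].p_vaddr + ph[i].p_memsz) > hi)
			hi = PAGE_ALIGN(ph[i].p_vaddr + ph[i].p_memsz);
		seen = 1;
	}
	return seen ? hi - lo : 0;
}

int main(void)
{
	struct phdr ph[] = {
		{ PT_LOAD, 0x0000,   0x3000 },
		{ PT_LOAD, 0x200000, 0x1000 },
	};

	printf("reserve %#llx bytes for both segments\n",
	       (unsigned long long)total_mapping_size(ph, 2));
	return 0;
}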
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8b353ad..0a795c9 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6956,12 +6956,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ return -ENOSPC;
+ }
+
+- if (btrfs_test_opt(root, DISCARD))
+- ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ if (pin)
+ pin_down_extent(root, cache, start, len, 1);
+ else {
++ if (btrfs_test_opt(root, DISCARD))
++ ret = btrfs_discard_extent(root, start, len, NULL);
+ btrfs_add_free_space(cache, start, len);
+ btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 74609b9..f23d4be 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+ if (src == dst)
+ return -EINVAL;
+
++ if (len == 0)
++ return 0;
++
+ btrfs_double_lock(src, loff, dst, dst_loff, len);
+
+ ret = extent_same_check_offsets(src, loff, len);
+@@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ if (off + len == src->i_size)
+ len = ALIGN(src->i_size, bs) - off;
+
++ if (len == 0) {
++ ret = 0;
++ goto out_unlock;
++ }
++
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ !IS_ALIGNED(destoff, bs))
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 883b936..45ea704 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
+ /*
+ * Check if the attribute is in a supported namespace.
+ *
+- * This applied after the check for the synthetic attributes in the system
++ * This is applied after the check for the synthetic attributes in the system
+ * namespace.
+ */
+-static bool btrfs_is_valid_xattr(const char *name)
++static int btrfs_is_valid_xattr(const char *name)
+ {
+- return !strncmp(name, XATTR_SECURITY_PREFIX,
+- XATTR_SECURITY_PREFIX_LEN) ||
+- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
+- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
++ int len = strlen(name);
++ int prefixlen = 0;
++
++ if (!strncmp(name, XATTR_SECURITY_PREFIX,
++ XATTR_SECURITY_PREFIX_LEN))
++ prefixlen = XATTR_SECURITY_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
++ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++ prefixlen = XATTR_USER_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
++ prefixlen = XATTR_BTRFS_PREFIX_LEN;
++ else
++ return -EOPNOTSUPP;
++
++ /*
++ * The name cannot consist of just prefix
++ */
++ if (len <= prefixlen)
++ return -EINVAL;
++
++ return 0;
+ }
+
+ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size)
+ {
++ int ret;
++
+ /*
+ * If this is a request for a synthetic attribute in the system.*
+ * namespace use the generic infrastructure to resolve a handler
+@@ -388,8 +408,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_getxattr(dentry, name, buffer, size);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+ return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ }
+
+@@ -397,6 +418,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -413,8 +435,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_setxattr(dentry, name, value, size, flags);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
+@@ -430,6 +453,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -446,8 +470,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_removexattr(dentry, name);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
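btrfs_is_valid_xattr() now distinguishes two failure modes: -EOPNOTSUPP for an unknown namespace and -EINVAL when the name is nothing more than a bare prefix such as "user.". A userspace sketch of that two-level validation; the prefix table and xattr_name_ok are illustrative.

#include <stdio.h>
#include <string.h>
#include <errno.h>

static const char *prefixes[] = { "user.", "trusted.", "security.", "system." };

/* 0 on success, -EOPNOTSUPP for an unknown namespace,
 * -EINVAL when the name is only a prefix with nothing after it. */
static int xattr_name_ok(const char *name)
{
	for (size_t i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
		size_t plen = strlen(prefixes[i]);

		if (strncmp(name, prefixes[i], plen) == 0)
			return strlen(name) > plen ? 0 : -EINVAL;
	}
	return -EOPNOTSUPP;
}

int main(void)
{
	printf("user.comment -> %d\n", xattr_name_ok("user.comment")); /* 0 */
	printf("user.        -> %d\n", xattr_name_ok("user."));        /* -EINVAL */
	printf("foo.bar      -> %d\n", xattr_name_ok("foo.bar"));      /* -EOPNOTSUPP */
	return 0;
}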
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 28fe71a..aae7011 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- struct buffer_head *bh;
++ struct buffer_head *bh = NULL;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+ struct super_block *sb;
+@@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return retval;
+ if (retval == 1) {
+ retval = 0;
+- return retval;
++ goto out;
+ }
+ }
+
+ if (is_dx(dir)) {
+ retval = ext4_dx_add_entry(handle, dentry, inode);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+- return retval;
++ goto out;
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ dx_fallback++;
+ ext4_mark_inode_dirty(handle, dir);
+@@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return PTR_ERR(bh);
+
+ retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+- if (retval != -ENOSPC) {
+- brelse(bh);
+- return retval;
+- }
++ if (retval != -ENOSPC)
++ goto out;
+
+ if (blocks == 1 && !dx_fallback &&
+- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+- return make_indexed_dir(handle, dentry, inode, bh);
++ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++ retval = make_indexed_dir(handle, dentry, inode, bh);
++ bh = NULL; /* make_indexed_dir releases bh */
++ goto out;
++ }
+ brelse(bh);
+ }
+ bh = ext4_append(handle, dir, &block);
+@@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ }
+
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ brelse(bh);
+ if (retval == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index 665ef5a..a563ddb 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -31,7 +31,7 @@
+ static struct hlist_head nlm_files[FILE_NRHASH];
+ static DEFINE_MUTEX(nlm_file_mutex);
+
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
+ {
+ u32 *fhp = (u32*)f->data;
+diff --git a/fs/namei.c b/fs/namei.c
+index c83145a..caa38a2 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1591,7 +1591,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+
+ if (should_follow_link(path->dentry, follow)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ err = -ECHILD;
+ goto out_err;
+ }
+@@ -3047,7 +3048,8 @@ finish_lookup:
+
+ if (should_follow_link(path->dentry, !symlink_ok)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ error = -ECHILD;
+ goto out;
+ }
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 82ef140..4622ee3 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+ */
+ struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
+ {
+- struct mount *p, *res;
+- res = p = __lookup_mnt(mnt, dentry);
++ struct mount *p, *res = NULL;
++ p = __lookup_mnt(mnt, dentry);
+ if (!p)
+ goto out;
++ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
++ res = p;
+ hlist_for_each_entry_continue(p, mnt_hash) {
+ if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+ break;
+- res = p;
++ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
++ res = p;
+ }
+ out:
+ return res;
+@@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
+ /*
+ * vfsmount lock must be held for write
+ */
+-static void detach_mnt(struct mount *mnt, struct path *old_path)
++static void unhash_mnt(struct mount *mnt)
+ {
+- old_path->dentry = mnt->mnt_mountpoint;
+- old_path->mnt = &mnt->mnt_parent->mnt;
+ mnt->mnt_parent = mnt;
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
+ list_del_init(&mnt->mnt_child);
+@@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
+ /*
+ * vfsmount lock must be held for write
+ */
++static void detach_mnt(struct mount *mnt, struct path *old_path)
++{
++ old_path->dentry = mnt->mnt_mountpoint;
++ old_path->mnt = &mnt->mnt_parent->mnt;
++ unhash_mnt(mnt);
++}
++
++/*
++ * vfsmount lock must be held for write
++ */
++static void umount_mnt(struct mount *mnt)
++{
++ /* old mountpoint will be dropped when we can do that */
++ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
++ unhash_mnt(mnt);
++}
++
++/*
++ * vfsmount lock must be held for write
++ */
+ void mnt_set_mountpoint(struct mount *mnt,
+ struct mountpoint *mp,
+ struct mount *child_mnt)
+@@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
+ rcu_read_unlock();
+
+ list_del(&mnt->mnt_instance);
++
++ if (unlikely(!list_empty(&mnt->mnt_mounts))) {
++ struct mount *p, *tmp;
++ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
++ umount_mnt(p);
++ }
++ }
+ unlock_mount_hash();
+
+ if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
+@@ -1319,49 +1347,63 @@ static inline void namespace_lock(void)
+ down_write(&namespace_sem);
+ }
+
++enum umount_tree_flags {
++ UMOUNT_SYNC = 1,
++ UMOUNT_PROPAGATE = 2,
++ UMOUNT_CONNECTED = 4,
++};
+ /*
+ * mount_lock must be held
+ * namespace_sem must be held for write
+- * how = 0 => just this tree, don't propagate
+- * how = 1 => propagate; we know that nobody else has reference to any victims
+- * how = 2 => lazy umount
+ */
+-void umount_tree(struct mount *mnt, int how)
++static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
+ {
+- HLIST_HEAD(tmp_list);
++ LIST_HEAD(tmp_list);
+ struct mount *p;
+
++ if (how & UMOUNT_PROPAGATE)
++ propagate_mount_unlock(mnt);
++
++ /* Gather the mounts to umount */
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+- hlist_del_init_rcu(&p->mnt_hash);
+- hlist_add_head(&p->mnt_hash, &tmp_list);
++ p->mnt.mnt_flags |= MNT_UMOUNT;
++ list_move(&p->mnt_list, &tmp_list);
+ }
+
+- hlist_for_each_entry(p, &tmp_list, mnt_hash)
++ /* Hide the mounts from mnt_mounts */
++ list_for_each_entry(p, &tmp_list, mnt_list) {
+ list_del_init(&p->mnt_child);
++ }
+
+- if (how)
++	/* Add propagated mounts to the tmp_list */

++ if (how & UMOUNT_PROPAGATE)
+ propagate_umount(&tmp_list);
+
+- while (!hlist_empty(&tmp_list)) {
+- p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
+- hlist_del_init_rcu(&p->mnt_hash);
++ while (!list_empty(&tmp_list)) {
++ bool disconnect;
++ p = list_first_entry(&tmp_list, struct mount, mnt_list);
+ list_del_init(&p->mnt_expire);
+ list_del_init(&p->mnt_list);
+ __touch_mnt_namespace(p->mnt_ns);
+ p->mnt_ns = NULL;
+- if (how < 2)
++ if (how & UMOUNT_SYNC)
+ p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+
+- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
++ disconnect = !(((how & UMOUNT_CONNECTED) &&
++ mnt_has_parent(p) &&
++ (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
++ IS_MNT_LOCKED_AND_LAZY(p));
++
++ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
++ disconnect ? &unmounted : NULL);
+ if (mnt_has_parent(p)) {
+- hlist_del_init(&p->mnt_mp_list);
+- put_mountpoint(p->mnt_mp);
+ mnt_add_count(p->mnt_parent, -1);
+- /* old mountpoint will be dropped when we can do that */
+- p->mnt_ex_mountpoint = p->mnt_mountpoint;
+- p->mnt_mountpoint = p->mnt.mnt_root;
+- p->mnt_parent = p;
+- p->mnt_mp = NULL;
++ if (!disconnect) {
++ /* Don't forget about p */
++ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
++ } else {
++ umount_mnt(p);
++ }
+ }
+ change_mnt_propagation(p, MS_PRIVATE);
+ }
+@@ -1447,14 +1489,14 @@ static int do_umount(struct mount *mnt, int flags)
+
+ if (flags & MNT_DETACH) {
+ if (!list_empty(&mnt->mnt_list))
+- umount_tree(mnt, 2);
++ umount_tree(mnt, UMOUNT_PROPAGATE);
+ retval = 0;
+ } else {
+ shrink_submounts(mnt);
+ retval = -EBUSY;
+ if (!propagate_mount_busy(mnt, 2)) {
+ if (!list_empty(&mnt->mnt_list))
+- umount_tree(mnt, 1);
++ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ retval = 0;
+ }
+ }
+@@ -1480,13 +1522,20 @@ void __detach_mounts(struct dentry *dentry)
+
+ namespace_lock();
+ mp = lookup_mountpoint(dentry);
+- if (!mp)
++ if (IS_ERR_OR_NULL(mp))
+ goto out_unlock;
+
+ lock_mount_hash();
+ while (!hlist_empty(&mp->m_list)) {
+ mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+- umount_tree(mnt, 2);
++ if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
++ struct mount *p, *tmp;
++ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
++ hlist_add_head(&p->mnt_umount.s_list, &unmounted);
++ umount_mnt(p);
++ }
++ }
++ else umount_tree(mnt, UMOUNT_CONNECTED);
+ }
+ unlock_mount_hash();
+ put_mountpoint(mp);
+@@ -1648,7 +1697,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+ out:
+ if (res) {
+ lock_mount_hash();
+- umount_tree(res, 0);
++ umount_tree(res, UMOUNT_SYNC);
+ unlock_mount_hash();
+ }
+ return q;
+@@ -1672,7 +1721,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
+ {
+ namespace_lock();
+ lock_mount_hash();
+- umount_tree(real_mount(mnt), 0);
++ umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ unlock_mount_hash();
+ namespace_unlock();
+ }
+@@ -1855,7 +1904,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+ out_cleanup_ids:
+ while (!hlist_empty(&tree_list)) {
+ child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+- umount_tree(child, 0);
++ umount_tree(child, UMOUNT_SYNC);
+ }
+ unlock_mount_hash();
+ cleanup_group_ids(source_mnt, NULL);
+@@ -2035,7 +2084,7 @@ static int do_loopback(struct path *path, const char *old_name,
+ err = graft_tree(mnt, parent, mp);
+ if (err) {
+ lock_mount_hash();
+- umount_tree(mnt, 0);
++ umount_tree(mnt, UMOUNT_SYNC);
+ unlock_mount_hash();
+ }
+ out2:
+@@ -2406,7 +2455,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
+ while (!list_empty(&graveyard)) {
+ mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
+ touch_mnt_namespace(mnt->mnt_ns);
+- umount_tree(mnt, 1);
++ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ }
+ unlock_mount_hash();
+ namespace_unlock();
+@@ -2477,7 +2526,7 @@ static void shrink_submounts(struct mount *mnt)
+ m = list_first_entry(&graveyard, struct mount,
+ mnt_expire);
+ touch_mnt_namespace(m->mnt_ns);
+- umount_tree(m, 1);
++ umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ }
+ }
+ }
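umount_tree() stops taking a magic how integer (0, 1 or 2) and instead accepts OR-able enum umount_tree_flags bits, which keeps call sites readable (UMOUNT_PROPAGATE|UMOUNT_SYNC) and lets cases like UMOUNT_CONNECTED be added without renumbering. A tiny sketch of that flags-enum style; do_teardown and the flag names are illustrative.

#include <stdio.h>

enum teardown_flags {
	TEARDOWN_SYNC      = 1 << 0,
	TEARDOWN_PROPAGATE = 1 << 1,
	TEARDOWN_CONNECTED = 1 << 2,
};

/* Each behaviour is tested independently, so combinations compose cleanly. */
static void do_teardown(enum teardown_flags how)
{
	if (how & TEARDOWN_PROPAGATE)
		puts("  propagating to peers");
	if (how & TEARDOWN_SYNC)
		puts("  waiting for users to drain");
	if (how & TEARDOWN_CONNECTED)
		puts("  keeping connected children attached");
}

int main(void)
{
	puts("lazy detach:");
	do_teardown(TEARDOWN_PROPAGATE);

	puts("normal umount:");
	do_teardown(TEARDOWN_PROPAGATE | TEARDOWN_SYNC);
	return 0;
}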
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 351be920..8d129bb 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
+ if (try_to_freeze())
+ continue;
+
+- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
++ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+- /* schedule_timeout to game the hung task watchdog */
+- schedule_timeout(60 * HZ);
++ schedule();
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
++ flush_signals(current);
+ }
+ return 0;
+ }
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index e907c8c..ab21ef1 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+ int i;
+ ssize_t count;
+
+- WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
+-
+- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
+- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
+- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
+- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
+- }
+-
+- /* update the dreq->count by finding the minimum agreed count from all
+- * mirrors */
+- count = dreq->mirrors[0].count;
++ if (dreq->mirror_count == 1) {
++ dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
++ dreq->count += hdr->good_bytes;
++ } else {
++ /* mirrored writes */
++ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
++ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
++ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
++ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
++ }
++ /* update the dreq->count by finding the minimum agreed count from all
++ * mirrors */
++ count = dreq->mirrors[0].count;
+
+- for (i = 1; i < dreq->mirror_count; i++)
+- count = min(count, dreq->mirrors[i].count);
++ for (i = 1; i < dreq->mirror_count; i++)
++ count = min(count, dreq->mirrors[i].count);
+
+- dreq->count = count;
++ dreq->count = count;
++ }
+ }
+
+ /*
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 5c399ec..d494ea2 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -7365,6 +7365,11 @@ nfs4_stat_to_errno(int stat)
+ .p_name = #proc, \
+ }
+
++#define STUB(proc) \
++[NFSPROC4_CLNT_##proc] = { \
++ .p_name = #proc, \
++}
++
+ struct rpc_procinfo nfs4_procedures[] = {
+ PROC(READ, enc_read, dec_read),
+ PROC(WRITE, enc_write, dec_write),
+@@ -7417,6 +7422,7 @@ struct rpc_procinfo nfs4_procedures[] = {
+ PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
+ PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
+ PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
++ STUB(GETDEVICELIST),
+ PROC(BIND_CONN_TO_SESSION,
+ enc_bind_conn_to_session, dec_bind_conn_to_session),
+ PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 568ecf0..848d8b1 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page)
+ dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
+ page, PAGE_CACHE_SIZE, page_file_index(page));
+ nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+- nfs_inc_stats(inode, NFSIOS_READPAGES);
++ nfs_add_stats(inode, NFSIOS_READPAGES, 1);
+
+ /*
+ * Try to flush any pending writes to the file..
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 849ed78..41b3f1096 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
+ int ret;
+
+ nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+- nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
++ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
+
+ nfs_pageio_cond_complete(pgio, page_file_index(page));
+ ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 92b9d97..5416968 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
+ return status;
+ }
++ if (!file)
++ return nfserr_bad_stateid;
+
+ status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
+ fallocate->falloc_offset,
+@@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
+ return status;
+ }
++ if (!file)
++ return nfserr_bad_stateid;
+
+ switch (seek->seek_whence) {
+ case NFS4_CONTENT_DATA:
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8ba1d88..ee1cccd 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
+ return sid->sequence % SESSION_HASH_SIZE;
+ }
+
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ static inline void
+ dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
+ {
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 5fb7e78..5b33ce1 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ unsigned long maxcount;
+ struct xdr_stream *xdr = &resp->xdr;
+ struct file *file = read->rd_filp;
++ struct svc_fh *fhp = read->rd_fhp;
+ int starting_len = xdr->buf->len;
+ struct raparms *ra;
+ __be32 *p;
+@@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
+ maxcount = min_t(unsigned long, maxcount, read->rd_length);
+
+- if (!read->rd_filp) {
++ if (read->rd_filp)
++ err = nfsd_permission(resp->rqstp, fhp->fh_export,
++ fhp->fh_dentry,
++ NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
++ else
+ err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
+ &file, &ra);
+- if (err)
+- goto err_truncate;
+- }
++ if (err)
++ goto err_truncate;
+
+ if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
+ err = nfsd4_encode_splice_read(resp, read, file, maxcount);
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index aa47d75..9690cb4 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1250,15 +1250,15 @@ static int __init init_nfsd(void)
+ int retval;
+ printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+
+- retval = register_cld_notifier();
+- if (retval)
+- return retval;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
+- goto out_unregister_notifier;
+- retval = nfsd4_init_slabs();
++ return retval;
++ retval = register_cld_notifier();
+ if (retval)
+ goto out_unregister_pernet;
++ retval = nfsd4_init_slabs();
++ if (retval)
++ goto out_unregister_notifier;
+ retval = nfsd4_init_pnfs();
+ if (retval)
+ goto out_free_slabs;
+@@ -1290,10 +1290,10 @@ out_exit_pnfs:
+ nfsd4_exit_pnfs();
+ out_free_slabs:
+ nfsd4_free_slabs();
+-out_unregister_pernet:
+- unregister_pernet_subsys(&nfsd_net_ops);
+ out_unregister_notifier:
+ unregister_cld_notifier();
++out_unregister_pernet:
++ unregister_pernet_subsys(&nfsd_net_ops);
+ return retval;
+ }
+
+@@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void)
+ nfsd4_exit_pnfs();
+ nfsd_fault_inject_cleanup();
+ unregister_filesystem(&nfsd_fs_type);
+- unregister_pernet_subsys(&nfsd_net_ops);
+ unregister_cld_notifier();
++ unregister_pernet_subsys(&nfsd_net_ops);
+ }
+
+ MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 565c4da..cf98052 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -24,7 +24,7 @@
+ #include "export.h"
+
+ #undef ifdebug
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ # define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag)
+ #else
+ # define ifdebug(flag) if (0)
+diff --git a/fs/open.c b/fs/open.c
+index 33f9cbf..44a3be1 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+
++retry_deleg:
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ if (!uid_valid(uid))
+@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+-retry_deleg:
+ mutex_lock(&inode->i_mutex);
+ error = security_path_chown(path, uid, gid);
+ if (!error)
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 260ac8f..6367e1e 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
+ }
+
+ /*
++ * Clear MNT_LOCKED when it can be shown to be safe.
++ *
++ * mount_lock lock must be held for write
++ */
++void propagate_mount_unlock(struct mount *mnt)
++{
++ struct mount *parent = mnt->mnt_parent;
++ struct mount *m, *child;
++
++ BUG_ON(parent == mnt);
++
++ for (m = propagation_next(parent, parent); m;
++ m = propagation_next(m, parent)) {
++ child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
++ if (child)
++ child->mnt.mnt_flags &= ~MNT_LOCKED;
++ }
++}
++
++/*
++ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
++ */
++static void mark_umount_candidates(struct mount *mnt)
++{
++ struct mount *parent = mnt->mnt_parent;
++ struct mount *m;
++
++ BUG_ON(parent == mnt);
++
++ for (m = propagation_next(parent, parent); m;
++ m = propagation_next(m, parent)) {
++ struct mount *child = __lookup_mnt_last(&m->mnt,
++ mnt->mnt_mountpoint);
++ if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
++ SET_MNT_MARK(child);
++ }
++ }
++}
++
++/*
+ * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+ * parent propagates to.
+ */
+@@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt)
+ struct mount *child = __lookup_mnt_last(&m->mnt,
+ mnt->mnt_mountpoint);
+ /*
+- * umount the child only if the child has no
+- * other children
++ * umount the child only if the child has no children
++ * and the child is marked safe to unmount.
+ */
+- if (child && list_empty(&child->mnt_mounts)) {
++ if (!child || !IS_MNT_MARKED(child))
++ continue;
++ CLEAR_MNT_MARK(child);
++ if (list_empty(&child->mnt_mounts)) {
+ list_del_init(&child->mnt_child);
+- hlist_del_init_rcu(&child->mnt_hash);
+- hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
++ child->mnt.mnt_flags |= MNT_UMOUNT;
++ list_move_tail(&child->mnt_list, &mnt->mnt_list);
+ }
+ }
+ }
+@@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt)
+ *
+ * vfsmount lock must be held for write
+ */
+-int propagate_umount(struct hlist_head *list)
++int propagate_umount(struct list_head *list)
+ {
+ struct mount *mnt;
+
+- hlist_for_each_entry(mnt, list, mnt_hash)
++ list_for_each_entry_reverse(mnt, list, mnt_list)
++ mark_umount_candidates(mnt);
++
++ list_for_each_entry(mnt, list, mnt_list)
+ __propagate_umount(mnt);
+ return 0;
+ }
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 4a24635..7114ce6 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -19,6 +19,9 @@
+ #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+ #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+ #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
++#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
++#define IS_MNT_LOCKED_AND_LAZY(m) \
++ (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
+
+ #define CL_EXPIRE 0x01
+ #define CL_SLAVE 0x02
+@@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt)
+ void change_mnt_propagation(struct mount *, int);
+ int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
+ struct hlist_head *);
+-int propagate_umount(struct hlist_head *);
++int propagate_umount(struct list_head *);
+ int propagate_mount_busy(struct mount *, int);
++void propagate_mount_unlock(struct mount *);
+ void mnt_release_group_id(struct mount *);
+ int get_dominating_id(struct mount *mnt, const struct path *root);
+ unsigned int mnt_get_count(struct mount *mnt);
+ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
+ struct mount *);
+-void umount_tree(struct mount *, int);
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+ const struct path *root);
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index b034f10..0d58525 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -199,9 +199,29 @@ typedef int s32;
+ typedef s32 acpi_native_int;
+
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for 32-bit
++ * none PAE environment. ASL compiler may always define this to generate
++ * 32-bit OSPM compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+
++#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses can
++ * wrap over the 32-bit boundary on 32-bit PAE environment.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
+@@ -736,10 +756,6 @@ typedef u32 acpi_event_status;
+ #define ACPI_GPE_ENABLE 0
+ #define ACPI_GPE_DISABLE 1
+ #define ACPI_GPE_CONDITIONAL_ENABLE 2
+-#define ACPI_GPE_SAVE_MASK 4
+-
+-#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
+-#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)
+
+ /*
+ * GPE info flags - Per GPE
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index ad74dc5..ecdf940 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
+index ae2eb17..a215609 100644
+--- a/include/dt-bindings/clock/tegra124-car-common.h
++++ b/include/dt-bindings/clock/tegra124-car-common.h
+@@ -297,7 +297,7 @@
+ #define TEGRA124_CLK_PLL_C4 270
+ #define TEGRA124_CLK_PLL_DP 271
+ #define TEGRA124_CLK_PLL_E_MUX 272
+-#define TEGRA124_CLK_PLLD_DSI 273
++#define TEGRA124_CLK_PLL_D_DSI_OUT 273
+ /* 274 */
+ /* 275 */
+ /* 276 */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bbfceb7..33b52fb 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);
+
+ /* function argument constraints */
+ enum bpf_arg_type {
+- ARG_ANYTHING = 0, /* any argument is ok */
++ ARG_DONTCARE = 0, /* unused argument in helper function */
+
+ /* the following constraints used to prototype
+ * bpf_map_lookup/update/delete_elem() functions
+@@ -62,6 +62,8 @@ enum bpf_arg_type {
+ */
+ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
++
++ ARG_ANYTHING, /* any (initialized) argument is ok */
+ };
+
+ /* type of values returned from helper functions */
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index c2c561d..564beee 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -61,6 +61,7 @@ struct mnt_namespace;
+ #define MNT_DOOMED 0x1000000
+ #define MNT_SYNC_UMOUNT 0x2000000
+ #define MNT_MARKED 0x4000000
++#define MNT_UMOUNT 0x8000000
+
+ struct vfsmount {
+ struct dentry *mnt_root; /* root of the mounted tree */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index a419b65..51348f7 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+ extern void calc_global_load(unsigned long ticks);
+ extern void update_cpu_load_nohz(void);
+
++/* Notifier for when a task gets migrated to a new CPU */
++struct task_migration_notifier {
++ struct task_struct *task;
++ int from_cpu;
++ int to_cpu;
++};
++extern void register_task_migration_notifier(struct notifier_block *n);
++
+ extern unsigned long get_parent_ip(unsigned long addr);
+
+ extern void dump_cpu_task(int cpu);
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f54d665..bdccc4b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -769,6 +769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+
+ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
++struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ struct sk_buff *build_skb(void *data, unsigned int frag_size);
+ static inline struct sk_buff *alloc_skb(unsigned int size,
+ gfp_t priority)
+@@ -3013,6 +3014,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
+ */
+ #define CHECKSUM_BREAK 76
+
++/* Unset checksum-complete
++ *
++ * Unset checksum complete can be done when packet is being modified
++ * (uncompressed for instance) and checksum-complete value is
++ * invalidated.
++ */
++static inline void skb_checksum_complete_unset(struct sk_buff *skb)
++{
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ skb->ip_summed = CHECKSUM_NONE;
++}
++
+ /* Validate (init) checksum based on checksum complete.
+ *
+ * Return values:
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7ee1b5c..447fe29 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES 32
+ #define USB_MAXIADS (USB_MAXINTERFACES/2)
+
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT 40 /* ms */
++
+ /**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index d3583d3..dd0f3ab 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -602,6 +602,11 @@ struct iscsi_conn {
+ struct iscsi_session *sess;
+ /* Pointer to thread_set in use for this conn's threads */
+ struct iscsi_thread_set *thread_set;
++ int bitmap_id;
++ int rx_thread_active;
++ struct task_struct *rx_thread;
++ int tx_thread_active;
++ struct task_struct *tx_thread;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+ } ____cacheline_aligned;
+@@ -871,10 +876,12 @@ struct iscsit_global {
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
++#define ISCSIT_BITMAP_BITS 262144
+ /* Thread Set bitmap count */
+ int ts_bitmap_count;
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
++ spinlock_t ts_bitmap_lock;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 672150b..985ca4c 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -524,7 +524,7 @@ struct se_cmd {
+ sense_reason_t (*execute_cmd)(struct se_cmd *);
+ sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
+ u32, enum dma_data_direction);
+- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+
+ unsigned char *t_task_cdb;
+ unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
+index 0bf130a..28ec6c9 100644
+--- a/include/uapi/linux/nfsd/debug.h
++++ b/include/uapi/linux/nfsd/debug.h
+@@ -12,14 +12,6 @@
+ #include <linux/sunrpc/debug.h>
+
+ /*
+- * Enable debugging for nfsd.
+- * Requires RPC_DEBUG.
+- */
+-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+-# define NFSD_DEBUG 1
+-#endif
+-
+-/*
+ * knfsd debug flags
+ */
+ #define NFSDDBG_SOCK 0x0001
+diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
+index a20e4a3..847a0a2 100644
+--- a/include/video/samsung_fimd.h
++++ b/include/video/samsung_fimd.h
+@@ -436,6 +436,12 @@
+ #define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
+ #define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+
++/* Display port clock control */
++#define DP_MIE_CLKCON 0x27c
++#define DP_MIE_CLK_DISABLE 0x0
++#define DP_MIE_CLK_DP_ENABLE 0x2
++#define DP_MIE_CLK_MIE_ENABLE 0x3
++
+ /* Notes on per-window bpp settings
+ *
+ * Value Win0 Win1 Win2 Win3 Win 4
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 36508e6..5d8ea3d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ enum bpf_reg_type expected_type;
+ int err = 0;
+
+- if (arg_type == ARG_ANYTHING)
++ if (arg_type == ARG_DONTCARE)
+ return 0;
+
+ if (reg->type == NOT_INIT) {
+@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ return -EACCES;
+ }
+
++ if (arg_type == ARG_ANYTHING)
++ return 0;
++
+ if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
+ arg_type == ARG_PTR_TO_MAP_VALUE) {
+ expected_type = PTR_TO_STACK;
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 227fec3..9a34bd8 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ unsigned long data)
+ {
++ bool need_siglock;
++
+ if (!valid_signal(data))
+ return -EIO;
+
+@@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ user_disable_single_step(child);
+ }
+
++ /*
++ * Change ->exit_code and ->state under siglock to avoid the race
++ * with wait_task_stopped() in between; a non-zero ->exit_code will
++ * wrongly look like another report from tracee.
++ *
++ * Note that we need siglock even if ->exit_code == data and/or this
++ * status was not reported yet, the new status must not be cleared by
++ * wait_task_stopped() after resume.
++ *
++ * If data == 0 we do not care if wait_task_stopped() reports the old
++ * status and clears the code too; this can't race with the tracee, it
++ * takes siglock after resume.
++ */
++ need_siglock = data && !thread_group_empty(current);
++ if (need_siglock)
++ spin_lock_irq(&child->sighand->siglock);
+ child->exit_code = data;
+ wake_up_state(child, __TASK_TRACED);
++ if (need_siglock)
++ spin_unlock_irq(&child->sighand->siglock);
+
+ return 0;
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 62671f5..3d5f6f6 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+ rq_clock_skip_update(rq, true);
+ }
+
++static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
++
++void register_task_migration_notifier(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&task_migration_notifier, n);
++}
++
+ #ifdef CONFIG_SMP
+ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ {
+@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ trace_sched_migrate_task(p, new_cpu);
+
+ if (task_cpu(p) != new_cpu) {
++ struct task_migration_notifier tmn;
++
+ if (p->sched_class->migrate_task_rq)
+ p->sched_class->migrate_task_rq(p, new_cpu);
+ p->se.nr_migrations++;
+ perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
++
++ tmn.task = p;
++ tmn.from_cpu = task_cpu(p);
++ tmn.to_cpu = new_cpu;
++
++ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
+ }
+
+ __set_task_cpu(p, new_cpu);
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 3fa8fa6..f670cbb 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -514,7 +514,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
+ unsigned long flags;
+ struct rq *rq;
+
+- rq = task_rq_lock(current, &flags);
++ rq = task_rq_lock(p, &flags);
+
+ /*
+ * We need to take care of several possible races here:
+@@ -569,7 +569,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
+ push_dl_task(rq);
+ #endif
+ unlock:
+- task_rq_unlock(rq, current, &flags);
++ task_rq_unlock(rq, p, &flags);
+
+ return HRTIMER_NORESTART;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 5040d44..922048a 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+
+ static __always_inline int trace_recursive_lock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+ int bit;
+
+ if (in_interrupt()) {
+@@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void)
+ return 1;
+
+ val |= (1 << bit);
+- this_cpu_write(current_context, val);
++ __this_cpu_write(current_context, val);
+
+ return 0;
+ }
+
+ static __always_inline void trace_recursive_unlock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+
+- val--;
+- val &= this_cpu_read(current_context);
+- this_cpu_write(current_context, val);
++ val &= val & (val - 1);
++ __this_cpu_write(current_context, val);
+ }
+
+ #else
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index db54dda..a9c10a3 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ {
+ char *event = NULL, *sub = NULL, *match;
++ int ret;
+
+ /*
+ * The buf format can be <subsystem>:<event-name>
+@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ event = NULL;
+ }
+
+- return __ftrace_set_clr_event(tr, match, sub, event, set);
++ ret = __ftrace_set_clr_event(tr, match, sub, event, set);
++
++ /* Put back the colon to allow this to be called again */
++ if (buf)
++ *(buf - 1) = ':';
++
++ return ret;
+ }
+
+ /**
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 2d25ad1..b6fce36 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter)
+ {
+ /* pid and depth on the last trace processed */
+ struct fgraph_data *data;
++ gfp_t gfpflags;
+ int cpu;
+
+ iter->private = NULL;
+
+- data = kzalloc(sizeof(*data), GFP_KERNEL);
++ /* We can be called in atomic context via ftrace_dump() */
++ gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
++
++ data = kzalloc(sizeof(*data), gfpflags);
+ if (!data)
+ goto out_err;
+
+- data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
++ data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
+ if (!data->cpu_data)
+ goto out_err_free;
+
+diff --git a/lib/string.c b/lib/string.c
+index ce81aae..a579201 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ memset(s, 0, count);
+- OPTIMIZER_HIDE_VAR(s);
++ barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 6817b03..956d4db 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2316,8 +2316,14 @@ static struct page
+ struct vm_area_struct *vma, unsigned long address,
+ int node)
+ {
++ gfp_t flags;
++
+ VM_BUG_ON_PAGE(*hpage, *hpage);
+
++ /* Only allocate from the target node */
++ flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
++ __GFP_THISNODE;
++
+ /*
+ * Before allocating the hugepage, release the mmap_sem read lock.
+ * The allocation can take potentially a long time if it involves
+@@ -2326,8 +2332,7 @@ static struct page
+ */
+ up_read(&mm->mmap_sem);
+
+- *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
+- khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
++ *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
+ if (unlikely(!*hpage)) {
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ *hpage = ERR_PTR(-ENOMEM);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c41b2a0..caad3c5 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3735,8 +3735,7 @@ retry:
+ if (!pmd_huge(*pmd))
+ goto out;
+ if (pmd_present(*pmd)) {
+- page = pte_page(*(pte_t *)pmd) +
+- ((address & ~PMD_MASK) >> PAGE_SHIFT);
++ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4721046..de5dc5e 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1985,7 +1985,8 @@ retry_cpuset:
+ nmask = policy_nodemask(gfp, pol);
+ if (!nmask || node_isset(node, *nmask)) {
+ mpol_cond_put(pol);
+- page = alloc_pages_exact_node(node, gfp, order);
++ page = alloc_pages_exact_node(node,
++ gfp | __GFP_THISNODE, order);
+ goto out;
+ }
+ }
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 0ee453f..f371cbf 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
+ struct net_device *in;
+
+ if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
++ int frag_max_size;
++
++ if (skb->protocol == htons(ETH_P_IP)) {
++ frag_max_size = IPCB(skb)->frag_max_size;
++ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
++ }
++
+ in = nf_bridge->physindev;
+ if (nf_bridge->mask & BRNF_PKT_TYPE) {
+ skb->pkt_type = PACKET_OTHERHOST;
+@@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+ nf_bridge->mask |= BRNF_PKT_TYPE;
+ }
+
+- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
+- return NF_DROP;
++ if (pf == NFPROTO_IPV4) {
++ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
++
++ if (br_parse_ip_options(skb))
++ return NF_DROP;
++
++ IPCB(skb)->frag_max_size = frag_max;
++ }
+
+ /* The physdev module checks on this */
+ nf_bridge->mask |= BRNF_BRIDGED;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 45109b7..22a53ac 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3041,7 +3041,7 @@ static struct rps_dev_flow *
+ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow *rflow, u16 next_cpu)
+ {
+- if (next_cpu != RPS_NO_CPU) {
++ if (next_cpu < nr_cpu_ids) {
+ #ifdef CONFIG_RFS_ACCEL
+ struct netdev_rx_queue *rxqueue;
+ struct rps_dev_flow_table *flow_table;
+@@ -3146,7 +3146,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ * If the desired CPU (where last recvmsg was done) is
+ * different from current CPU (one in the rx-queue flow
+ * table entry), switch if one of the following holds:
+- * - Current CPU is unset (equal to RPS_NO_CPU).
++ * - Current CPU is unset (>= nr_cpu_ids).
+ * - Current CPU is offline.
+ * - The current CPU's queue tail has advanced beyond the
+ * last packet that was enqueued using this table entry.
+@@ -3154,14 +3154,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ * have been dequeued, thus preserving in order delivery.
+ */
+ if (unlikely(tcpu != next_cpu) &&
+- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
++ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
+ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+ rflow->last_qtail)) >= 0)) {
+ tcpu = next_cpu;
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+ }
+
+- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
++ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
+ *rflowp = rflow;
+ cpu = tcpu;
+ goto done;
+@@ -3202,14 +3202,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *rflow;
+ bool expire = true;
+- int cpu;
++ unsigned int cpu;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (flow_table && flow_id <= flow_table->mask) {
+ rflow = &flow_table->flows[flow_id];
+ cpu = ACCESS_ONCE(rflow->cpu);
+- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
++ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
+ ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+ rflow->last_qtail) <
+ (int)(10 * flow_table->mask)))
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 98d45fe..e9f9a15 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -280,13 +280,14 @@ nodata:
+ EXPORT_SYMBOL(__alloc_skb);
+
+ /**
+- * build_skb - build a network buffer
++ * __build_skb - build a network buffer
+ * @data: data buffer provided by caller
+- * @frag_size: size of fragment, or 0 if head was kmalloced
++ * @frag_size: size of data, or 0 if head was kmalloced
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc() only if
+- * @frag_size is 0, otherwise data should come from the page allocator.
++ * @frag_size is 0, otherwise data should come from the page allocator
++ * or vmalloc()
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
+ * before giving packet to stack.
+ * RX rings only contains data buffers, not full skbs.
+ */
+-struct sk_buff *build_skb(void *data, unsigned int frag_size)
++struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+ {
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+- skb->head_frag = frag_size != 0;
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ return skb;
+ }
++
++/* build_skb() is wrapper over __build_skb(), that specifically
++ * takes care of skb->head and skb->pfmemalloc
++ * This means that if @frag_size is not zero, then @data must be backed
++ * by a page fragment, not kmalloc() or vmalloc()
++ */
++struct sk_buff *build_skb(void *data, unsigned int frag_size)
++{
++ struct sk_buff *skb = __build_skb(data, frag_size);
++
++ if (skb && frag_size) {
++ skb->head_frag = 1;
++ if (virt_to_head_page(data)->pfmemalloc)
++ skb->pfmemalloc = 1;
++ }
++ return skb;
++}
+ EXPORT_SYMBOL(build_skb);
+
+ struct netdev_alloc_cache {
+@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+ gfp_t gfp = gfp_mask;
+
+ if (order) {
+- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
++ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
++ __GFP_NOMEMALLOC;
+ page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+ nc->frag.size = PAGE_SIZE << (page ? order : 0);
+ }
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index d9bc28a..53bd53f 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb)
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index d520492..9d48dc4 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2751,39 +2751,65 @@ begin_fwd:
+ }
+ }
+
+-/* Send a fin. The caller locks the socket for us. This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow to exceed memory limits for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++ int amt, status;
++
++ if (size <= sk->sk_forward_alloc)
++ return;
++ amt = sk_mem_pages(size);
++ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++ sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+ */
+ void tcp_send_fin(struct sock *sk)
+ {
++ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_write_queue_tail(sk);
+- int mss_now;
+
+- /* Optimization, tack on the FIN if we have a queue of
+- * unsent frames. But be careful about outgoing SACKS
+- * and IP options.
++ /* Optimization, tack on the FIN if we have one skb in write queue and
++ * this skb was not yet sent, or we are under memory pressure.
++ * Note: in the latter case, FIN packet will be sent after a timeout,
++ * as TCP stack thinks it has already been transmitted.
+ */
+- mss_now = tcp_current_mss(sk);
+-
+- if (tcp_send_head(sk) != NULL) {
+- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+- TCP_SKB_CB(skb)->end_seq++;
++ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(tskb)->end_seq++;
+ tp->write_seq++;
++ if (!tcp_send_head(sk)) {
++ /* This means tskb was already sent.
++ * Pretend we included the FIN on previous transmit.
++ * We need to set tp->snd_nxt to the value it would have
++ * if FIN had been sent. This is because retransmit path
++ * does not change tp->snd_nxt.
++ */
++ tp->snd_nxt++;
++ return;
++ }
+ } else {
+- /* Socket is locked, keep trying until memory is available. */
+- for (;;) {
+- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+- if (skb)
+- break;
+- yield();
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ if (unlikely(!skb)) {
++ if (tskb)
++ goto coalesce;
++ return;
+ }
++ skb_reserve(skb, MAX_TCP_HEADER);
++ sk_forced_wmem_schedule(sk, skb->truesize);
+ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ tcp_init_nondata_skb(skb, tp->write_seq,
+ TCPHDR_ACK | TCPHDR_FIN);
+ tcp_queue_skb(sk, skb);
+ }
+- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 142f66a..0ca013d 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2260,7 +2260,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
+ else
+ ssid_len = ssid[1];
+
+- ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
++ ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
+ ssid + 2, ssid_len, NULL,
+ 0, (u32) -1, true, 0,
+ ifmgd->associated->channel, false);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 05919bf..d1d7a81 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
+ if (data == NULL)
+ return NULL;
+
+- skb = build_skb(data, size);
++ skb = __build_skb(data, size);
+ if (skb == NULL)
+ vfree(data);
+- else {
+- skb->head_frag = 0;
++ else
+ skb->destructor = netlink_skb_destructor;
+- }
+
+ return skb;
+ }
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e..53745f4 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+ u32 value2;
+- unsigned long flags;
+ u32 rate;
+
+ if (emu->card_capabilities->emu_model) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x38, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x1) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x2a, &value);
+ snd_emu1010_fpga_read(emu, 0x2b, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ } else {
+ snd_iprintf(buffer, "ADAT Unlocked\n");
+ }
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x20, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x4) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x28, &value);
+ snd_emu1010_fpga_read(emu, 0x29, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ } else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+- unsigned long flags;
+ int i;
+ snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+
+ for(i = 0; i < 0x40; i+=1) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, i, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ }
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f9d12c0..2fd490b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5047,12 +5047,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -5142,6 +5144,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {0x1b, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
++#define ALC256_STANDARD_PINS \
++ {0x12, 0x90a60140}, \
++ {0x14, 0x90170110}, \
++ {0x19, 0x411111f0}, \
++ {0x1a, 0x411111f0}, \
++ {0x1b, 0x411111f0}, \
++ {0x1d, 0x40700001}, \
++ {0x1e, 0x411111f0}, \
++ {0x21, 0x02211020}
++
+ #define ALC282_STANDARD_PINS \
+ {0x14, 0x90170110}, \
+ {0x18, 0x411111f0}, \
+@@ -5235,15 +5247,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x1d, 0x40700001},
+ {0x21, 0x02211050}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+- {0x12, 0x90a60140},
+- {0x13, 0x40000000},
+- {0x14, 0x90170110},
+- {0x19, 0x411111f0},
+- {0x1a, 0x411111f0},
+- {0x1b, 0x411111f0},
+- {0x1d, 0x40700001},
+- {0x1e, 0x411111f0},
+- {0x21, 0x02211020}),
++ ALC256_STANDARD_PINS,
++ {0x13, 0x40000000}),
++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++ ALC256_STANDARD_PINS,
++ {0x13, 0x411111f0}),
+ SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ {0x12, 0x90a60130},
+ {0x13, 0x40000000},
+@@ -5563,6 +5571,8 @@ static int patch_alc269(struct hda_codec *codec)
+ break;
+ case 0x10ec0256:
+ spec->codec_variant = ALC269_TYPE_ALC256;
++ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
++ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ break;
+ }
+
+@@ -5576,8 +5586,8 @@ static int patch_alc269(struct hda_codec *codec)
+ if (err < 0)
+ goto error;
+
+- if (!spec->gen.no_analog && spec->gen.beep_nid)
+- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
++ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
++ set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
+
+ codec->patch_ops = alc_patch_ops;
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
+index 7d3a6ac..e770ee6 100644
+--- a/sound/soc/codecs/cs4271.c
++++ b/sound/soc/codecs/cs4271.c
+@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
+ if (gpio_is_valid(cs4271->gpio_nreset)) {
+ /* Reset codec */
+ gpio_direction_output(cs4271->gpio_nreset, 0);
+- udelay(1);
++ mdelay(1);
+ gpio_set_value(cs4271->gpio_nreset, 1);
+ /* Give the codec time to wake up */
+- udelay(1);
++ mdelay(1);
+ }
+
+ ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 474cae8..8c09e3f 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -304,9 +304,9 @@ static const struct soc_enum pcm512x_veds =
+ static const struct snd_kcontrol_new pcm512x_controls[] = {
+ SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
+ PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
+-SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
++SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
+ PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
+-SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
++SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
+ PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
+ SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
+ PCM512x_RQMR_SHIFT, 1, 1),
+@@ -576,8 +576,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,
+
+ /* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
+ if (pllin_rate / den > 20000000 && num < 8) {
+- num *= 20000000 / (pllin_rate / den);
+- den *= 20000000 / (pllin_rate / den);
++ num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
++ den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
+ }
+ dev_dbg(dev, "num / den = %lu / %lu\n", num, den);
+
+diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
+index 31bb480..9e71c76 100644
+--- a/sound/soc/codecs/wm8741.c
++++ b/sound/soc/codecs/wm8741.c
+@@ -123,7 +123,7 @@ static struct {
+ };
+
+ static const unsigned int rates_11289[] = {
+- 44100, 88235,
++ 44100, 88200,
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_11289 = {
+@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
+ };
+
+ static const unsigned int rates_16934[] = {
+- 44100, 88235,
++ 44100, 88200,
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_16934 = {
+@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
+ };
+
+ static const unsigned int rates_22579[] = {
+- 44100, 88235, 1764000
++ 44100, 88200, 176400
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_22579 = {
+@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
+ };
+
+ static const unsigned int rates_36864[] = {
+- 48000, 96000, 19200
++ 48000, 96000, 192000
+ };
+
+ static const struct snd_pcm_hw_constraint_list constraints_36864 = {
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index b6bb594..8c2b9be 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -425,18 +425,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int davinci_evm_remove(struct platform_device *pdev)
+-{
+- struct snd_soc_card *card = platform_get_drvdata(pdev);
+-
+- snd_soc_unregister_card(card);
+-
+- return 0;
+-}
+-
+ static struct platform_driver davinci_evm_driver = {
+ .probe = davinci_evm_probe,
+- .remove = davinci_evm_remove,
+ .driver = {
+ .name = "davinci_evm",
+ .pm = &snd_soc_pm_ops,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9a28365..32631a8 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ {
+ /* devices which do not support reading the sample rate. */
+ switch (chip->usb_id) {
++ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
+ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ return true;
+diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
+index dcc6652..deb3569 100644
+--- a/tools/lib/traceevent/kbuffer-parse.c
++++ b/tools/lib/traceevent/kbuffer-parse.c
+@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ switch (type_len) {
+ case KBUFFER_TYPE_PADDING:
+ *length = read_4(kbuf, data);
+- data += *length;
+ break;
+
+ case KBUFFER_TYPE_TIME_EXTEND:
+diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
+index cc22408..0884d31 100644
+--- a/tools/perf/config/Makefile
++++ b/tools/perf/config/Makefile
+@@ -651,7 +651,7 @@ ifeq (${IS_64_BIT}, 1)
+ NO_PERF_READ_VDSO32 := 1
+ endif
+ endif
+- ifneq (${IS_X86_64}, 1)
++ ifneq ($(ARCH), x86)
+ NO_PERF_READ_VDSOX32 := 1
+ endif
+ ifndef NO_PERF_READ_VDSOX32
+@@ -699,7 +699,7 @@ sysconfdir = $(prefix)/etc
+ ETC_PERFCONFIG = etc/perfconfig
+ endif
+ ifndef lib
+-ifeq ($(IS_X86_64),1)
++ifeq ($(ARCH)$(IS_64_BIT), x861)
+ lib = lib64
+ else
+ lib = lib
+diff --git a/tools/perf/tests/make b/tools/perf/tests/make
+index 75709d2..bff8532 100644
+--- a/tools/perf/tests/make
++++ b/tools/perf/tests/make
+@@ -5,7 +5,7 @@ include config/Makefile.arch
+
+ # FIXME looks like x86 is the only arch running tests ;-)
+ # we need some IS_(32/64) flag to make this generic
+-ifeq ($(IS_X86_64),1)
++ifeq ($(ARCH)$(IS_64_BIT), x861)
+ lib = lib64
+ else
+ lib = lib
+diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
+index 6da965b..85b5238 100644
+--- a/tools/perf/util/cloexec.c
++++ b/tools/perf/util/cloexec.c
+@@ -7,6 +7,12 @@
+
+ static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+
++int __weak sched_getcpu(void)
++{
++ errno = ENOSYS;
++ return -1;
++}
++
+ static int perf_flag_probe(void)
+ {
+ /* use 'safest' configuration as used in perf_evsel__fallback() */
+diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
+index 94a5a7d..68888c2 100644
+--- a/tools/perf/util/cloexec.h
++++ b/tools/perf/util/cloexec.h
+@@ -3,4 +3,10 @@
+
+ unsigned long perf_event_open_cloexec_flag(void);
+
++#ifdef __GLIBC_PREREQ
++#if !__GLIBC_PREREQ(2, 6)
++extern int sched_getcpu(void) __THROW;
++#endif
++#endif
++
+ #endif /* __PERF_CLOEXEC_H */
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 33b7a2a..9bdf007 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+ return GELF_ST_TYPE(sym->st_info);
+ }
+
++#ifndef STT_GNU_IFUNC
++#define STT_GNU_IFUNC 10
++#endif
++
+ static inline int elf_sym__is_function(const GElf_Sym *sym)
+ {
+ return (elf_sym__type(sym) == STT_FUNC ||
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a36..4039854 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC = $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT := $(PWD)
++BUILD_OUTPUT := $(CURDIR)
+ PREFIX := /usr
+ DESTDIR :=
+
++ifeq ("$(origin O)", "command line")
++ BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS += -Wall
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index c9f60f5..e5abe7c 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ goto out;
+ }
+
++ if (irq_num >= kvm->arch.vgic.nr_irqs)
++ return -EINVAL;
++
+ vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+ if (vcpu_id >= 0) {
+ /* kick the specified vcpu */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index cc6a25d..f8f3f5f 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ ghc->generation = slots->generation;
+ ghc->len = len;
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ ghc->hva += offset;
+ } else {
+ /*
diff --git a/4.0.1/4420_grsecurity-3.1-4.0.1-201505042053.patch b/4.0.2/4420_grsecurity-3.1-4.0.2-201505091724.patch
index 505b676..86c9e65 100644
--- a/4.0.1/4420_grsecurity-3.1-4.0.1-201505042053.patch
+++ b/4.0.2/4420_grsecurity-3.1-4.0.2-201505091724.patch
@@ -373,7 +373,7 @@ index bfcb1a6..2dae09b 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index f499cd2..37a187f 100644
+index 0649a60..1084563 100644
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -381,7 +381,7 @@ index f499cd2..37a187f 100644
HOSTCXX = g++
HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
-HOSTCXXFLAGS = -O2
-+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
++HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
@@ -1678,14 +1678,14 @@ index 6ddbe44..b5e38b1a 100644
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index afb9caf..9a0bac0 100644
+index 674d03f..9a0bac0 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
@@ -2847,10 +2847,10 @@ index 69bda1a..755113a 100644
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index fdfa3a7..5d208b8 100644
+index 2bf1a16..d959d40 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -207,6 +207,7 @@ void machine_power_off(void)
+@@ -213,6 +213,7 @@ void machine_power_off(void)
if (pm_power_off)
pm_power_off();
@@ -2858,7 +2858,7 @@ index fdfa3a7..5d208b8 100644
}
/*
-@@ -220,7 +221,7 @@ void machine_power_off(void)
+@@ -226,7 +227,7 @@ void machine_power_off(void)
* executing pre-reset code, and using RAM that the primary CPU's code wishes
* to use. Implementing such co-ordination would be essentially impossible.
*/
@@ -2867,7 +2867,7 @@ index fdfa3a7..5d208b8 100644
{
local_irq_disable();
smp_send_stop();
-@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
+@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
show_regs_print_info(KERN_DEFAULT);
@@ -2878,7 +2878,7 @@ index fdfa3a7..5d208b8 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -2891,7 +2891,7 @@ index fdfa3a7..5d208b8 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
+@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -2900,7 +2900,7 @@ index fdfa3a7..5d208b8 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return is_gate_vma(vma) ? "[vectors]" : NULL;
}
@@ -3240,7 +3240,7 @@ index b31aa73..cc4b7a1 100644
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index 5560f74..1cc00ea 100644
+index b652af5..60231ab 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
@@ -3279,7 +3279,7 @@ index 5560f74..1cc00ea 100644
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
-@@ -1088,7 +1088,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
@@ -5094,7 +5094,7 @@ index 836f147..4cf23f5 100644
if (!(addr & ~PAGE_MASK))
goto success;
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
-index 69952c1..4fa2908 100644
+index 69952c18..4fa2908 100644
--- a/arch/hexagon/include/asm/cache.h
+++ b/arch/hexagon/include/asm/cache.h
@@ -21,9 +21,11 @@
@@ -6890,7 +6890,7 @@ index 33984c0..666a96d 100644
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index c9eccf5..3903621 100644
+index f5e7dda..47198ec 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
@@ -24750,7 +24750,7 @@ index f36bd42..0ab4474 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index 6fd514d9..c4221b8 100644
+index 6fd514d9..55fd355 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,8 @@
@@ -24775,7 +24775,7 @@ index 6fd514d9..c4221b8 100644
.text
__HEAD
-@@ -89,11 +97,24 @@ startup_64:
+@@ -89,11 +97,26 @@ startup_64:
* Fixup the physical addresses in the page table
*/
addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
@@ -24791,18 +24791,20 @@ index 6fd514d9..c4221b8 100644
+#ifndef CONFIG_XEN
+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
+#endif
-+
+
+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
+
+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
-
++
++ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
/*
* Set up the identity mapping for the switchover. These
-@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
+@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
* after the boot processor executes this code.
*/
@@ -24817,7 +24819,7 @@ index 6fd514d9..c4221b8 100644
movq %rcx, %cr4
/* Setup early boot stage 4 level pagetables. */
-@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
+@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_SCE, %eax /* Enable System Call */
@@ -24832,13 +24834,15 @@ index 6fd514d9..c4221b8 100644
+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
++ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
+@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
@@ -24846,7 +24850,7 @@ index 6fd514d9..c4221b8 100644
movq initial_code(%rip),%rax
pushq $0 # fake return address to stop unwinder
pushq $__KERNEL_CS # set correct cs
-@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
+@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
.quad INIT_PER_CPU_VAR(irq_stack_union)
GLOBAL(stack_start)
@@ -24855,7 +24859,7 @@ index 6fd514d9..c4221b8 100644
.word 0
__FINITDATA
-@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
+@@ -391,7 +427,7 @@ ENTRY(early_idt_handler)
call dump_stack
#ifdef CONFIG_KALLSYMS
leaq early_idt_ripmsg(%rip),%rdi
@@ -24864,7 +24868,7 @@ index 6fd514d9..c4221b8 100644
call __print_symbol
#endif
#endif /* EARLY_PRINTK */
-@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
+@@ -420,6 +456,7 @@ ENDPROC(early_idt_handler)
early_recursion_flag:
.long 0
@@ -24872,7 +24876,7 @@ index 6fd514d9..c4221b8 100644
#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
-@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
+@@ -447,29 +484,52 @@ NEXT_PAGE(early_level4_pgt)
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -24934,7 +24938,7 @@ index 6fd514d9..c4221b8 100644
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
-@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
+@@ -477,6 +537,9 @@ NEXT_PAGE(level3_kernel_pgt)
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -24944,22 +24948,26 @@ index 6fd514d9..c4221b8 100644
NEXT_PAGE(level2_kernel_pgt)
/*
* 512 MB kernel mapping. We spend a full page on this pagetable
-@@ -494,21 +553,57 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -492,23 +555,59 @@ NEXT_PAGE(level2_kernel_pgt)
+ KERNEL_IMAGE_SIZE/PMD_SIZE)
+
NEXT_PAGE(level2_fixmap_pgt)
- .fill 506,8,0
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+- .fill 506,8,0
+- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
++ .fill 504,8,0
++ PMDS(level1_fixmap_pgt - __START_KERNEL_map, _PAGE_TABLE, 3)
+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
+ .fill 4,8,0
NEXT_PAGE(level1_fixmap_pgt)
++ .fill 3*512,8,0
++
++NEXT_PAGE(level1_vsyscall_pgt)
.fill 512,8,0
-+NEXT_PAGE(level1_vsyscall_pgt)
-+ .fill 512,8,0
-+
#undef PMDS
- .data
@@ -25006,7 +25014,7 @@ index 6fd514d9..c4221b8 100644
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
-@@ -532,8 +627,8 @@ NEXT_PAGE(kasan_zero_pud)
+@@ -532,8 +631,8 @@ NEXT_PAGE(kasan_zero_pud)
#include "../../x86/xen/xen-head.S"
@@ -26448,10 +26456,10 @@ index 77dd0ad..9ec4723 100644
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 046e2d6..2cc8ad2 100644
+index a388bb8..97064ad 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
-@@ -37,7 +37,8 @@
+@@ -38,7 +38,8 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
@@ -26461,7 +26469,7 @@ index 046e2d6..2cc8ad2 100644
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
-@@ -95,7 +96,7 @@ void arch_task_cache_init(void)
+@@ -96,7 +97,7 @@ void arch_task_cache_init(void)
task_xstate_cachep =
kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
@@ -26470,7 +26478,7 @@ index 046e2d6..2cc8ad2 100644
setup_xstate_comp();
}
-@@ -109,7 +110,7 @@ void exit_thread(void)
+@@ -110,7 +111,7 @@ void exit_thread(void)
unsigned long *bp = t->io_bitmap_ptr;
if (bp) {
@@ -26479,7 +26487,7 @@ index 046e2d6..2cc8ad2 100644
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
-@@ -129,6 +130,9 @@ void flush_thread(void)
+@@ -130,6 +131,9 @@ void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -26489,7 +26497,7 @@ index 046e2d6..2cc8ad2 100644
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
drop_init_fpu(tsk);
-@@ -275,7 +279,7 @@ static void __exit_idle(void)
+@@ -276,7 +280,7 @@ static void __exit_idle(void)
void exit_idle(void)
{
/* idle loop has pid 0 */
@@ -26498,7 +26506,7 @@ index 046e2d6..2cc8ad2 100644
return;
__exit_idle();
}
-@@ -328,7 +332,7 @@ bool xen_set_default_idle(void)
+@@ -329,7 +333,7 @@ bool xen_set_default_idle(void)
return ret;
}
#endif
@@ -26507,7 +26515,7 @@ index 046e2d6..2cc8ad2 100644
{
local_irq_disable();
/*
-@@ -457,16 +461,37 @@ static int __init idle_setup(char *str)
+@@ -508,16 +512,37 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
@@ -26889,7 +26897,7 @@ index e510618..5165ac0 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
-index 2f355d2..e75ed0a 100644
+index e5ecd20..60f7eef 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
@@ -27702,10 +27710,18 @@ index 30277e2..5664a29 100644
if (!(addr & ~PAGE_MASK))
return addr;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
-index 91a4496..bb87552 100644
+index 91a4496..42fc304 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
-@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
+@@ -44,6 +44,7 @@
+ #include <asm/setup.h>
+ #include <asm/e820.h>
+ #include <asm/io.h>
++#include <asm/tlbflush.h>
+
+ #include "../realmode/rm/wakeup.h"
+
+@@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
void tboot_shutdown(u32 shutdown_type)
{
@@ -27714,16 +27730,18 @@ index 91a4496..bb87552 100644
if (!tboot_enabled())
return;
-@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
+@@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
+ tboot->shutdown_type = shutdown_type;
switch_to_tboot_pt();
++ cr4_clear_bits(X86_CR4_PCIDE);
- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
shutdown();
/* should not reach here */
-@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
+@@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
return -ENODEV;
}
@@ -27732,7 +27750,7 @@ index 91a4496..bb87552 100644
static int tboot_wait_for_aps(int num_aps)
{
-@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
+@@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
{
switch (action) {
case CPU_DYING:
@@ -27744,7 +27762,7 @@ index 91a4496..bb87552 100644
return NOTIFY_BAD;
break;
}
-@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
+@@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
tboot_create_trampoline();
@@ -28729,7 +28747,7 @@ index cc618c8..3f72f76 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index ae4f6d3..7f5f59b 100644
+index a60bd3a..748e856 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
@@ -28779,7 +28797,7 @@ index ae4f6d3..7f5f59b 100644
{
u64 host_tsc, tsc_offset;
-@@ -4458,7 +4466,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4466,7 +4474,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
unsigned long cr4;
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
@@ -28790,7 +28808,7 @@ index ae4f6d3..7f5f59b 100644
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
-@@ -4485,7 +4496,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4493,7 +4504,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
@@ -28799,7 +28817,7 @@ index ae4f6d3..7f5f59b 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6096,11 +6107,17 @@ static __init int hardware_setup(void)
+@@ -6104,11 +6115,17 @@ static __init int hardware_setup(void)
* page upon invalidation. No need to do anything if not
* using the APIC_ACCESS_ADDR VMCS field.
*/
@@ -28821,7 +28839,7 @@ index ae4f6d3..7f5f59b 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -6111,14 +6128,16 @@ static __init int hardware_setup(void)
+@@ -6119,14 +6136,16 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
@@ -28843,7 +28861,7 @@ index ae4f6d3..7f5f59b 100644
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
-@@ -6171,10 +6190,12 @@ static __init int hardware_setup(void)
+@@ -6179,10 +6198,12 @@ static __init int hardware_setup(void)
enable_pml = 0;
if (!enable_pml) {
@@ -28860,7 +28878,7 @@ index ae4f6d3..7f5f59b 100644
}
return alloc_kvm_area();
-@@ -8219,6 +8240,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8227,6 +8248,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
@@ -28873,7 +28891,7 @@ index ae4f6d3..7f5f59b 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
-@@ -8271,6 +8298,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8279,6 +8306,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -28885,7 +28903,7 @@ index ae4f6d3..7f5f59b 100644
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
-@@ -8284,7 +8316,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8292,7 +8324,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
@@ -28894,7 +28912,7 @@ index ae4f6d3..7f5f59b 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -8293,8 +8325,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8301,8 +8333,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -28916,7 +28934,7 @@ index ae4f6d3..7f5f59b 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 32bf19e..c8de1b5 100644
+index e222ba5..6f0f2de 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -30290,7 +30308,7 @@ index a451235..a74bfa3 100644
CFI_ENDPROC
END(bad_get_user_8)
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
-index 1313ae6..84f25ea 100644
+index 85994f5..9929d7f 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -20,8 +20,10 @@
@@ -30304,9 +30322,9 @@ index 1313ae6..84f25ea 100644
#endif
#include <asm/inat.h>
#include <asm/insn.h>
-@@ -53,9 +55,9 @@
- void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
- {
+@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+ buf_len = MAX_INSN_SIZE;
+
memset(insn, 0, sizeof(*insn));
- insn->kaddr = kaddr;
- insn->end_kaddr = kaddr + buf_len;
@@ -31619,7 +31637,7 @@ index e2f5e21..4b22130 100644
+EXPORT_SYMBOL(set_fs);
+#endif
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
-index 1f33b3d..83c151d 100644
+index 0a42327..1c3a136 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
@@ -31678,7 +31696,7 @@ index 1f33b3d..83c151d 100644
- clac();
/* If the destination is a kernel buffer, we always clear the end */
- if ((unsigned long)to >= TASK_SIZE_MAX)
+ if (!__addr_ok(to))
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index c4cc740..60a7362 100644
--- a/arch/x86/mm/Makefile
@@ -35187,6 +35205,21 @@ index a28221d..93c40f1 100644
.long __KERNEL32_CS
#endif
END(real_mode_header)
+diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
+index d66c607..3def845 100644
+--- a/arch/x86/realmode/rm/reboot.S
++++ b/arch/x86/realmode/rm/reboot.S
+@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
+ lgdtl pa_tr_gdt
+
+ /* Disable paging to drop us out of long mode */
++ movl %cr4, %eax
++ andl $~X86_CR4_PCIDE, %eax
++ movl %eax, %cr4
++
+ movl %cr0, %eax
+ andl $~X86_CR0_PG, %eax
+ movl %eax, %cr0
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index 48ddd76..c26749f 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
@@ -37718,7 +37751,7 @@ index cecfb94..87009ec 100644
}
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
-index 876bae5..8978785 100644
+index 79bc203..fa3945b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
@@ -42096,10 +42129,10 @@ index c13fb5b..55a3802 100644
*off += size;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index 2978f5e..ac3a23c 100644
+index 00bc30e..d8e5097 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
-@@ -367,7 +367,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+@@ -370,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
int ret = 0;
next_gpadl_handle =
@@ -43699,6 +43732,480 @@ index 48882c1..93e0987 100644
cmd->data[2] = 1;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index a3adde6..988ee96 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -338,7 +338,7 @@ enum arm_smmu_domain_stage {
+
+ struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
+- struct io_pgtable_ops *pgtbl_ops;
++ struct io_pgtable *pgtbl;
+ spinlock_t pgtbl_lock;
+ struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
+@@ -833,7 +833,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ {
+ int irq, start, ret = 0;
+ unsigned long ias, oas;
+- struct io_pgtable_ops *pgtbl_ops;
++ struct io_pgtable *pgtbl;
+ struct io_pgtable_cfg pgtbl_cfg;
+ enum io_pgtable_fmt fmt;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+@@ -918,14 +918,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ };
+
+ smmu_domain->smmu = smmu;
+- pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+- if (!pgtbl_ops) {
++ pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
++ if (!pgtbl) {
+ ret = -ENOMEM;
+ goto out_clear_smmu;
+ }
+
+ /* Update our support page sizes to reflect the page table format */
+- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
++ pax_open_kernel();
++ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
++ pax_close_kernel();
+
+ /* Initialise the context bank with our page table cfg */
+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+@@ -946,7 +948,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+- smmu_domain->pgtbl_ops = pgtbl_ops;
++ smmu_domain->pgtbl = pgtbl;
+ return 0;
+
+ out_clear_smmu:
+@@ -979,8 +981,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+ free_irq(irq, domain);
+ }
+
+- if (smmu_domain->pgtbl_ops)
+- free_io_pgtable_ops(smmu_domain->pgtbl_ops);
++ free_io_pgtable(smmu_domain->pgtbl);
+
+ __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+ }
+@@ -1204,13 +1205,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ int ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++ struct io_pgtable *iop = smmu_domain->pgtbl;
+
+- if (!ops)
++ if (!iop)
+ return -ENODEV;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+- ret = ops->map(ops, iova, paddr, size, prot);
++ ret = iop->ops->map(iop, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+ }
+@@ -1221,13 +1222,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++ struct io_pgtable *iop = smmu_domain->pgtbl;
+
+- if (!ops)
++ if (!iop)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+- ret = ops->unmap(ops, iova, size);
++ ret = iop->ops->unmap(iop, iova, size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+ return ret;
+ }
+@@ -1238,7 +1239,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++ struct io_pgtable *iop = smmu_domain->pgtbl;
+ struct device *dev = smmu->dev;
+ void __iomem *cb_base;
+ u32 tmp;
+@@ -1261,7 +1262,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ dev_err(dev,
+ "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
+ &iova);
+- return ops->iova_to_phys(ops, iova);
++ return iop->ops->iova_to_phys(iop, iova);
+ }
+
+ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
+@@ -1282,9 +1283,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
++ struct io_pgtable *iop = smmu_domain->pgtbl;
+
+- if (!ops)
++ if (!iop)
+ return 0;
+
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+@@ -1292,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ ret = arm_smmu_iova_to_phys_hard(domain, iova);
+ } else {
+- ret = ops->iova_to_phys(ops, iova);
++ ret = iop->ops->iova_to_phys(iop, iova);
+ }
+
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+@@ -1651,7 +1652,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+ size |= SZ_64K | SZ_512M;
+ }
+
+- arm_smmu_ops.pgsize_bitmap &= size;
++ pax_open_kernel();
++ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
++ pax_close_kernel();
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+
+ if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index b610a8d..08eb879 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -36,12 +36,6 @@
+ #define io_pgtable_to_data(x) \
+ container_of((x), struct arm_lpae_io_pgtable, iop)
+
+-#define io_pgtable_ops_to_pgtable(x) \
+- container_of((x), struct io_pgtable, ops)
+-
+-#define io_pgtable_ops_to_data(x) \
+- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+-
+ /*
+ * For consistency with the architecture, we always consider
+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
+@@ -302,10 +296,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
+ return pte;
+ }
+
+-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
++static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot)
+ {
+- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+ arm_lpae_iopte prot;
+@@ -445,12 +439,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+ }
+
+-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
++static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
+ size_t size)
+ {
+ size_t unmapped;
+- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+- struct io_pgtable *iop = &data->iop;
++ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+@@ -461,10 +454,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ return unmapped;
+ }
+
+-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
++static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
+ unsigned long iova)
+ {
+- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+ arm_lpae_iopte pte, *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+@@ -531,6 +524,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
+ }
+ }
+
++static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
++ .map = arm_lpae_map,
++ .unmap = arm_lpae_unmap,
++ .iova_to_phys = arm_lpae_iova_to_phys,
++};
++
+ static struct arm_lpae_io_pgtable *
+ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+ {
+@@ -562,11 +561,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+
+- data->iop.ops = (struct io_pgtable_ops) {
+- .map = arm_lpae_map,
+- .unmap = arm_lpae_unmap,
+- .iova_to_phys = arm_lpae_iova_to_phys,
+- };
++ data->iop.ops = &arm_lpae_io_pgtable_ops;
+
+ return data;
+ }
+@@ -825,9 +820,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+ .flush_pgtable = dummy_flush_pgtable,
+ };
+
+-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
++static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
+ {
+- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
++ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+@@ -837,9 +832,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+ data->bits_per_level, data->pgd);
+ }
+
+-#define __FAIL(ops, i) ({ \
++#define __FAIL(iop, i) ({ \
+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
+- arm_lpae_dump_ops(ops); \
++ arm_lpae_dump_ops(iop); \
+ selftest_running = false; \
+ -EFAULT; \
+ })
+@@ -854,30 +849,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+ int i, j;
+ unsigned long iova;
+ size_t size;
+- struct io_pgtable_ops *ops;
++ struct io_pgtable *iop;
++ const struct io_pgtable_ops *ops;
+
+ selftest_running = true;
+
+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+ cfg_cookie = cfg;
+- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+- if (!ops) {
++ iop = alloc_io_pgtable(fmts[i], cfg, cfg);
++ if (!iop) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -ENOMEM;
+ }
++ ops = iop->ops;
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+- if (ops->iova_to_phys(ops, 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, 42))
++ return __FAIL(iop, i);
+
+- if (ops->iova_to_phys(ops, SZ_1G + 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, SZ_1G + 42))
++ return __FAIL(iop, i);
+
+- if (ops->iova_to_phys(ops, SZ_2G + 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, SZ_2G + 42))
++ return __FAIL(iop, i);
+
+ /*
+ * Distinct mappings of different granule sizes.
+@@ -887,19 +884,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+- if (ops->map(ops, iova, iova, size, IOMMU_READ |
++ if (ops->map(iop, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+- return __FAIL(ops, i);
++ return __FAIL(iop, i);
+
+ /* Overlapping mappings */
+- if (!ops->map(ops, iova, iova + size, size,
++ if (!ops->map(iop, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+- return __FAIL(ops, i);
++ return __FAIL(iop, i);
+
+- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
++ return __FAIL(iop, i);
+
+ iova += SZ_1G;
+ j++;
+@@ -908,15 +905,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+
+ /* Partial unmap */
+ size = 1UL << __ffs(cfg->pgsize_bitmap);
+- if (ops->unmap(ops, SZ_1G + size, size) != size)
+- return __FAIL(ops, i);
++ if (ops->unmap(iop, SZ_1G + size, size) != size)
++ return __FAIL(iop, i);
+
+ /* Remap of partial unmap */
+- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+- return __FAIL(ops, i);
++ if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
++ return __FAIL(iop, i);
+
+- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
++ return __FAIL(iop, i);
+
+ /* Full unmap */
+ iova = 0;
+@@ -924,25 +921,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+- if (ops->unmap(ops, iova, size) != size)
+- return __FAIL(ops, i);
++ if (ops->unmap(iop, iova, size) != size)
++ return __FAIL(iop, i);
+
+- if (ops->iova_to_phys(ops, iova + 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, iova + 42))
++ return __FAIL(iop, i);
+
+ /* Remap full block */
+- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+- return __FAIL(ops, i);
++ if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
++ return __FAIL(iop, i);
+
+- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+- return __FAIL(ops, i);
++ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
++ return __FAIL(iop, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+- free_io_pgtable_ops(ops);
++ free_io_pgtable(iop);
+ }
+
+ selftest_running = false;
+diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
+index 6436fe2..088c965 100644
+--- a/drivers/iommu/io-pgtable.c
++++ b/drivers/iommu/io-pgtable.c
+@@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
+ #endif
+ };
+
+-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
++struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
+ {
+@@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ iop->cookie = cookie;
+ iop->cfg = *cfg;
+
+- return &iop->ops;
++ return iop;
+ }
+
+ /*
+ * It is the IOMMU driver's responsibility to ensure that the page table
+ * is no longer accessible to the walker by this point.
+ */
+-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
++void free_io_pgtable(struct io_pgtable *iop)
+ {
+- struct io_pgtable *iop;
+-
+- if (!ops)
++ if (!iop)
+ return;
+
+- iop = container_of(ops, struct io_pgtable, ops);
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_init_table[iop->fmt]->free(iop);
+ }
+diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
+index 10e32f6..0b276c8 100644
+--- a/drivers/iommu/io-pgtable.h
++++ b/drivers/iommu/io-pgtable.h
+@@ -75,17 +75,18 @@ struct io_pgtable_cfg {
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
++struct io_pgtable;
+ struct io_pgtable_ops {
+- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
++ int (*map)(struct io_pgtable *iop, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
++ int (*unmap)(struct io_pgtable *iop, unsigned long iova,
+ size_t size);
+- phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
++ phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
+ unsigned long iova);
+ };
+
+ /**
+- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
++ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+@@ -94,9 +95,9 @@ struct io_pgtable_ops {
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
+ */
+-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+- struct io_pgtable_cfg *cfg,
+- void *cookie);
++struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
++ struct io_pgtable_cfg *cfg,
++ void *cookie);
+
+ /**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+@@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
++void free_io_pgtable(struct io_pgtable *iop);
+
+
+ /*
+@@ -125,7 +126,7 @@ struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+- struct io_pgtable_ops ops;
++ const struct io_pgtable_ops *ops;
+ };
+
+ /**
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 72e683d..c9db262 100644
--- a/drivers/iommu/iommu.c
@@ -43712,6 +44219,65 @@ index 72e683d..c9db262 100644
struct iommu_callback_data cb = {
.ops = ops,
};
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
+index bc39bdf..e2de272 100644
+--- a/drivers/iommu/ipmmu-vmsa.c
++++ b/drivers/iommu/ipmmu-vmsa.c
+@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
+ struct iommu_domain *io_domain;
+
+ struct io_pgtable_cfg cfg;
+- struct io_pgtable_ops *iop;
++ struct io_pgtable *iop;
+
+ unsigned int context_id;
+ spinlock_t lock; /* Protects mappings */
+@@ -323,8 +323,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+ domain->cfg.oas = 40;
+ domain->cfg.tlb = &ipmmu_gather_ops;
+
+- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+- domain);
++ domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
+ if (!domain->iop)
+ return -EINVAL;
+
+@@ -482,7 +481,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+ * been detached.
+ */
+ ipmmu_domain_destroy_context(domain);
+- free_io_pgtable_ops(domain->iop);
++ free_io_pgtable(domain->iop);
+ kfree(domain);
+ }
+
+@@ -551,7 +550,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
+ if (!domain)
+ return -ENODEV;
+
+- return domain->iop->map(domain->iop, iova, paddr, size, prot);
++ return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
+ }
+
+ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
+@@ -559,7 +558,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
+ {
+ struct ipmmu_vmsa_domain *domain = io_domain->priv;
+
+- return domain->iop->unmap(domain->iop, iova, size);
++ return domain->iop->ops->unmap(domain->iop, iova, size);
+ }
+
+ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
+@@ -569,7 +568,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
+
+ /* TODO: Is locking needed ? */
+
+- return domain->iop->iova_to_phys(domain->iop, iova);
++ return domain->iop->ops->iova_to_phys(domain->iop, iova);
+ }
+
+ static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 390079e..1da9d6c 100644
--- a/drivers/iommu/irq_remapping.c
@@ -44619,7 +45185,7 @@ index 8001fe9..abdd0d0 100644
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 717daad..6dd103f 100644
+index e617878..d423ee3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
@@ -44644,7 +45210,7 @@ index 717daad..6dd103f 100644
wake_up(&md_event_waiters);
}
-@@ -1438,7 +1438,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+@@ -1442,7 +1442,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
(le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
@@ -44653,7 +45219,7 @@ index 717daad..6dd103f 100644
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
-@@ -1689,7 +1689,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
+@@ -1693,7 +1693,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
else
sb->resync_offset = cpu_to_le64(0);
@@ -44662,7 +45228,7 @@ index 717daad..6dd103f 100644
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
-@@ -2560,7 +2560,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
+@@ -2564,7 +2564,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
@@ -44671,7 +45237,7 @@ index 717daad..6dd103f 100644
}
static ssize_t
-@@ -2569,7 +2569,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
+@@ -2573,7 +2573,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
@@ -44680,7 +45246,7 @@ index 717daad..6dd103f 100644
return len;
}
return -EINVAL;
-@@ -3005,8 +3005,8 @@ int md_rdev_init(struct md_rdev *rdev)
+@@ -3009,8 +3009,8 @@ int md_rdev_init(struct md_rdev *rdev)
rdev->sb_loaded = 0;
rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
@@ -44691,7 +45257,7 @@ index 717daad..6dd103f 100644
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
-@@ -7079,7 +7079,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
+@@ -7083,7 +7083,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
@@ -44700,7 +45266,7 @@ index 717daad..6dd103f 100644
return 0;
}
if (v == (void*)2) {
-@@ -7182,7 +7182,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
+@@ -7186,7 +7186,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
return error;
seq = file->private_data;
@@ -44709,7 +45275,7 @@ index 717daad..6dd103f 100644
return error;
}
-@@ -7199,7 +7199,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+@@ -7203,7 +7203,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
/* always allow read */
mask = POLLIN | POLLRDNORM;
@@ -44718,7 +45284,7 @@ index 717daad..6dd103f 100644
mask |= POLLERR | POLLPRI;
return mask;
}
-@@ -7246,7 +7246,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
+@@ -7250,7 +7250,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
@@ -48372,7 +48938,7 @@ index bdfe51f..e7845c7 100644
r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
-index af034db..1611c0b2 100644
+index 9d15566..5ad4ef6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -50140,7 +50706,7 @@ index 7543a56..367ca8ed 100644
1, asus->debug.method_id,
&input, &output);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
-index 15c0fab..f674006 100644
+index bceb30b..bf063d4 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
@@ -52009,7 +52575,7 @@ index ae45bd9..c32a586 100644
transport_setup_device(&rport->dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 6b78476..d40476f 100644
+index 3290a3e..d65ac1c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
@@ -52676,7 +53242,7 @@ index 7faa6ae..ae6c410 100644
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index ac3cbab..f0d1dd2 100644
+index f786de0..04b643e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -54377,7 +54943,7 @@ index 45a915c..09f9735 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index d7c3d5a..2f87607 100644
+index 3b71516..1f26579 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -26,6 +26,7 @@
@@ -54607,10 +55173,10 @@ index c78c841..48fd281 100644
#include "u_uac1.h"
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
-index 87cf86f..3de9809 100644
+index 7354d01..299478e 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
-@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
+@@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
urb->transfer_flags = URB_DIR_IN;
usb_get_urb(urb);
atomic_inc(&urb->use_count);
@@ -54619,7 +55185,7 @@ index 87cf86f..3de9809 100644
urb->setup_dma = dma_map_single(
hcd->self.controller,
urb->setup_packet,
-@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+@@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
urb->status = -EINPROGRESS;
usb_get_urb(urb);
atomic_inc(&urb->use_count);
@@ -58614,7 +59180,7 @@ index 4c55668..eeae150 100644
fd_offset + ex.a_text);
if (error != N_DATADDR(ex))
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 995986b..dcc4ef2 100644
+index d925f55..d31f527 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -34,6 +34,7 @@
@@ -59235,10 +59801,15 @@ index 995986b..dcc4ef2 100644
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
-@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -925,12 +1364,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
+- total_size = total_mapping_size(elf_phdata,
+- loc->elf_ex.e_phnum);
+- if (!total_size) {
+- error = -EINVAL;
+- goto out_free_dentry;
+
+#ifdef CONFIG_PAX_RANDMMAP
+ /* PaX: randomize base address at the default exe base if requested */
@@ -59250,13 +59821,14 @@ index 995986b..dcc4ef2 100644
+#endif
+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
+ elf_flags |= MAP_FIXED;
-+ }
+ }
+#endif
+
++ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -962,9 +1410,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
@@ -59269,7 +59841,7 @@ index 995986b..dcc4ef2 100644
/* set_brk can never work. Avoid overflows. */
retval = -EINVAL;
goto out_free_dentry;
-@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
+@@ -1000,16 +1448,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
if (retval)
goto out_free_dentry;
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -59318,7 +59890,7 @@ index 995986b..dcc4ef2 100644
load_bias, interp_elf_phdata);
if (!IS_ERR((void *)elf_entry)) {
/*
-@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
+@@ -1237,7 +1712,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
* Decide what to dump of a segment, part, all or none.
*/
static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -59327,7 +59899,7 @@ index 995986b..dcc4ef2 100644
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
-@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1275,7 +1750,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (vma->vm_file == NULL)
return 0;
@@ -59336,7 +59908,7 @@ index 995986b..dcc4ef2 100644
goto whole;
/*
-@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1482,9 +1957,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
@@ -59348,7 +59920,7 @@ index 995986b..dcc4ef2 100644
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
-@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+@@ -1493,7 +1968,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -59357,7 +59929,7 @@ index 995986b..dcc4ef2 100644
set_fs(old_fs);
fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
-@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2213,7 +2688,7 @@ static int elf_core_dump(struct coredump_params *cprm)
vma = next_vma(vma, gate_vma)) {
unsigned long dump_size;
@@ -59366,7 +59938,7 @@ index 995986b..dcc4ef2 100644
vma_filesz[i++] = dump_size;
vma_data_size += dump_size;
}
-@@ -2314,6 +2794,167 @@ out:
+@@ -2321,6 +2796,167 @@ out:
#endif /* CONFIG_ELF_CORE */
@@ -64355,7 +64927,7 @@ index 6a61c2b..bd79179 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index c83145a..a78aa13 100644
+index caa38a2..44c470a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -64457,7 +65029,7 @@ index c83145a..a78aa13 100644
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
-@@ -1639,6 +1652,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -1640,6 +1653,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
@@ -64466,7 +65038,7 @@ index c83145a..a78aa13 100644
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1711,7 +1726,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1712,7 +1727,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline u64 hash_name(const char *name)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -64475,7 +65047,7 @@ index c83145a..a78aa13 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -2006,6 +2021,8 @@ static int path_lookupat(int dfd, const char *name,
+@@ -2007,6 +2022,8 @@ static int path_lookupat(int dfd, const char *name,
if (err)
break;
err = lookup_last(nd, &path);
@@ -64484,7 +65056,7 @@ index c83145a..a78aa13 100644
put_link(nd, &link, cookie);
}
}
-@@ -2013,6 +2030,13 @@ static int path_lookupat(int dfd, const char *name,
+@@ -2014,6 +2031,13 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -64498,7 +65070,7 @@ index c83145a..a78aa13 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
-@@ -2034,8 +2058,15 @@ static int filename_lookup(int dfd, struct filename *name,
+@@ -2035,8 +2059,15 @@ static int filename_lookup(int dfd, struct filename *name,
retval = path_lookupat(dfd, name->name,
flags | LOOKUP_REVAL, nd);
@@ -64515,7 +65087,7 @@ index c83145a..a78aa13 100644
return retval;
}
-@@ -2614,6 +2645,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2615,6 +2646,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -64529,7 +65101,7 @@ index c83145a..a78aa13 100644
return 0;
}
-@@ -2845,7 +2883,7 @@ looked_up:
+@@ -2846,7 +2884,7 @@ looked_up:
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
@@ -64538,7 +65110,7 @@ index c83145a..a78aa13 100644
const struct open_flags *op,
bool got_write, int *opened)
{
-@@ -2880,6 +2918,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2881,6 +2919,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -64556,7 +65128,7 @@ index c83145a..a78aa13 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2901,6 +2950,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2902,6 +2951,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -64565,7 +65137,7 @@ index c83145a..a78aa13 100644
}
out_no_open:
path->dentry = dentry;
-@@ -2915,7 +2966,7 @@ out_dput:
+@@ -2916,7 +2967,7 @@ out_dput:
/*
* Handle the last step of open()
*/
@@ -64574,7 +65146,7 @@ index c83145a..a78aa13 100644
struct file *file, const struct open_flags *op,
int *opened, struct filename *name)
{
-@@ -2965,6 +3016,15 @@ static int do_last(struct nameidata *nd, struct path *path,
+@@ -2966,6 +3017,15 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
@@ -64590,7 +65162,7 @@ index c83145a..a78aa13 100644
audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
-@@ -2984,7 +3044,7 @@ retry_lookup:
+@@ -2985,7 +3045,7 @@ retry_lookup:
*/
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -64599,7 +65171,7 @@ index c83145a..a78aa13 100644
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
-@@ -3008,11 +3068,28 @@ retry_lookup:
+@@ -3009,11 +3069,28 @@ retry_lookup:
goto finish_open_created;
}
@@ -64629,7 +65201,7 @@ index c83145a..a78aa13 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -3053,6 +3130,11 @@ finish_lookup:
+@@ -3055,6 +3132,11 @@ finish_lookup:
}
}
BUG_ON(inode != path->dentry->d_inode);
@@ -64641,7 +65213,7 @@ index c83145a..a78aa13 100644
return 1;
}
-@@ -3072,7 +3154,18 @@ finish_open:
+@@ -3074,7 +3156,18 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -64660,7 +65232,15 @@ index c83145a..a78aa13 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3233,7 +3326,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3228,14 +3321,14 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+
+ if (unlikely(file->f_flags & __O_TMPFILE)) {
+ error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
+- goto out;
++ goto out2;
+ }
+
+ error = path_init(dfd, pathname->name, flags, nd);
if (unlikely(error))
goto out;
@@ -64669,7 +65249,7 @@ index c83145a..a78aa13 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3251,7 +3344,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3253,11 +3346,12 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -64678,7 +65258,12 @@ index c83145a..a78aa13 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3353,9 +3446,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
+ path_cleanup(nd);
++out2:
+ if (!(opened & FILE_OPENED)) {
+ BUG_ON(!error);
+ put_filp(file);
+@@ -3355,9 +3449,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
goto unlock;
error = -EEXIST;
@@ -64692,7 +65277,7 @@ index c83145a..a78aa13 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3420,6 +3515,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3422,6 +3518,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -64713,7 +65298,7 @@ index c83145a..a78aa13 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3483,6 +3592,17 @@ retry:
+@@ -3485,6 +3595,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -64731,7 +65316,7 @@ index c83145a..a78aa13 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3498,6 +3618,8 @@ retry:
+@@ -3500,6 +3621,8 @@ retry:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
@@ -64740,7 +65325,7 @@ index c83145a..a78aa13 100644
out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
-@@ -3552,9 +3674,16 @@ retry:
+@@ -3554,9 +3677,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -64757,7 +65342,7 @@ index c83145a..a78aa13 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3587,7 +3716,7 @@ void dentry_unhash(struct dentry *dentry)
+@@ -3589,7 +3719,7 @@ void dentry_unhash(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
spin_lock(&dentry->d_lock);
@@ -64766,7 +65351,7 @@ index c83145a..a78aa13 100644
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
-@@ -3638,6 +3767,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3640,6 +3770,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -64775,7 +65360,7 @@ index c83145a..a78aa13 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3670,10 +3801,21 @@ retry:
+@@ -3672,10 +3804,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -64797,7 +65382,7 @@ index c83145a..a78aa13 100644
exit3:
dput(dentry);
exit2:
-@@ -3766,6 +3908,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3768,6 +3911,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -64806,7 +65391,7 @@ index c83145a..a78aa13 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3792,10 +3936,22 @@ retry_deleg:
+@@ -3794,10 +3939,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -64829,7 +65414,7 @@ index c83145a..a78aa13 100644
exit2:
dput(dentry);
}
-@@ -3884,9 +4040,17 @@ retry:
+@@ -3886,9 +4043,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -64847,7 +65432,7 @@ index c83145a..a78aa13 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3990,6 +4154,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3992,6 +4157,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -64855,7 +65440,7 @@ index c83145a..a78aa13 100644
int how = 0;
int error;
-@@ -4013,7 +4178,7 @@ retry:
+@@ -4015,7 +4181,7 @@ retry:
if (error)
return error;
@@ -64864,7 +65449,7 @@ index c83145a..a78aa13 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -4025,11 +4190,28 @@ retry:
+@@ -4027,11 +4193,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -64893,7 +65478,7 @@ index c83145a..a78aa13 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4345,6 +4527,20 @@ retry_deleg:
+@@ -4347,6 +4530,20 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -64914,7 +65499,7 @@ index c83145a..a78aa13 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry, flags);
if (error)
-@@ -4352,6 +4548,9 @@ retry_deleg:
+@@ -4354,6 +4551,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode, flags);
@@ -64924,7 +65509,7 @@ index c83145a..a78aa13 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4408,14 +4607,24 @@ EXPORT_SYMBOL(vfs_whiteout);
+@@ -4410,14 +4610,24 @@ EXPORT_SYMBOL(vfs_whiteout);
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
@@ -64951,10 +65536,10 @@ index c83145a..a78aa13 100644
out:
return len;
diff --git a/fs/namespace.c b/fs/namespace.c
-index 82ef140..5335e75 100644
+index 4622ee3..2db42e3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -1438,6 +1438,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1480,6 +1480,9 @@ static int do_umount(struct mount *mnt, int flags)
if (!(sb->s_flags & MS_RDONLY))
retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
up_write(&sb->s_umount);
@@ -64964,7 +65549,7 @@ index 82ef140..5335e75 100644
return retval;
}
-@@ -1460,6 +1463,9 @@ static int do_umount(struct mount *mnt, int flags)
+@@ -1502,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
}
unlock_mount_hash();
namespace_unlock();
@@ -64974,7 +65559,7 @@ index 82ef140..5335e75 100644
return retval;
}
-@@ -1510,7 +1516,7 @@ static inline bool may_mount(void)
+@@ -1559,7 +1565,7 @@ static inline bool may_mount(void)
* unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
*/
@@ -64983,7 +65568,7 @@ index 82ef140..5335e75 100644
{
struct path path;
struct mount *mnt;
-@@ -1555,7 +1561,7 @@ out:
+@@ -1604,7 +1610,7 @@ out:
/*
* The 2.0 compatible umount. No flags.
*/
@@ -64992,7 +65577,7 @@ index 82ef140..5335e75 100644
{
return sys_umount(name, 0);
}
-@@ -2621,6 +2627,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+@@ -2670,6 +2676,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
@@ -65009,7 +65594,7 @@ index 82ef140..5335e75 100644
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
data_page);
-@@ -2634,7 +2650,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+@@ -2683,7 +2699,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
retval = do_new_mount(&path, type_page, flags, mnt_flags,
dev_name, data_page);
dput_out:
@@ -65020,7 +65605,7 @@ index 82ef140..5335e75 100644
return retval;
}
-@@ -2652,7 +2671,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
+@@ -2701,7 +2720,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
* number incrementing at 10Ghz will take 12,427 years to wrap which
* is effectively never, so we can ignore the possibility.
*/
@@ -65029,7 +65614,7 @@ index 82ef140..5335e75 100644
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
-@@ -2668,7 +2687,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2717,7 +2736,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return ERR_PTR(ret);
}
new_ns->ns.ops = &mntns_operations;
@@ -65038,7 +65623,7 @@ index 82ef140..5335e75 100644
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
-@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+@@ -2727,7 +2746,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
@@ -65047,7 +65632,7 @@ index 82ef140..5335e75 100644
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
struct mnt_namespace *new_ns;
-@@ -2799,8 +2818,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
+@@ -2848,8 +2867,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
}
EXPORT_SYMBOL(mount_subtree);
@@ -65058,7 +65643,7 @@ index 82ef140..5335e75 100644
{
int ret;
char *kernel_type;
-@@ -2906,6 +2925,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+@@ -2955,6 +2974,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (error)
goto out2;
@@ -65070,7 +65655,7 @@ index 82ef140..5335e75 100644
get_fs_root(current->fs, &root);
old_mp = lock_mount(&old);
error = PTR_ERR(old_mp);
-@@ -3180,7 +3204,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+@@ -3229,7 +3253,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
@@ -65117,10 +65702,10 @@ index d42dff6..ecbdf42 100644
EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index 92b9d97..045e58c 100644
+index 5416968..0942042 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
-@@ -1492,7 +1492,7 @@ struct nfsd4_operation {
+@@ -1496,7 +1496,7 @@ struct nfsd4_operation {
nfsd4op_rsize op_rsize_bop;
stateid_getter op_get_currentstateid;
stateid_setter op_set_currentstateid;
@@ -65130,7 +65715,7 @@ index 92b9d97..045e58c 100644
static struct nfsd4_operation nfsd4_ops[];
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 5fb7e78..cc8a22e 100644
+index 5b33ce1..c2a92aa 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
@@ -65556,7 +66141,7 @@ index 2667518..24bcf79 100644
/* Copy the blockcheck stats from the superblock probe */
osb->osb_ecc_stats = *stats;
diff --git a/fs/open.c b/fs/open.c
-index 33f9cbf..8abe053 100644
+index 44a3be1..5e97aa1 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -32,6 +32,8 @@
@@ -65657,9 +66242,9 @@ index 33f9cbf..8abe053 100644
+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
+ return -EACCES;
+
+ retry_deleg:
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
- if (!uid_valid(uid))
@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
} else {
fsnotify_open(f);
@@ -65693,9 +66278,18 @@ index 5f0d199..13b74b9 100644
struct ovl_entry *oe;
struct ovl_fs *ufs;
diff --git a/fs/pipe.c b/fs/pipe.c
-index 21981e5..3d5f55c 100644
+index 21981e5..2c0bffb 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
+@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
+ /*
+ * Minimum pipe size, as required by POSIX
+ */
+-unsigned int pipe_min_size = PAGE_SIZE;
++unsigned int pipe_min_size __read_only = PAGE_SIZE;
+
+ /*
+ * We use a start+len construction, which provides full use of the
@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
@@ -65902,6 +66496,35 @@ index 21981e5..3d5f55c 100644
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
+@@ -1010,7 +1011,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+-static inline unsigned int round_pipe_size(unsigned int size)
++static inline unsigned long round_pipe_size(unsigned long size)
+ {
+ unsigned long nr_pages;
+
+@@ -1058,13 +1059,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ switch (cmd) {
+ case F_SETPIPE_SZ: {
+- unsigned int size, nr_pages;
++ unsigned long size, nr_pages;
++
++ ret = -EINVAL;
++ if (arg < pipe_min_size)
++ goto out;
+
+ size = round_pipe_size(arg);
+ nr_pages = size >> PAGE_SHIFT;
+
+- ret = -EINVAL;
+- if (!nr_pages)
++ if (size < pipe_min_size)
+ goto out;
+
+ if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 3a48bb7..403067b 100644
--- a/fs/posix_acl.c
@@ -67998,7 +68621,7 @@ index 555f821..34684d7 100644
{
const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
diff --git a/fs/splice.c b/fs/splice.c
-index 7968da9..275187d 100644
+index 7968da9..4ce985b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
@@ -68089,6 +68712,15 @@ index 7968da9..275187d 100644
sd.need_wakeup = true;
} else {
buf->offset += ret;
+@@ -1159,7 +1159,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ long ret, bytes;
+ umode_t i_mode;
+ size_t len;
+- int i, flags;
++ int i, flags, more;
+
+ /*
+ * We require the input being a regular file, as we don't want to
@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* out of the pipe right after the splice_to_pipe(). So set
* PIPE_READERS appropriately.
@@ -68098,7 +68730,31 @@ index 7968da9..275187d 100644
current->splice_pipe = pipe;
}
-@@ -1482,6 +1482,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
+@@ -1202,6 +1202,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ * Don't block on output, we have to drain the direct pipe.
+ */
+ sd->flags &= ~SPLICE_F_NONBLOCK;
++ more = sd->flags & SPLICE_F_MORE;
+
+ while (len) {
+ size_t read_len;
+@@ -1215,6 +1216,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ sd->total_len = read_len;
+
+ /*
++ * If more data is pending, set SPLICE_F_MORE
++ * If this is the last data and SPLICE_F_MORE was not set
++ * initially, clears it.
++ */
++ if (read_len < len)
++ sd->flags |= SPLICE_F_MORE;
++ else if (!more)
++ sd->flags &= ~SPLICE_F_MORE;
++ /*
+ * NOTE: nonblocking mode only applies to the input. We
+ * must not do the output in nonblocking mode as then we
+ * could get stuck data in the internal pipe:
+@@ -1482,6 +1492,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
partial[buffers].offset = off;
partial[buffers].len = plen;
@@ -68106,7 +68762,7 @@ index 7968da9..275187d 100644
off = 0;
len -= plen;
-@@ -1718,9 +1719,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1718,9 +1729,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -68118,7 +68774,7 @@ index 7968da9..275187d 100644
if (flags & SPLICE_F_NONBLOCK) {
ret = -EAGAIN;
break;
-@@ -1752,7 +1753,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1752,7 +1763,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
pipe_lock(pipe);
while (pipe->nrbufs >= pipe->buffers) {
@@ -68127,7 +68783,7 @@ index 7968da9..275187d 100644
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
break;
-@@ -1765,9 +1766,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
+@@ -1765,9 +1776,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ret = -ERESTARTSYS;
break;
}
@@ -68139,7 +68795,7 @@ index 7968da9..275187d 100644
}
pipe_unlock(pipe);
-@@ -1803,14 +1804,14 @@ retry:
+@@ -1803,14 +1814,14 @@ retry:
pipe_double_lock(ipipe, opipe);
do {
@@ -68156,7 +68812,7 @@ index 7968da9..275187d 100644
break;
/*
-@@ -1907,7 +1908,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -1907,7 +1918,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
pipe_double_lock(ipipe, opipe);
do {
@@ -68165,7 +68821,7 @@ index 7968da9..275187d 100644
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
-@@ -1952,7 +1953,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+@@ -1952,7 +1963,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* return EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
@@ -80822,7 +81478,7 @@ index 5591ea7..61b77ce 100644
/**
* struct clk_init_data - holds init data that's common to all clocks and is
diff --git a/include/linux/compat.h b/include/linux/compat.h
-index ab25814..9026bca 100644
+index ab25814..d1540d1 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
@@ -80834,6 +81490,15 @@ index ab25814..9026bca 100644
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
+@@ -325,7 +325,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ long compat_sys_msgctl(int first, int second, void __user *uptr);
+ long compat_sys_shmctl(int first, int second, void __user *uptr);
+ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+- unsigned nsems, const struct compat_timespec __user *timeout);
++ compat_long_t nsems, const struct compat_timespec __user *timeout);
+ asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
@@ -84390,10 +85055,10 @@ index 1c9effa..1160bdd 100644
.ops = &param_ops_##type, \
.elemsize = sizeof(array[0]), .elem = array }; \
diff --git a/include/linux/mount.h b/include/linux/mount.h
-index c2c561d..a5f2a8c 100644
+index 564beee..653be6f 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
-@@ -66,7 +66,7 @@ struct vfsmount {
+@@ -67,7 +67,7 @@ struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
@@ -85254,7 +85919,7 @@ index ed8f9e70..999bc96 100644
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index a419b65..6dd8f3f 100644
+index 51348f7..8c8b0ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -133,6 +133,7 @@ struct fs_struct;
@@ -85265,7 +85930,7 @@ index a419b65..6dd8f3f 100644
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-@@ -412,7 +413,7 @@ extern char __sched_text_start[], __sched_text_end[];
+@@ -420,7 +421,7 @@ extern char __sched_text_start[], __sched_text_end[];
extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
@@ -85274,7 +85939,7 @@ index a419b65..6dd8f3f 100644
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
-@@ -430,6 +431,19 @@ struct nsproxy;
+@@ -438,6 +439,19 @@ struct nsproxy;
struct user_namespace;
#ifdef CONFIG_MMU
@@ -85294,7 +85959,7 @@ index a419b65..6dd8f3f 100644
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -728,6 +742,17 @@ struct signal_struct {
+@@ -736,6 +750,17 @@ struct signal_struct {
#ifdef CONFIG_TASKSTATS
struct taskstats *stats;
#endif
@@ -85312,7 +85977,7 @@ index a419b65..6dd8f3f 100644
#ifdef CONFIG_AUDIT
unsigned audit_tty;
unsigned audit_tty_log_passwd;
-@@ -754,7 +779,7 @@ struct signal_struct {
+@@ -762,7 +787,7 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
@@ -85321,7 +85986,7 @@ index a419b65..6dd8f3f 100644
/*
* Bits in flags field of signal_struct.
-@@ -807,6 +832,14 @@ struct user_struct {
+@@ -815,6 +840,14 @@ struct user_struct {
struct key *session_keyring; /* UID's default session keyring */
#endif
@@ -85336,7 +86001,7 @@ index a419b65..6dd8f3f 100644
/* Hash table maintenance information */
struct hlist_node uidhash_node;
kuid_t uid;
-@@ -814,7 +847,7 @@ struct user_struct {
+@@ -822,7 +855,7 @@ struct user_struct {
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
@@ -85345,7 +86010,7 @@ index a419b65..6dd8f3f 100644
extern int uids_sysfs_init(void);
-@@ -1278,6 +1311,9 @@ enum perf_event_task_context {
+@@ -1286,6 +1319,9 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -85355,7 +86020,7 @@ index a419b65..6dd8f3f 100644
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
-@@ -1411,8 +1447,8 @@ struct task_struct {
+@@ -1419,8 +1455,8 @@ struct task_struct {
struct list_head thread_node;
struct completion *vfork_done; /* for vfork() */
@@ -85366,7 +86031,7 @@ index a419b65..6dd8f3f 100644
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1437,11 +1473,6 @@ struct task_struct {
+@@ -1445,11 +1481,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -85378,7 +86043,7 @@ index a419b65..6dd8f3f 100644
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1459,6 +1490,10 @@ struct task_struct {
+@@ -1467,6 +1498,10 @@ struct task_struct {
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -85389,7 +86054,7 @@ index a419b65..6dd8f3f 100644
/* filesystem information */
struct fs_struct *fs;
/* open file information */
-@@ -1533,6 +1568,10 @@ struct task_struct {
+@@ -1541,6 +1576,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
@@ -85400,7 +86065,7 @@ index a419b65..6dd8f3f 100644
/* journalling filesystem info */
void *journal_info;
-@@ -1571,6 +1610,10 @@ struct task_struct {
+@@ -1579,6 +1618,10 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
@@ -85411,7 +86076,7 @@ index a419b65..6dd8f3f 100644
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
-@@ -1682,7 +1725,7 @@ struct task_struct {
+@@ -1690,7 +1733,7 @@ struct task_struct {
* Number of functions that haven't been traced
* because of depth overrun.
*/
@@ -85420,7 +86085,7 @@ index a419b65..6dd8f3f 100644
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
-@@ -1710,7 +1753,78 @@ struct task_struct {
+@@ -1718,7 +1761,78 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
@@ -85500,7 +86165,7 @@ index a419b65..6dd8f3f 100644
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -1793,7 +1907,7 @@ struct pid_namespace;
+@@ -1801,7 +1915,7 @@ struct pid_namespace;
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
@@ -85509,7 +86174,7 @@ index a419b65..6dd8f3f 100644
{
return tsk->pid;
}
-@@ -2161,6 +2275,25 @@ extern u64 sched_clock_cpu(int cpu);
+@@ -2169,6 +2283,25 @@ extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
@@ -85535,7 +86200,7 @@ index a419b65..6dd8f3f 100644
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
-@@ -2294,7 +2427,9 @@ void yield(void);
+@@ -2302,7 +2435,9 @@ void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
@@ -85545,7 +86210,7 @@ index a419b65..6dd8f3f 100644
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
-@@ -2327,6 +2462,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2335,6 +2470,7 @@ extern struct pid_namespace init_pid_ns;
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -85553,7 +86218,7 @@ index a419b65..6dd8f3f 100644
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
-@@ -2491,7 +2627,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2499,7 +2635,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -85562,7 +86227,7 @@ index a419b65..6dd8f3f 100644
extern int do_execve(struct filename *,
const char __user * const __user *,
-@@ -2712,9 +2848,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2720,9 +2856,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
@@ -85696,19 +86361,19 @@ index ab1e039..ad4229e 100644
static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index f54d665..e41848d 100644
+index bdccc4b..e9f8670 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -770,7 +770,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
- struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+@@ -771,7 +771,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
-static inline struct sk_buff *alloc_skb(unsigned int size,
+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -1966,7 +1966,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -1967,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
return skb->inner_transport_header - skb->inner_network_header;
}
@@ -85717,7 +86382,7 @@ index f54d665..e41848d 100644
{
return skb_network_header(skb) - skb->data;
}
-@@ -2026,7 +2026,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -2027,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
@@ -85726,7 +86391,7 @@ index f54d665..e41848d 100644
#endif
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2668,9 +2668,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2669,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
@@ -85738,7 +86403,7 @@ index f54d665..e41848d 100644
struct msghdr *msg, int size)
{
return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
-@@ -3180,6 +3180,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -3193,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
@@ -86050,7 +86715,7 @@ index e7a018e..49f8b17 100644
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 76d1e38..d92ff38 100644
+index 76d1e38..200776e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -102,7 +102,12 @@ union bpf_attr;
@@ -86092,6 +86757,19 @@ index 76d1e38..d92ff38 100644
asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags);
+@@ -663,10 +668,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+
+ asmlinkage long sys_semget(key_t key, int nsems, int semflg);
+ asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
+- unsigned nsops);
++ long nsops);
+ asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
+ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
+- unsigned nsops,
++ long nsops,
+ const struct timespec __user *timeout);
+ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+ asmlinkage long sys_shmget(key_t key, size_t size, int flag);
diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
index 27b3b0b..e093dd9 100644
--- a/include/linux/syscore_ops.h
@@ -86396,10 +87074,10 @@ index 99c1b4d..562e6f3 100644
static inline void put_unaligned_le16(u16 val, void *p)
diff --git a/include/linux/usb.h b/include/linux/usb.h
-index 7ee1b5c..82e2c1a 100644
+index 447fe29..9fc875f 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
-@@ -566,7 +566,7 @@ struct usb_device {
+@@ -592,7 +592,7 @@ struct usb_device {
int maxchild;
u32 quirks;
@@ -86408,7 +87086,7 @@ index 7ee1b5c..82e2c1a 100644
unsigned long active_duration;
-@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
+@@ -1676,7 +1676,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
@@ -87537,7 +88215,7 @@ index 0d1ade1..34e77d3 100644
struct snd_soc_dai_link_component {
const char *name;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index 672150b..9d4bec4 100644
+index 985ca4c..b55b54a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -767,7 +767,7 @@ struct se_device {
@@ -88472,7 +89150,7 @@ index 6f0f1c5f..a542824 100644
* Ok, we have completed the initial bootup, and
* we're essentially up and running. Get rid of the
diff --git a/ipc/compat.c b/ipc/compat.c
-index 9b3c85f..1c4d897 100644
+index 9b3c85f..5266b0f 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
@@ -88484,6 +89162,15 @@ index 9b3c85f..1c4d897 100644
}
case SHMDT:
return sys_shmdt(compat_ptr(ptr));
+@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
+ }
+
+ COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+- unsigned, nsops,
++ compat_long_t, nsops,
+ const struct compat_timespec __user *, timeout)
+ {
+ struct timespec __user *ts64;
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 8ad93c2..efd80f8 100644
--- a/ipc/ipc_sysctl.c
@@ -88558,6 +89245,28 @@ index 7635a1c..7432cb6 100644
spin_lock(&mq_lock);
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 9284211..bca5b1b 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
+ }
+
+ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+- unsigned, nsops, const struct timespec __user *, timeout)
++ long, nsops, const struct timespec __user *, timeout)
+ {
+ int error = -EINVAL;
+ struct sem_array *sma;
+@@ -2015,7 +2015,7 @@ out_free:
+ }
+
+ SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
+- unsigned, nsops)
++ long, nsops)
+ {
+ return sys_semtimedop(semid, tsops, nsops, NULL);
+ }
diff --git a/ipc/shm.c b/ipc/shm.c
index 19633b4..d454904 100644
--- a/ipc/shm.c
@@ -92050,7 +92759,7 @@ index a7bcd28..5b368fa 100644
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index 227fec3..3aea55b 100644
+index 9a34bd8..38d90e5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
@@ -92071,7 +92780,7 @@ index 227fec3..3aea55b 100644
return -EFAULT;
copied += retval;
src += retval;
-@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
+@@ -803,7 +803,7 @@ int ptrace_request(struct task_struct *child, long request,
bool seized = child->ptrace & PT_SEIZED;
int ret = -EIO;
siginfo_t siginfo, *si;
@@ -92080,7 +92789,7 @@ index 227fec3..3aea55b 100644
unsigned long __user *datalp = datavp;
unsigned long flags;
-@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+@@ -1049,14 +1049,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out;
}
@@ -92103,7 +92812,7 @@ index 227fec3..3aea55b 100644
goto out_put_task_struct;
}
-@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+@@ -1084,7 +1091,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
return -EIO;
@@ -92112,7 +92821,7 @@ index 227fec3..3aea55b 100644
}
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
-@@ -1157,7 +1164,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
+@@ -1177,7 +1184,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
}
COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
@@ -92121,7 +92830,7 @@ index 227fec3..3aea55b 100644
{
struct task_struct *child;
long ret;
-@@ -1173,14 +1180,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
+@@ -1193,14 +1200,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
goto out;
}
@@ -93260,10 +93969,10 @@ index 8d0f35d..c16360d 100644
unsigned long timeout)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 62671f5..7b3505b 100644
+index 3d5f6f6..a94298f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1847,7 +1847,7 @@ void set_numabalancing_state(bool enabled)
+@@ -1862,7 +1862,7 @@ void set_numabalancing_state(bool enabled)
int sysctl_numa_balancing(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -93272,7 +93981,7 @@ index 62671f5..7b3505b 100644
int err;
int state = numabalancing_enabled;
-@@ -2297,8 +2297,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
+@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
@@ -93284,7 +93993,7 @@ index 62671f5..7b3505b 100644
if (!prev->mm) {
prev->active_mm = NULL;
-@@ -3109,6 +3111,8 @@ int can_nice(const struct task_struct *p, const int nice)
+@@ -3124,6 +3126,8 @@ int can_nice(const struct task_struct *p, const int nice)
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = nice_to_rlimit(nice);
@@ -93293,7 +94002,7 @@ index 62671f5..7b3505b 100644
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
-@@ -3135,7 +3139,8 @@ SYSCALL_DEFINE1(nice, int, increment)
+@@ -3150,7 +3154,8 @@ SYSCALL_DEFINE1(nice, int, increment)
nice = task_nice(current) + increment;
nice = clamp_val(nice, MIN_NICE, MAX_NICE);
@@ -93303,7 +94012,7 @@ index 62671f5..7b3505b 100644
return -EPERM;
retval = security_task_setnice(current, nice);
-@@ -3444,6 +3449,7 @@ recheck:
+@@ -3459,6 +3464,7 @@ recheck:
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
@@ -93311,7 +94020,7 @@ index 62671f5..7b3505b 100644
/* can't increase priority */
if (attr->sched_priority > p->rt_priority &&
attr->sched_priority > rlim_rtprio)
-@@ -4931,6 +4937,7 @@ void idle_task_exit(void)
+@@ -4946,6 +4952,7 @@ void idle_task_exit(void)
if (mm != &init_mm) {
switch_mm(mm, &init_mm, current);
@@ -93319,7 +94028,7 @@ index 62671f5..7b3505b 100644
finish_arch_post_lock_switch();
}
mmdrop(mm);
-@@ -5026,7 +5033,7 @@ static void migrate_tasks(unsigned int dead_cpu)
+@@ -5041,7 +5048,7 @@ static void migrate_tasks(unsigned int dead_cpu)
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -93328,7 +94037,7 @@ index 62671f5..7b3505b 100644
{
.procname = "sched_domain",
.mode = 0555,
-@@ -5043,17 +5050,17 @@ static struct ctl_table sd_ctl_root[] = {
+@@ -5058,17 +5065,17 @@ static struct ctl_table sd_ctl_root[] = {
{}
};
@@ -93350,7 +94059,7 @@ index 62671f5..7b3505b 100644
/*
* In the intermediate directories, both the child directory and
-@@ -5061,22 +5068,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+@@ -5076,22 +5083,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
@@ -93382,7 +94091,7 @@ index 62671f5..7b3505b 100644
const char *procname, void *data, int maxlen,
umode_t mode, proc_handler *proc_handler,
bool load_idx)
-@@ -5096,7 +5106,7 @@ set_table_entry(struct ctl_table *entry,
+@@ -5111,7 +5121,7 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
@@ -93391,7 +94100,7 @@ index 62671f5..7b3505b 100644
if (table == NULL)
return NULL;
-@@ -5134,9 +5144,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+@@ -5149,9 +5159,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
@@ -93403,7 +94112,7 @@ index 62671f5..7b3505b 100644
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
-@@ -5163,11 +5173,13 @@ static struct ctl_table_header *sd_sysctl_header;
+@@ -5178,11 +5188,13 @@ static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
@@ -93418,7 +94127,7 @@ index 62671f5..7b3505b 100644
if (entry == NULL)
return;
-@@ -5190,8 +5202,12 @@ static void unregister_sched_domain_sysctl(void)
+@@ -5205,8 +5217,12 @@ static void unregister_sched_domain_sysctl(void)
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
@@ -94603,7 +95312,7 @@ index 4f22802..bd268b1 100644
/* make curr_ret_stack visible before we add the ret_stack */
smp_wmb();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index 5040d44..d43b2b9 100644
+index 922048a..bb71a55 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -348,9 +348,9 @@ struct buffer_data_page {
@@ -94815,7 +95524,7 @@ index 5040d44..d43b2b9 100644
return NULL;
}
#endif
-@@ -2902,7 +2902,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2901,7 +2901,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
@@ -94824,7 +95533,7 @@ index 5040d44..d43b2b9 100644
return;
}
-@@ -2914,7 +2914,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2913,7 +2913,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
start = bpage;
do {
if (bpage->page == (void *)addr) {
@@ -94833,7 +95542,7 @@ index 5040d44..d43b2b9 100644
return;
}
rb_inc_page(cpu_buffer, &bpage);
-@@ -3198,7 +3198,7 @@ static inline unsigned long
+@@ -3197,7 +3197,7 @@ static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
return local_read(&cpu_buffer->entries) -
@@ -94842,7 +95551,7 @@ index 5040d44..d43b2b9 100644
}
/**
-@@ -3287,7 +3287,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3286,7 +3286,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -94851,7 +95560,7 @@ index 5040d44..d43b2b9 100644
return ret;
}
-@@ -3310,7 +3310,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3309,7 +3309,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -94860,7 +95569,7 @@ index 5040d44..d43b2b9 100644
return ret;
}
-@@ -3332,7 +3332,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3331,7 +3331,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -94869,7 +95578,7 @@ index 5040d44..d43b2b9 100644
return ret;
}
-@@ -3395,7 +3395,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+@@ -3394,7 +3394,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -94878,7 +95587,7 @@ index 5040d44..d43b2b9 100644
}
return overruns;
-@@ -3566,8 +3566,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3565,8 +3565,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Reset the reader page to size zero.
*/
@@ -94889,7 +95598,7 @@ index 5040d44..d43b2b9 100644
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;
-@@ -3601,7 +3601,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3600,7 +3600,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* want to compare with the last_overrun.
*/
smp_mb();
@@ -94898,7 +95607,7 @@ index 5040d44..d43b2b9 100644
/*
* Here's the tricky part.
-@@ -4173,8 +4173,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4172,8 +4172,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -94909,7 +95618,7 @@ index 5040d44..d43b2b9 100644
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
-@@ -4184,18 +4184,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4183,18 +4183,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -94934,7 +95643,7 @@ index 5040d44..d43b2b9 100644
cpu_buffer->read = 0;
cpu_buffer->read_bytes = 0;
-@@ -4596,8 +4596,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+@@ -4595,8 +4595,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
rb_init_page(bpage);
bpage = reader->page;
reader->page = *data_page;
@@ -94992,10 +95701,10 @@ index 57b67b1..66082a9 100644
+ return atomic64_inc_return_unchecked(&trace_counter);
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index db54dda..b9e4f03 100644
+index a9c10a3..1864f6b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
+@@ -1762,7 +1762,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
return 0;
}
@@ -95004,7 +95713,7 @@ index db54dda..b9e4f03 100644
/* Add an additional event_call dynamically */
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
-index 2d25ad1..5bfd931 100644
+index b6fce36..d9f11a3 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
@@ -95397,6 +96106,20 @@ index 547f7f9..a6d4ba0 100644
if (is_on_stack == onstack)
return;
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 6dd0335..1e9c239 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
+
+ /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
+ uncompressed data. Allocate intermediate buffer for block. */
+- bd->dbufSize = 100000*(i-BZh0);
++ i -= BZh0;
++ bd->dbufSize = 100000 * i;
+
+ bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+ if (!bd->dbuf)
diff --git a/lib/div64.c b/lib/div64.c
index 4382ad7..08aa558 100644
--- a/lib/div64.c
@@ -96293,7 +97016,7 @@ index 123bcd3..0de52ba 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index c41b2a0..100cf92 100644
+index caad3c5..4f68807 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
@@ -97357,7 +98080,7 @@ index 97839f5..4bc5530 100644
mm = get_task_mm(tsk);
if (!mm)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 4721046..6ae2056 100644
+index de5dc5e..68a4ea3 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
@@ -102158,7 +102881,7 @@ index df493d6..1145766 100644
return err;
diff --git a/net/core/dev.c b/net/core/dev.c
-index 45109b7..6b58f14a 100644
+index 22a53ac..1d19af7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
@@ -102569,10 +103292,10 @@ index 3b6899b..cf36238 100644
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 98d45fe..4f9608f 100644
+index e9f9a15..6eb024e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -2121,7 +2121,7 @@ EXPORT_SYMBOL(__skb_checksum);
+@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
{
@@ -102581,7 +103304,7 @@ index 98d45fe..4f9608f 100644
.update = csum_partial_ext,
.combine = csum_block_add_ext,
};
-@@ -3361,12 +3361,14 @@ void __init skb_init(void)
+@@ -3379,12 +3379,14 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -106035,7 +106758,7 @@ index 11de55e..f25e448 100644
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 05919bf..fcb6be3 100644
+index d1d7a81..b45b03d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -260,7 +260,7 @@ static void netlink_overrun(struct sock *sk)
@@ -106047,7 +106770,7 @@ index 05919bf..fcb6be3 100644
}
static void netlink_rcv_wake(struct sock *sk)
-@@ -3004,7 +3004,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -3002,7 +3002,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
@@ -108747,10 +109470,10 @@ index cdb491d..8d32bfc 100755
# Find all available archs
find_all_archs()
diff --git a/security/Kconfig b/security/Kconfig
-index beb86b5..3bc66c1 100644
+index beb86b5..135675f 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,975 @@
+@@ -4,6 +4,980 @@
menu "Security options"
@@ -108818,6 +109541,11 @@ index beb86b5..3bc66c1 100644
+ grsecurity and PaX settings manually. Via this method, no options are
+ automatically enabled.
+
++ Take note that if menuconfig is exited with this configuration method
++ chosen, you will not be able to use the automatic configuration methods
++ without starting again with a kernel configuration with no grsecurity
++ or PaX options specified inside.
++
+endchoice
+
+choice
@@ -109726,7 +110454,7 @@ index beb86b5..3bc66c1 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -103,7 +1072,7 @@ config INTEL_TXT
+@@ -103,7 +1077,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
@@ -117635,10 +118363,10 @@ index 0000000..b8e7188
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..a0b019b
+index 0000000..df41844
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,28039 @@
+@@ -0,0 +1,28043 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL nohasharray
+iwl_set_tx_power_1 iwl_set_tx_power 0 1 &intel_fake_agp_alloc_by_type_1
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
@@ -118005,6 +118733,7 @@ index 0000000..a0b019b
+set_node_desc_885 set_node_desc 0-4 885 NULL
+show_fnode_max_burst_len_889 show_fnode_max_burst_len 0 889 NULL
+vsock_stream_sendmsg_898 vsock_stream_sendmsg 0-4 898 NULL
++btrfs_is_valid_xattr_899 btrfs_is_valid_xattr 0 899 NULL
+drv_attr_show_903 drv_attr_show 0 903 NULL nohasharray
+regulator_bulk_enable_903 regulator_bulk_enable 0 903 &drv_attr_show_903
+tcpprobe_read_904 tcpprobe_read 0-3 904 NULL
@@ -126241,6 +126970,7 @@ index 0000000..a0b019b
+attr_press_speed_store_tpkbd_20100 attr_press_speed_store_tpkbd 0-4 20100 NULL
+snd_es1938_playback1_trigger_20102 snd_es1938_playback1_trigger 0 20102 NULL
+xfs_qm_dqget_20103 xfs_qm_dqget 0 20103 NULL
++fd_do_unmap_20109 fd_do_unmap 4 20109 NULL
+nilfs_segments_clean_segments_show_20115 nilfs_segments_clean_segments_show 0 20115 NULL
+iscsi_tpg_param_store_TargetAlias_20119 iscsi_tpg_param_store_TargetAlias 0-3 20119 NULL
+cx18_s_audio_sampling_freq_20123 cx18_s_audio_sampling_freq 0 20123 NULL nohasharray
@@ -129102,6 +129832,7 @@ index 0000000..a0b019b
+pch_gbe_set_mac_26647 pch_gbe_set_mac 0 26647 NULL
+statfs_quantum_show_26649 statfs_quantum_show 0 26649 NULL
+irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL
++fd_do_prot_fill_26652 fd_do_prot_fill 5-3 26652 NULL
+show_state_desc_26653 show_state_desc 0 26653 NULL nohasharray
+rom_index_show_26653 rom_index_show 0 26653 &show_state_desc_26653
+nouveau_volt_create__26654 nouveau_volt_create_ 4 26654 NULL
@@ -130918,6 +131649,7 @@ index 0000000..a0b019b
+copy_send_mad_30897 copy_send_mad 0 30897 NULL nohasharray
+ubifs_wbuf_seek_nolock_30897 ubifs_wbuf_seek_nolock 0 30897 &copy_send_mad_30897
+pxa168_get_settings_30899 pxa168_get_settings 0 30899 NULL
++fd_do_prot_unmap_30900 fd_do_prot_unmap 3 30900 NULL
+copy_to_iter_30901 copy_to_iter 0-2 30901 NULL
+bq2415x_get_battery_regulation_voltage_30903 bq2415x_get_battery_regulation_voltage 0 30903 NULL nohasharray
+fc_host_post_vendor_event_30903 fc_host_post_vendor_event 3 30903 &bq2415x_get_battery_regulation_voltage_30903
@@ -149481,7 +150213,7 @@ index 0a578fe..b81f62d 100644
})
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index cc6a25d..babf9ee 100644
+index f8f3f5fe..9bc113f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -81,12 +81,17 @@ LIST_HEAD(vm_list);
diff --git a/3.14.40/4425_grsec_remove_EI_PAX.patch b/4.0.2/4425_grsec_remove_EI_PAX.patch
index 86e242a..a80a5d7 100644
--- a/3.14.40/4425_grsec_remove_EI_PAX.patch
+++ b/4.0.2/4425_grsec_remove_EI_PAX.patch
@@ -8,7 +8,7 @@ X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
--- linux-3.7.1-hardened.orig/security/Kconfig 2012-12-26 08:39:29.000000000 -0500
+++ linux-3.7.1-hardened/security/Kconfig 2012-12-26 09:05:44.000000000 -0500
-@@ -273,7 +273,7 @@
+@@ -278,7 +278,7 @@
config PAX_EI_PAX
bool 'Use legacy ELF header marking'
diff --git a/4.0.1/4427_force_XATTR_PAX_tmpfs.patch b/4.0.2/4427_force_XATTR_PAX_tmpfs.patch
index a789f0b..a789f0b 100644
--- a/4.0.1/4427_force_XATTR_PAX_tmpfs.patch
+++ b/4.0.2/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/4.0.1/4430_grsec-remove-localversion-grsec.patch b/4.0.2/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/4.0.1/4430_grsec-remove-localversion-grsec.patch
+++ b/4.0.2/4430_grsec-remove-localversion-grsec.patch
diff --git a/4.0.1/4435_grsec-mute-warnings.patch b/4.0.2/4435_grsec-mute-warnings.patch
index 0585e08..b7564e4 100644
--- a/4.0.1/4435_grsec-mute-warnings.patch
+++ b/4.0.2/4435_grsec-mute-warnings.patch
@@ -29,15 +29,15 @@ warning flags of vanilla kernel versions.
Acked-by: Christian Heim <phreak@gentoo.org>
---
-diff -Naur a/Makefile b/Makefile
---- a/Makefile 2014-12-30 10:18:12.697915684 -0500
-+++ b/Makefile 2014-12-30 10:25:59.132931931 -0500
+diff -Naur linux-4.0.2-hardened.orig/Makefile linux-4.0.2-hardened/Makefile
+--- linux-4.0.2-hardened.orig/Makefile 2015-05-09 21:08:19.243113731 -0400
++++ linux-4.0.2-hardened/Makefile 2015-05-09 21:10:37.775120037 -0400
@@ -298,7 +298,7 @@
HOSTCC = gcc
HOSTCXX = g++
HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
--HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
-+HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
+-HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
++HOSTCFLAGS = -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
diff --git a/4.0.1/4440_grsec-remove-protected-paths.patch b/4.0.2/4440_grsec-remove-protected-paths.patch
index 741546d..741546d 100644
--- a/4.0.1/4440_grsec-remove-protected-paths.patch
+++ b/4.0.2/4440_grsec-remove-protected-paths.patch
diff --git a/4.0.1/4450_grsec-kconfig-default-gids.patch b/4.0.2/4450_grsec-kconfig-default-gids.patch
index 5c025da..61d903e 100644
--- a/4.0.1/4450_grsec-kconfig-default-gids.patch
+++ b/4.0.2/4450_grsec-kconfig-default-gids.patch
@@ -73,7 +73,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Nuar a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/security/Kconfig 2012-10-13 09:52:59.000000000 -0400
-@@ -201,7 +201,7 @@
+@@ -206,7 +206,7 @@
config GRKERNSEC_PROC_GID
int "GID exempted from /proc restrictions"
@@ -82,7 +82,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group will be exempted from
grsecurity's /proc restrictions, allowing users of the specified
-@@ -212,7 +212,7 @@
+@@ -217,7 +217,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -91,7 +91,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group untrusted users should
be added to. These users will be placed under grsecurity's Trusted Path
-@@ -224,7 +224,7 @@
+@@ -229,7 +229,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -100,7 +100,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -233,7 +233,7 @@
+@@ -238,7 +238,7 @@
config GRKERNSEC_SYMLINKOWN_GID
int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
depends on GRKERNSEC_CONFIG_SERVER
diff --git a/4.0.1/4465_selinux-avc_audit-log-curr_ip.patch b/4.0.2/4465_selinux-avc_audit-log-curr_ip.patch
index ba89596..ba89596 100644
--- a/4.0.1/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/4.0.2/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/4.0.1/4470_disable-compat_vdso.patch b/4.0.2/4470_disable-compat_vdso.patch
index 7aefa02..7aefa02 100644
--- a/4.0.1/4470_disable-compat_vdso.patch
+++ b/4.0.2/4470_disable-compat_vdso.patch
diff --git a/3.14.40/4475_emutramp_default_on.patch b/4.0.2/4475_emutramp_default_on.patch
index ad4967a..a128205 100644
--- a/3.14.40/4475_emutramp_default_on.patch
+++ b/4.0.2/4475_emutramp_default_on.patch
@@ -10,7 +10,7 @@ See bug:
diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/security/Kconfig
--- linux-3.9.2-hardened.orig/security/Kconfig 2013-05-18 08:53:41.000000000 -0400
+++ linux-3.9.2-hardened/security/Kconfig 2013-05-18 09:17:57.000000000 -0400
-@@ -433,7 +433,7 @@
+@@ -438,7 +438,7 @@
config PAX_EMUTRAMP
bool "Emulate trampolines"
@@ -19,7 +19,7 @@ diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/secur
depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
help
There are some programs and libraries that for one reason or
-@@ -456,6 +456,12 @@
+@@ -461,6 +461,12 @@
utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
for the affected files.