Diffstat (limited to '3.2.46/1044_linux-3.2.45.patch')
-rw-r--r--  3.2.46/1044_linux-3.2.45.patch  3809
1 file changed, 3809 insertions, 0 deletions
diff --git a/3.2.46/1044_linux-3.2.45.patch b/3.2.46/1044_linux-3.2.45.patch
new file mode 100644
index 0000000..44e1767
--- /dev/null
+++ b/3.2.46/1044_linux-3.2.45.patch
@@ -0,0 +1,3809 @@
+diff --git a/Makefile b/Makefile
+index 566750c..9072fee 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
+index 035fdc9..a8b71f2 100644
+--- a/arch/arm/mach-u300/include/mach/u300-regs.h
++++ b/arch/arm/mach-u300/include/mach/u300-regs.h
+@@ -102,7 +102,7 @@
+
+ #ifdef CONFIG_MACH_U300_BS335
+ /* Fast UART1 on U335 only */
+-#define U300_UART1_BASE (U300_SLOW_PER_PHYS_BASE+0x7000)
++#define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000)
+ #endif
+
+ /*
+diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
+index 21ab376..1bd14d5 100644
+--- a/arch/ia64/include/asm/futex.h
++++ b/arch/ia64/include/asm/futex.h
+@@ -107,16 +107,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ {
+- register unsigned long r8 __asm ("r8");
++ register unsigned long r8 __asm ("r8") = 0;
+ unsigned long prev;
+ __asm__ __volatile__(
+ " mf;; \n"
+- " mov %0=r0 \n"
+ " mov ar.ccv=%4;; \n"
+ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+ "[2:]"
+- : "=r" (r8), "=r" (prev)
++ : "+r" (r8), "=&r" (prev)
+ : "r" (uaddr), "r" (newval),
+ "rO" ((long) (unsigned) oldval)
+ : "memory");
+diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
+index 43f96ab..8c70961 100644
+--- a/arch/ia64/include/asm/mca.h
++++ b/arch/ia64/include/asm/mca.h
+@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
+ extern int cpe_vector;
+ extern int ia64_cpe_irq;
+ extern void ia64_mca_init(void);
++extern void ia64_mca_irq_init(void);
+ extern void ia64_mca_cpu_init(void *);
+ extern void ia64_os_mca_dispatch(void);
+ extern void ia64_os_mca_dispatch_end(void);
+diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
+index ad69606..f2c41828 100644
+--- a/arch/ia64/kernel/irq.c
++++ b/arch/ia64/kernel/irq.c
+@@ -23,6 +23,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
+
++#include <asm/mca.h>
++
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
+
+ #endif /* CONFIG_SMP */
+
++int __init arch_early_irq_init(void)
++{
++ ia64_mca_irq_init();
++ return 0;
++}
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ unsigned int vectors_in_migration[NR_IRQS];
+
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index 84fb405..9b97303 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -2071,22 +2071,16 @@ ia64_mca_init(void)
+ printk(KERN_INFO "MCA related initialization done\n");
+ }
+
++
+ /*
+- * ia64_mca_late_init
+- *
+- * Opportunity to setup things that require initialization later
+- * than ia64_mca_init. Setup a timer to poll for CPEs if the
+- * platform doesn't support an interrupt driven mechanism.
+- *
+- * Inputs : None
+- * Outputs : Status
++ * These pieces cannot be done in ia64_mca_init() because it is called before
++ * early_irq_init() which would wipe out our percpu irq registrations. But we
++ * cannot leave them until ia64_mca_late_init() because by then all the other
++ * processors have been brought online and have set their own CMC vectors to
++ * point at a non-existent action. Called from arch_early_irq_init().
+ */
+-static int __init
+-ia64_mca_late_init(void)
++void __init ia64_mca_irq_init(void)
+ {
+- if (!mca_init)
+- return 0;
+-
+ /*
+ * Configure the CMCI/P vector and handler. Interrupts for CMC are
+ * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+@@ -2105,6 +2099,23 @@ ia64_mca_late_init(void)
+ /* Setup the CPEI/P handler */
+ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ #endif
++}
++
++/*
++ * ia64_mca_late_init
++ *
++ * Opportunity to setup things that require initialization later
++ * than ia64_mca_init. Setup a timer to poll for CPEs if the
++ * platform doesn't support an interrupt driven mechanism.
++ *
++ * Inputs : None
++ * Outputs : Status
++ */
++static int __init
++ia64_mca_late_init(void)
++{
++ if (!mca_init)
++ return 0;
+
+ register_hotcpu_notifier(&mca_cpu_notifier);
+
+diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
+index 4332f7e..a7869f8 100644
+--- a/arch/ia64/kvm/vtlb.c
++++ b/arch/ia64/kvm/vtlb.c
+@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+ "srlz.d;;"
+ "ssm psr.i;;"
+ "srlz.d;;"
+- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
++ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
+
+ return ret;
+ }
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index cdf6b3f..2c49227 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -502,6 +502,7 @@ _GLOBAL(copy_and_flush)
+ sync
+ addi r5,r5,8
+ addi r6,r6,8
++ isync
+ blr
+
+ .align 8
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index b22a83a..24523dc 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -221,7 +221,7 @@ int __node_distance(int a, int b)
+ int distance = LOCAL_DISTANCE;
+
+ if (!form1_affinity)
+- return distance;
++ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
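
Before this change a kernel without form-1 affinity data reported every node pair as LOCAL_DISTANCE, so the scheduler and page allocator treated remote NUMA nodes as local; the fix reports REMOTE_DISTANCE whenever a != b. Modeled in plain C with the generic distance constants (10 and 20, as assumed from include/linux/topology.h):

#include <stdio.h>

#define LOCAL_DISTANCE  10      /* generic kernel defaults */
#define REMOTE_DISTANCE 20

/* Simplified model of the fixed fallback path only. */
static int node_distance(int a, int b, int form1_affinity)
{
        if (!form1_affinity)
                return (a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return LOCAL_DISTANCE;  /* lookup-table path elided */
}

int main(void)
{
        printf("%d %d\n", node_distance(0, 0, 0),
               node_distance(0, 1, 0));         /* prints "10 20" */
        return 0;
}
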
+diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
+index e481f6b..70ec4e9 100644
+--- a/arch/powerpc/platforms/cell/spufs/inode.c
++++ b/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -100,6 +100,7 @@ spufs_new_inode(struct super_block *sb, int mode)
+ if (!inode)
+ goto out;
+
++ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 4f289ff..5aaf0bf 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -67,6 +67,10 @@ static inline int is_zero_pfn(unsigned long pfn)
+
+ #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
++/* TODO: s390 cannot support io_remap_pfn_range... */
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
++ remap_pfn_range(vma, vaddr, pfn, size, prot)
++
+ #endif /* !__ASSEMBLY__ */
+
+ /*
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 38ebb2c..ddbbea3 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -781,6 +781,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+ }
+
++#include <asm/tlbflush.h>
+ #include <asm-generic/pgtable.h>
+
+ /* We provide our own get_unmapped_area to cope with VA holes and
+diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
+index 10bcabc..f856c7f 100644
+--- a/arch/sparc/include/asm/system_64.h
++++ b/arch/sparc/include/asm/system_64.h
+@@ -140,8 +140,7 @@ do { \
+ * and 2 stores in this critical code path. -DaveM
+ */
+ #define switch_to(prev, next, last) \
+-do { flush_tlb_pending(); \
+- save_and_clear_fpu(); \
++do { save_and_clear_fpu(); \
+ /* If you are tempted to conditionalize the following */ \
+ /* so that ASI is only written if it changes, think again. */ \
+ __asm__ __volatile__("wr %%g0, %0, %%asi" \
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index 2ef4634..f0d6a97 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -11,24 +11,40 @@
+ struct tlb_batch {
+ struct mm_struct *mm;
+ unsigned long tlb_nr;
++ unsigned long active;
+ unsigned long vaddrs[TLB_BATCH_NR];
+ };
+
+ extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+ extern void flush_tsb_user(struct tlb_batch *tb);
++extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+
+ /* TLB flush operations. */
+
+-extern void flush_tlb_pending(void);
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long vmaddr)
++{
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++}
++
++#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+-#define flush_tlb_range(vma,start,end) \
+- do { (void)(start); flush_tlb_pending(); } while (0)
+-#define flush_tlb_page(vma,addr) flush_tlb_pending()
+-#define flush_tlb_mm(mm) flush_tlb_pending()
++extern void flush_tlb_pending(void);
++extern void arch_enter_lazy_mmu_mode(void);
++extern void arch_leave_lazy_mmu_mode(void);
++#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+ /* Local cpu only. */
+ extern void __flush_tlb_all(void);
+-
++extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
+ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+ #ifndef CONFIG_SMP
+@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
+ __flush_tlb_kernel_range(start,end); \
+ } while (0)
+
++static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
++}
++
+ #else /* CONFIG_SMP */
+
+ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
++extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+
+ #define flush_tlb_kernel_range(start, end) \
+ do { flush_tsb_kernel_range(start,end); \
+ smp_flush_tlb_kernel_range(start, end); \
+ } while (0)
+
++#define global_flush_tlb_page(mm, vaddr) \
++ smp_flush_tlb_page(mm, vaddr)
++
+ #endif /* ! CONFIG_SMP */
+
+ #endif /* _SPARC64_TLBFLUSH_H */
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 7560772..e21d3c0d 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -856,7 +856,7 @@ void smp_tsb_sync(struct mm_struct *mm)
+ }
+
+ extern unsigned long xcall_flush_tlb_mm;
+-extern unsigned long xcall_flush_tlb_pending;
++extern unsigned long xcall_flush_tlb_page;
+ extern unsigned long xcall_flush_tlb_kernel_range;
+ extern unsigned long xcall_fetch_glob_regs;
+ extern unsigned long xcall_receive_signal;
+@@ -1070,23 +1070,56 @@ local_flush_and_out:
+ put_cpu();
+ }
+
++struct tlb_pending_info {
++ unsigned long ctx;
++ unsigned long nr;
++ unsigned long *vaddrs;
++};
++
++static void tlb_pending_func(void *info)
++{
++ struct tlb_pending_info *t = info;
++
++ __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
++}
++
+ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
++ struct tlb_pending_info info;
+ int cpu = get_cpu();
+
++ info.ctx = ctx;
++ info.nr = nr;
++ info.vaddrs = vaddrs;
++
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+ else
+- smp_cross_call_masked(&xcall_flush_tlb_pending,
+- ctx, nr, (unsigned long) vaddrs,
+- mm_cpumask(mm));
++ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
++ &info, 1);
+
+ __flush_tlb_pending(ctx, nr, vaddrs);
+
+ put_cpu();
+ }
+
++void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ unsigned long context = CTX_HWBITS(mm->context);
++ int cpu = get_cpu();
++
++ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
++ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
++ else
++ smp_cross_call_masked(&xcall_flush_tlb_page,
++ context, vaddr, 0,
++ mm_cpumask(mm));
++ __flush_tlb_page(context, vaddr);
++
++ put_cpu();
++}
++
+ void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ {
+ start &= PAGE_MASK;
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index b1f279c..afd021e 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
+ void flush_tlb_pending(void)
+ {
+ struct tlb_batch *tb = &get_cpu_var(tlb_batch);
++ struct mm_struct *mm = tb->mm;
+
+- if (tb->tlb_nr) {
+- flush_tsb_user(tb);
++ if (!tb->tlb_nr)
++ goto out;
+
+- if (CTX_VALID(tb->mm->context)) {
++ flush_tsb_user(tb);
++
++ if (CTX_VALID(mm->context)) {
++ if (tb->tlb_nr == 1) {
++ global_flush_tlb_page(mm, tb->vaddrs[0]);
++ } else {
+ #ifdef CONFIG_SMP
+ smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+ &tb->vaddrs[0]);
+@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
+ tb->tlb_nr, &tb->vaddrs[0]);
+ #endif
+ }
+- tb->tlb_nr = 0;
+ }
+
++ tb->tlb_nr = 0;
++
++out:
+ put_cpu_var(tlb_batch);
+ }
+
++void arch_enter_lazy_mmu_mode(void)
++{
++ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
++
++ tb->active = 1;
++}
++
++void arch_leave_lazy_mmu_mode(void)
++{
++ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
++
++ if (tb->tlb_nr)
++ flush_tlb_pending();
++ tb->active = 0;
++}
++
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig, int fullmm)
+ {
+@@ -90,6 +114,12 @@ no_cache_flush:
+ nr = 0;
+ }
+
++ if (!tb->active) {
++ global_flush_tlb_page(mm, vaddr);
++ flush_tsb_user_page(mm, vaddr);
++ goto out;
++ }
++
+ if (nr == 0)
+ tb->mm = mm;
+
+@@ -98,5 +128,6 @@ no_cache_flush:
+ if (nr >= TLB_BATCH_NR)
+ flush_tlb_pending();
+
++out:
+ put_cpu_var(tlb_batch);
+ }
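
Taken together, the tlbflush_64.h and tlb.c hunks make the per-cpu tlb_batch defer flushes only between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(); outside that window every page is flushed immediately through global_flush_tlb_page(), and a batch holding a single address also takes the cheaper single-page path. A stripped-down userspace model of the state machine (names mirror the patch; the flush bodies are stand-ins):

#include <stdio.h>

#define TLB_BATCH_NR 8

struct tlb_batch {
        unsigned long tlb_nr;
        unsigned long active;
        unsigned long vaddrs[TLB_BATCH_NR];
};

static struct tlb_batch tb;

static void flush_one(unsigned long vaddr)
{
        printf("flush page %#lx immediately\n", vaddr);
}

static void flush_tlb_pending(void)
{
        unsigned long i;

        for (i = 0; i < tb.tlb_nr; i++)
                printf("flush batched page %#lx\n", tb.vaddrs[i]);
        tb.tlb_nr = 0;
}

static void arch_enter_lazy_mmu_mode(void)
{
        tb.active = 1;
}

static void arch_leave_lazy_mmu_mode(void)
{
        if (tb.tlb_nr)
                flush_tlb_pending();
        tb.active = 0;
}

static void tlb_batch_add(unsigned long vaddr)
{
        if (!tb.active) {               /* not lazy: flush right away */
                flush_one(vaddr);
                return;
        }
        tb.vaddrs[tb.tlb_nr++] = vaddr;
        if (tb.tlb_nr >= TLB_BATCH_NR)  /* batch full: drain it */
                flush_tlb_pending();
}

int main(void)
{
        tlb_batch_add(0x1000);          /* immediate */
        arch_enter_lazy_mmu_mode();
        tlb_batch_add(0x2000);          /* deferred */
        tlb_batch_add(0x3000);          /* deferred */
        arch_leave_lazy_mmu_mode();     /* drains both */
        return 0;
}
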
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 536412d..3ebcac7 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -8,11 +8,10 @@
+ #include <linux/slab.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+-#include <asm/tlbflush.h>
+-#include <asm/tlb.h>
+-#include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
++#include <asm/mmu_context.h>
+ #include <asm/tsb.h>
++#include <asm/tlb.h>
+ #include <asm/oplib.h>
+
+ extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+@@ -47,23 +46,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
+ }
+ }
+
+-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+- unsigned long tsb, unsigned long nentries)
++static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
++ unsigned long hash_shift,
++ unsigned long nentries)
+ {
+- unsigned long i;
++ unsigned long tag, ent, hash;
+
+- for (i = 0; i < tb->tlb_nr; i++) {
+- unsigned long v = tb->vaddrs[i];
+- unsigned long tag, ent, hash;
++ v &= ~0x1UL;
++ hash = tsb_hash(v, hash_shift, nentries);
++ ent = tsb + (hash * sizeof(struct tsb));
++ tag = (v >> 22UL);
+
+- v &= ~0x1UL;
++ tsb_flush(ent, tag);
++}
+
+- hash = tsb_hash(v, hash_shift, nentries);
+- ent = tsb + (hash * sizeof(struct tsb));
+- tag = (v >> 22UL);
++static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
++ unsigned long tsb, unsigned long nentries)
++{
++ unsigned long i;
+
+- tsb_flush(ent, tag);
+- }
++ for (i = 0; i < tb->tlb_nr; i++)
++ __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
+ }
+
+ void flush_tsb_user(struct tlb_batch *tb)
+@@ -91,6 +94,30 @@ void flush_tsb_user(struct tlb_batch *tb)
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+ }
+
++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ unsigned long nentries, base, flags;
++
++ spin_lock_irqsave(&mm->context.lock, flags);
++
++ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
++ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++ base = __pa(base);
++ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
++
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
++ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
++ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
++ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++ base = __pa(base);
++ __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
++ }
++#endif
++ spin_unlock_irqrestore(&mm->context.lock, flags);
++}
++
+ #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+ #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
+ #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index 874162a..dd10caa 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
+ nop
+
+ .align 32
++ .globl __flush_tlb_page
++__flush_tlb_page: /* 22 insns */
++ /* %o0 = context, %o1 = vaddr */
++ rdpr %pstate, %g7
++ andn %g7, PSTATE_IE, %g2
++ wrpr %g2, %pstate
++ mov SECONDARY_CONTEXT, %o4
++ ldxa [%o4] ASI_DMMU, %g2
++ stxa %o0, [%o4] ASI_DMMU
++ andcc %o1, 1, %g0
++ andn %o1, 1, %o3
++ be,pn %icc, 1f
++ or %o3, 0x10, %o3
++ stxa %g0, [%o3] ASI_IMMU_DEMAP
++1: stxa %g0, [%o3] ASI_DMMU_DEMAP
++ membar #Sync
++ stxa %g2, [%o4] ASI_DMMU
++ sethi %hi(KERNBASE), %o4
++ flush %o4
++ retl
++ wrpr %g7, 0x0, %pstate
++ nop
++ nop
++ nop
++ nop
++
++ .align 32
+ .globl __flush_tlb_pending
+ __flush_tlb_pending: /* 26 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
+ retl
+ wrpr %g7, 0x0, %pstate
+
++__cheetah_flush_tlb_page: /* 22 insns */
++ /* %o0 = context, %o1 = vaddr */
++ rdpr %pstate, %g7
++ andn %g7, PSTATE_IE, %g2
++ wrpr %g2, 0x0, %pstate
++ wrpr %g0, 1, %tl
++ mov PRIMARY_CONTEXT, %o4
++ ldxa [%o4] ASI_DMMU, %g2
++ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
++ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
++ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
++ stxa %o0, [%o4] ASI_DMMU
++ andcc %o1, 1, %g0
++ be,pn %icc, 1f
++ andn %o1, 1, %o3
++ stxa %g0, [%o3] ASI_IMMU_DEMAP
++1: stxa %g0, [%o3] ASI_DMMU_DEMAP
++ membar #Sync
++ stxa %g2, [%o4] ASI_DMMU
++ sethi %hi(KERNBASE), %o4
++ flush %o4
++ wrpr %g0, 0, %tl
++ retl
++ wrpr %g7, 0x0, %pstate
++
+ __cheetah_flush_tlb_pending: /* 27 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ rdpr %pstate, %g7
+@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
+ retl
+ nop
+
++__hypervisor_flush_tlb_page: /* 11 insns */
++ /* %o0 = context, %o1 = vaddr */
++ mov %o0, %g2
++ mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
++ mov %g2, %o1 /* ARG1: mmu context */
++ mov HV_MMU_ALL, %o2 /* ARG2: flags */
++ srlx %o0, PAGE_SHIFT, %o0
++ sllx %o0, PAGE_SHIFT, %o0
++ ta HV_MMU_UNMAP_ADDR_TRAP
++ brnz,pn %o0, __hypervisor_tlb_tl0_error
++ mov HV_MMU_UNMAP_ADDR_TRAP, %o1
++ retl
++ nop
++
+ __hypervisor_flush_tlb_pending: /* 16 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ sllx %o1, 3, %g1
+@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
+ call tlb_patch_one
+ mov 19, %o2
+
++ sethi %hi(__flush_tlb_page), %o0
++ or %o0, %lo(__flush_tlb_page), %o0
++ sethi %hi(__cheetah_flush_tlb_page), %o1
++ or %o1, %lo(__cheetah_flush_tlb_page), %o1
++ call tlb_patch_one
++ mov 22, %o2
++
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
+ nop
+ nop
+
+- .globl xcall_flush_tlb_pending
+-xcall_flush_tlb_pending: /* 21 insns */
+- /* %g5=context, %g1=nr, %g7=vaddrs[] */
+- sllx %g1, 3, %g1
++ .globl xcall_flush_tlb_page
++xcall_flush_tlb_page: /* 17 insns */
++ /* %g5=context, %g1=vaddr */
+ mov PRIMARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
+@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
+ or %g5, %g4, %g5
+ mov PRIMARY_CONTEXT, %g4
+ stxa %g5, [%g4] ASI_DMMU
+-1: sub %g1, (1 << 3), %g1
+- ldx [%g7 + %g1], %g5
+- andcc %g5, 0x1, %g0
++ andcc %g1, 0x1, %g0
+ be,pn %icc, 2f
+-
+- andn %g5, 0x1, %g5
++ andn %g1, 0x1, %g5
+ stxa %g0, [%g5] ASI_IMMU_DEMAP
+ 2: stxa %g0, [%g5] ASI_DMMU_DEMAP
+ membar #Sync
+- brnz,pt %g1, 1b
+- nop
+ stxa %g2, [%g4] ASI_DMMU
+ retry
+ nop
++ nop
+
+ .globl xcall_flush_tlb_kernel_range
+ xcall_flush_tlb_kernel_range: /* 25 insns */
+@@ -596,15 +664,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+ membar #Sync
+ retry
+
+- .globl __hypervisor_xcall_flush_tlb_pending
+-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+- /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
+- sllx %g1, 3, %g1
++ .globl __hypervisor_xcall_flush_tlb_page
++__hypervisor_xcall_flush_tlb_page: /* 17 insns */
++ /* %g5=ctx, %g1=vaddr */
+ mov %o0, %g2
+ mov %o1, %g3
+ mov %o2, %g4
+-1: sub %g1, (1 << 3), %g1
+- ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
++ mov %g1, %o0 /* ARG0: virtual address */
+ mov %g5, %o1 /* ARG1: mmu context */
+ mov HV_MMU_ALL, %o2 /* ARG2: flags */
+ srlx %o0, PAGE_SHIFT, %o0
+@@ -613,8 +679,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+ mov HV_MMU_UNMAP_ADDR_TRAP, %g6
+ brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ mov %o0, %g5
+- brnz,pt %g1, 1b
+- nop
+ mov %g2, %o0
+ mov %g3, %o1
+ mov %g4, %o2
+@@ -697,6 +761,13 @@ hypervisor_patch_cachetlbops:
+ call tlb_patch_one
+ mov 10, %o2
+
++ sethi %hi(__flush_tlb_page), %o0
++ or %o0, %lo(__flush_tlb_page), %o0
++ sethi %hi(__hypervisor_flush_tlb_page), %o1
++ or %o1, %lo(__hypervisor_flush_tlb_page), %o1
++ call tlb_patch_one
++ mov 11, %o2
++
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__hypervisor_flush_tlb_pending), %o1
+@@ -728,12 +799,12 @@ hypervisor_patch_cachetlbops:
+ call tlb_patch_one
+ mov 21, %o2
+
+- sethi %hi(xcall_flush_tlb_pending), %o0
+- or %o0, %lo(xcall_flush_tlb_pending), %o0
+- sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
+- or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
++ sethi %hi(xcall_flush_tlb_page), %o0
++ or %o0, %lo(xcall_flush_tlb_page), %o0
++ sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
++ or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
+ call tlb_patch_one
+- mov 21, %o2
++ mov 17, %o2
+
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 957c216..4bb12f7 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -130,8 +130,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
+ };
+
+ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
+- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
++ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
++ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
++ EVENT_EXTRA_END
++};
++
++static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
++ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
++ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+ EVENT_EXTRA_END
+ };
+
+@@ -1711,7 +1717,10 @@ __init int intel_pmu_init(void)
+
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+- x86_pmu.extra_regs = intel_snb_extra_regs;
++ if (boot_cpu_data.x86_model == 45)
++ x86_pmu.extra_regs = intel_snbep_extra_regs;
++ else
++ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 34a7f40..a4cca06 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -44,11 +44,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ int i;
+ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+ unsigned long start = 0, good_end;
++ unsigned long pgd_extra = 0;
+ phys_addr_t base;
+
+ for (i = 0; i < nr_range; i++) {
+ unsigned long range, extra;
+
++ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
++ pgd_extra++;
++
+ range = mr[i].end - mr[i].start;
+ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+@@ -73,6 +77,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
++ tables += (pgd_extra * PAGE_SIZE);
+
+ #ifdef CONFIG_X86_32
+ /* for fixmap */
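
The pgd_extra accounting reserves one extra page-table page for every mapping range that crosses a PGDIR (top-level) boundary, a case the old estimate missed. The boundary test is just a shift-and-compare; a quick userspace check of the same expression (PGDIR_SHIFT = 39 assumed, as on x86-64 with 4-level paging; the sample ranges are invented):

#include <stdio.h>

#define PGDIR_SHIFT 39          /* x86-64, 4-level paging */

struct map_range { unsigned long start, end; };

int main(void)
{
        struct map_range mr[] = {
                { 0x0000000000UL, 0x007c000000UL }, /* one PGD entry */
                { 0x7ff0000000UL, 0x8010000000UL }, /* crosses 1<<39 */
        };
        unsigned long pgd_extra = 0;
        int i;

        for (i = 0; i < 2; i++)
                if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
                        pgd_extra++;

        printf("extra pgd pages: %lu\n", pgd_extra); /* prints 1 */
        return 0;
}
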
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 69b9ef6..044f5d9 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1391,8 +1391,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ xen_vcpu_setup(cpu);
+- if (xen_have_vector_callback)
++ if (xen_have_vector_callback) {
+ xen_init_lock_cpu(cpu);
++ if (xen_feature(XENFEAT_hvm_safe_pvclock))
++ xen_setup_timer(cpu);
++ }
+ break;
+ default:
+ break;
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 9a23fff..6e4d5dc 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -563,6 +563,8 @@ static void xen_hvm_cpu_die(unsigned int cpu)
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
++ xen_uninit_lock_cpu(cpu);
++ xen_teardown_timer(cpu);
+ native_cpu_die(cpu);
+ }
+
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 0296a95..054cc01 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -497,7 +497,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
+ {
+ int cpu = smp_processor_id();
+ xen_setup_runstate_info(cpu);
+- xen_setup_timer(cpu);
++ /*
++ * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
++ * doing it in xen_hvm_cpu_notify (which gets called by smp_init during
++ * early bootup and also during CPU hotplug events).
++ */
+ xen_setup_cpu_clockevents();
+ }
+
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index ef5356c..0262210 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
++ msg->msg_namelen = 0;
++
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 6a6dfc0..a1c4f0a 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ long copied = 0;
+
+ lock_sock(sk);
++ msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
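
Both msg_namelen hunks close the same kernel-stack infoleak: recvmsg() copies msg_namelen back to userspace, and the AF_ALG handlers never wrote it, so callers received uninitialized kernel stack bytes. The field is visible from userspace like this (illustrative AF_ALG hash session; error handling elided for brevity):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "hash",
                .salg_name   = "sha1",
        };
        char digest[20], name[64];
        struct iovec iov = { .iov_base = digest, .iov_len = sizeof(digest) };
        struct msghdr msg = {
                .msg_name    = name,
                .msg_namelen = sizeof(name),    /* kernel writes this back */
                .msg_iov     = &iov,
                .msg_iovlen  = 1,
        };
        int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        int op;

        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, 0);
        write(op, "abc", 3);
        recvmsg(op, &msg, 0);
        /* patched kernels print 0; unpatched ones printed stack garbage */
        printf("msg_namelen = %u\n", msg.msg_namelen);
        return 0;
}
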
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index 7aff631..5b0f075 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -247,8 +247,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+ *control &= OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ } else {
+- /* Run _OSC query for all possible controls. */
+- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
++ /* Run _OSC query only with existing controls. */
++ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+ }
+
+ status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 0833896..14d49e4 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -374,26 +374,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+ struct hpet_dev *devp;
+ unsigned long addr;
+
+- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
+- return -EINVAL;
+-
+ devp = file->private_data;
+ addr = devp->hd_hpets->hp_hpet_phys;
+
+ if (addr & (PAGE_SIZE - 1))
+ return -ENOSYS;
+
+- vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+- PAGE_SIZE, vma->vm_page_prot)) {
+- printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
+- __func__);
+- return -EAGAIN;
+- }
+-
+- return 0;
++ return vm_iomap_memory(vma, addr, PAGE_SIZE);
+ #else
+ return -ENOSYS;
+ #endif
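
vm_iomap_memory() exists upstream for precisely this kind of mmap handler: it validates vm_pgoff and the requested length against the backing region, sets the VM_IO-style flags, and calls io_remap_pfn_range() itself, replacing the checks hpet_mmap() had gotten wrong. A minimal handler in the new style looks roughly like this (a sketch, not buildable standalone; struct foo_dev and its fields are hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

struct foo_dev {
        phys_addr_t phys_base;  /* page-aligned register block */
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_dev *dev = file->private_data;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        /* bounds-checks the vma against the region, then remaps it */
        return vm_iomap_memory(vma, dev->phys_base, PAGE_SIZE);
}
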
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index ca67338..c77fc67 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1007,56 +1007,50 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-#define PTE_ADDRESS_MASK 0xfffff000
+-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+-#define PTE_MAPPING_TYPE_MASK (3 << 1)
+-#define PTE_VALID (1 << 0)
+-
+-/**
+- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+- * a physical one
+- * @dev: drm device
+- * @offset: address to translate
+- *
+- * Some chip functions require allocations from stolen space and need the
+- * physical address of the memory in question.
+- */
+-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
++static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+-#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+- * is unreliable, so compute the base by subtracting the stolen memory
+- * from the Top of Low Usable DRAM which is where the BIOS places
+- * the graphics stolen memory.
++ * is unreliable, so on those compute the base by subtracting the
++ * stolen memory from the Top of Low Usable DRAM which is where the
++ * BIOS places the graphics stolen memory.
++ *
++ * On gen2, the layout is slightly different with the Graphics Segment
++ * immediately following Top of Memory (or Top of Usable DRAM). Note
++ * it appears that TOUD is only reported by 865g, so we just use the
++ * top of memory as determined by the e820 probe.
++ *
++ * XXX gen2 requires an unavailable symbol and 945gm fails with
++ * its value of TOLUD.
+ */
+- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- /* top 32bits are reserved = 0 */
++ base = 0;
++ if (INTEL_INFO(dev)->gen >= 6) {
++ /* Read Base Data of Stolen Memory Register (BDSM) directly.
++ * Note that there is also a MCHBAR mirror at 0x1080c0 or
++ * we could use device 2:0x5c instead.
++ */
++ pci_read_config_dword(pdev, 0xB0, &base);
++ base &= ~4095; /* lower bits used for locking register */
++ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ /* Read Graphics Base of Stolen Memory directly */
+ pci_read_config_dword(pdev, 0xA4, &base);
+- } else {
+- /* XXX presume 8xx is the same as i915 */
+- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+- }
+-#else
+- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- u16 val;
+- pci_read_config_word(pdev, 0xb0, &val);
+- base = val >> 4 << 20;
+- } else {
++#if 0
++ } else if (IS_GEN3(dev)) {
+ u8 val;
++ /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+- }
+- base -= dev_priv->mm.gtt->stolen_size;
++ base -= dev_priv->mm.gtt->stolen_size;
++ } else {
++ /* Stolen is immediately above Top of Memory */
++ base = max_low_pfn_mapped << PAGE_SHIFT;
+ #endif
++ }
+
+- return base + offset;
++ return base;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+@@ -1081,7 +1075,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_fb)
+ goto err;
+
+- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
++ cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
+ if (!cfb_base)
+ goto err_fb;
+
+@@ -1094,7 +1088,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_llb)
+ goto err_fb;
+
+- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
++ ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
+ if (!ll_base)
+ goto err_llb;
+ }
+@@ -1113,7 +1107,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+- cfb_base, ll_base, size >> 20);
++ (long)cfb_base, (long)ll_base, size >> 20);
+ return;
+
+ err_llb:
+@@ -1187,6 +1181,13 @@ static int i915_load_gem_init(struct drm_device *dev)
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
++ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
++ if (dev_priv->mm.stolen_base == 0)
++ return 0;
++
++ DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
++ dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
++
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 144d37c..20cd295 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,6 +581,7 @@ typedef struct drm_i915_private {
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
++ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index b0186b8..2865b44 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2520,6 +2520,11 @@ i915_find_fence_reg(struct drm_device *dev,
+ return avail;
+ }
+
++static void i915_gem_write_fence__ipi(void *data)
++{
++ wbinvd();
++}
++
+ /**
+ * i915_gem_object_get_fence - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+@@ -2640,6 +2645,17 @@ update:
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
++ /* In order to fully serialize access to the fenced region and
++ * the update to the fence register we need to take extreme
++ * measures on SNB+. In theory, the write to the fence register
++ * flushes all memory transactions before, and coupled with the
++ * mb() placed around the register write we serialise all memory
++ * operations with respect to the changes in the tiler. Yet, on
++ * SNB+ we need to take a step further and emit an explicit wbinvd()
++ * on each processor in order to manually flush all memory
++ * transactions before updating the fence register.
++ */
++ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
+ break;
+ case 5:
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 897ca06..cfbb893 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9093,6 +9093,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ del_timer_sync(&dev_priv->idle_timer);
+ cancel_work_sync(&dev_priv->idle_work);
+
++ /* destroy backlight, if any, before the connectors */
++ intel_panel_destroy_backlight(dev);
++
+ drm_mode_config_cleanup(dev);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index c8ecaab..a07ccab 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2274,11 +2274,6 @@ done:
+ static void
+ intel_dp_destroy(struct drm_connector *connector)
+ {
+- struct drm_device *dev = connector->dev;
+-
+- if (intel_dpd_is_edp(dev))
+- intel_panel_destroy_backlight(dev);
+-
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 6eda1b5..8ac91b8 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -371,6 +371,7 @@ void intel_dvo_init(struct drm_device *dev)
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
+ int gpio;
++ bool dvoinit;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+@@ -390,7 +391,17 @@ void intel_dvo_init(struct drm_device *dev)
+ i2c = &dev_priv->gmbus[gpio].adapter;
+
+ intel_dvo->dev = *dvo;
+- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
++
++ /* GMBUS NAK handling seems to be unstable, hence let the
++ * transmitter detection run in bit banging mode for now.
++ */
++ intel_gmbus_force_bit(i2c, true);
++
++ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
++
++ intel_gmbus_force_bit(i2c, false);
++
++ if (!dvoinit)
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 6601d21..876bac0 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -553,8 +553,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+- intel_panel_destroy_backlight(dev);
+-
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
+ drm_sysfs_connector_remove(connector);
+@@ -788,6 +786,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Fujitsu Esprimo Q900",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 72b8949..04cb34a 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -361,6 +361,9 @@ int intel_panel_setup_backlight(struct drm_device *dev)
+
+ intel_panel_init_backlight(dev);
+
++ if (WARN_ON(dev_priv->backlight))
++ return -ENODEV;
++
+ if (dev_priv->int_lvds_connector)
+ connector = dev_priv->int_lvds_connector;
+ else if (dev_priv->int_edp_connector)
+@@ -388,8 +391,10 @@ int intel_panel_setup_backlight(struct drm_device *dev)
+ void intel_panel_destroy_backlight(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- if (dev_priv->backlight)
++ if (dev_priv->backlight) {
+ backlight_device_unregister(dev_priv->backlight);
++ dev_priv->backlight = NULL;
++ }
+ }
+ #else
+ int intel_panel_setup_backlight(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index 3a05cdb..d969f3c 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -1387,10 +1387,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
++ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
++ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
++ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
+ ctx->scratch_size_bytes = 0;
+ if (usage_bytes == 0)
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index a25d08a..038570a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -544,6 +544,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ /* use frac fb div on APUs */
+ if (ASIC_IS_DCE41(rdev))
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
++ /* use frac fb div on RS780/RS880 */
++ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ } else {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 60d13fe..0495a50 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -412,6 +412,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
++ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++ /* don't try to enable hpd on eDP or LVDS to avoid breaking the
++ * aux dp channel on imac and help (but not completely fix)
++ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
++ * also avoid interrupt storms during dpms.
++ */
++ continue;
++ }
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index c45d921..57a825d 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -506,7 +506,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
+ offset = radeon_encoder->hdmi_offset;
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++ } else if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+@@ -572,7 +572,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
+
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++ } else if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 38585c5..383b38e 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1989,6 +1989,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
++ if (num_modes == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+@@ -2361,6 +2363,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
++ if (power_info->pplib.ucNumStates == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ power_info->pplib.ucNumStates, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+@@ -2443,6 +2447,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
++ u8 *power_state_offset;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+@@ -2459,15 +2464,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ non_clock_info_array = (struct NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
++ if (state_array->ucNumEntries == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
++ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+- power_state = (union pplib_power_state *)&state_array->states[i];
+- /* XXX this might be an inagua bug... */
+- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
++ power_state = (union pplib_power_state *)power_state_offset;
++ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+@@ -2479,9 +2486,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ if (power_state->v2.ucNumDPMLevels) {
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+- /* XXX this might be an inagua bug... */
+- if (clock_array_index >= clock_info_array->ucNumEntries)
+- continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+@@ -2503,6 +2507,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ non_clock_info);
+ state_index++;
+ }
++ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+@@ -2549,7 +2554,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+ default:
+ break;
+ }
+- } else {
++ }
++
++ if (state_index == 0) {
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+ if (rdev->pm.power_state) {
+ rdev->pm.power_state[0].clock_info =
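
The data-structure point behind the power-table fix above: each pplib power state is a variable-length record, two fixed bytes followed by ucNumDPMLevels clock-info index bytes, so indexing state_array->states[] with a fixed stride (the old code) reads the wrong entries whenever a state has more levels than the stride assumes. The byte-offset walk, modeled in plain C (the record layout here is illustrative, not the real ATOM structure):

#include <stdio.h>

/* Variable-length record: 2 fixed bytes, then `nlevels` index bytes. */
struct rec {
        unsigned char nonclock_index;
        unsigned char nlevels;
};

int main(void)
{
        /* two records packed back to back: 2 levels, then 1 level */
        unsigned char blob[] = { 7, 2, 0xa, 0xb, 9, 1, 0xc };
        unsigned char *off = blob;
        int i;

        for (i = 0; i < 2; i++) {
                struct rec *r = (struct rec *)off;

                printf("state %d: nonclock=%u levels=%u\n",
                       i, r->nonclock_index, r->nlevels);
                off += 2 + r->nlevels;  /* advance past the variable tail */
        }
        return 0;
}
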
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index be2c122..4bb9e27 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -39,8 +39,12 @@ int radeon_driver_unload_kms(struct drm_device *dev)
+
+ if (rdev == NULL)
+ return 0;
++ if (rdev->rmmio == NULL)
++ goto done_free;
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
++
++done_free:
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index ebd6c51..d58eccb 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -863,7 +863,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+ struct radeon_device *rdev = dev->dev_private;
+
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
++ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
++ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
++ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
++ else
++ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+ if (rdev->asic->get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 4bb68f3..64e7065 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -311,10 +311,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
+ /* last message in transfer -> STOP */
+ data |= XIIC_TX_DYN_STOP_MASK;
+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
+-
+- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+- } else
+- xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ }
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 1702133..2d0544c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1588,8 +1588,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ sector, count, 1) == 0)
+ return -EINVAL;
+ }
+- } else if (sb->bblog_offset == 0)
+- rdev->badblocks.shift = -1;
++ } else if (sb->bblog_offset != 0)
++ rdev->badblocks.shift = 0;
+
+ if (!refdev) {
+ ret = 1;
+@@ -3063,7 +3063,7 @@ int md_rdev_init(struct md_rdev *rdev)
+ * be used - I wonder if that matters
+ */
+ rdev->badblocks.count = 0;
+- rdev->badblocks.shift = 0;
++ rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
+ rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ seqlock_init(&rdev->badblocks.lock);
+ if (rdev->badblocks.page == NULL)
+@@ -3135,9 +3135,6 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
+ goto abort_free;
+ }
+ }
+- if (super_format == -1)
+- /* hot-add for 0.90, or non-persistent: so no badblocks */
+- rdev->badblocks.shift = -1;
+
+ return rdev;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index fc07f90..b436b84 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1866,6 +1866,7 @@ err_detach:
+ write_unlock_bh(&bond->lock);
+
+ err_close:
++ slave_dev->priv_flags &= ~IFF_BONDING;
+ dev_close(slave_dev);
+
+ err_unset_master:
+@@ -4853,9 +4854,18 @@ static int __net_init bond_net_init(struct net *net)
+ static void __net_exit bond_net_exit(struct net *net)
+ {
+ struct bond_net *bn = net_generic(net, bond_net_id);
++ struct bonding *bond, *tmp_bond;
++ LIST_HEAD(list);
+
+ bond_destroy_sysfs(bn);
+ bond_destroy_proc_dir(bn);
++
++ /* Kill off any bonds created after unregistering bond rtnl ops */
++ rtnl_lock();
++ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
++ unregister_netdevice_queue(bond->dev, &list);
++ unregister_netdevice_many(&list);
++ rtnl_unlock();
+ }
+
+ static struct pernet_operations bond_net_ops = {
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+index edfdf6b..b5fd934 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
+ /* how about 0x2000 */
+ #define MAX_TX_BUF_LEN 0x2000
+ #define MAX_TX_BUF_SHIFT 13
+-/*#define MAX_TX_BUF_LEN 0x3000 */
++#define MAX_TSO_SEG_SIZE 0x3c00
+
+ /* rrs word 1 bit 0:31 */
+ #define RRS_RX_CSUM_MASK 0xFFFF
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index c69dc29..dd893b3 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -2352,6 +2352,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
+
+ INIT_WORK(&adapter->reset_task, atl1e_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
++ netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "register netdevice failed\n");
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index f67b8ae..69c3adf 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -127,7 +127,6 @@ struct gianfar_ptp_registers {
+
+ #define DRIVER "gianfar_ptp"
+ #define DEFAULT_CKSEL 1
+-#define N_ALARM 1 /* first alarm is used internally to reset fipers */
+ #define N_EXT_TS 2
+ #define REG_SIZE sizeof(struct gianfar_ptp_registers)
+
+@@ -410,7 +409,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
+ .owner = THIS_MODULE,
+ .name = "gianfar clock",
+ .max_adj = 512000,
+- .n_alarm = N_ALARM,
++ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .pps = 1,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 41396fa..d93eee1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1937,6 +1937,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
+ * with the write to EICR.
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
++
++ /* The lower 16bits of the EICR register are for the queue interrupts
++ * which should be masked here in order to not accidently clear them if
++ * the bits are high when ixgbe_msix_other is called. There is a race
++ * condition otherwise which results in possible performance loss
++ * especially if the ixgbe_msix_other interrupt is triggering
++ * consistently (as it would when PPS is turned on for the X540 device)
++ */
++ eicr &= 0xFFFF0000;
++
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
+
+ if (eicr & IXGBE_EICR_LSC)
+@@ -5408,7 +5418,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
+
+ pci_wake_from_d3(pdev, false);
+
++ rtnl_lock();
+ err = ixgbe_init_interrupt_scheme(adapter);
++ rtnl_unlock();
+ if (err) {
+ e_dev_err("Cannot initialize interrupts for device\n");
+ return err;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index d812790..f698183 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -1629,8 +1629,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
+
+ if (opts2 & RxVlanTag)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+-
+- desc->opts2 = 0;
+ }
+
+ static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+@@ -5566,6 +5564,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ goto err_stop_0;
+ }
+
++ /* 8168evl does not automatically pad to minimum length. */
++ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
++ skb->len < ETH_ZLEN)) {
++ if (skb_padto(skb, ETH_ZLEN))
++ goto err_update_stats;
++ skb_put(skb, ETH_ZLEN - skb->len);
++ }
++
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+@@ -5633,6 +5639,7 @@ err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ err_dma_0:
+ dev_kfree_skb(skb);
++err_update_stats:
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+
+@@ -5814,7 +5821,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ dev->stats.rx_fifo_errors++;
+ }
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr = le64_to_cpu(desc->addr);
+@@ -5828,16 +5834,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+- continue;
++ goto release_descriptor;
+ }
+
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+- continue;
++ goto release_descriptor;
+ }
+
+ rtl8169_rx_csum(skb, status);
+@@ -5851,6 +5855,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
++release_descriptor:
++ desc->opts2 = 0;
++ wmb();
++ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ }
+
+ count = cur_rx - tp->cur_rx;
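
The 8168evl hunk works around hardware that does not pad short frames to the 60-byte Ethernet minimum (ETH_ZLEN): skb_padto() zero-extends the buffer and skb_put() then accounts for the padded length. The length math, modeled in plain C outside the kernel (buffer and sizes are made up):

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60     /* minimum frame length, FCS excluded */

/* Zero-pad a short frame up to ETH_ZLEN; 0 means no room (like a
 * failed skb_padto()). */
static size_t pad_frame(unsigned char *buf, size_t len, size_t cap)
{
        if (len >= ETH_ZLEN)
                return len;
        if (cap < ETH_ZLEN)
                return 0;
        memset(buf + len, 0, ETH_ZLEN - len);
        return ETH_ZLEN;
}

int main(void)
{
        unsigned char frame[64] = "short-payload";

        printf("tx length: %zu\n", pad_frame(frame, 13, sizeof(frame)));
        /* prints "tx length: 60" */
        return 0;
}
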
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index ccf1524..3935994 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -563,6 +563,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ {
+ struct iwl_addsta_cmd sta_cmd;
++ static const struct iwl_link_quality_cmd zero_lq = {};
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+@@ -602,7 +603,9 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ else
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+- send_lq = true;
++
++ if (!memcmp(&lq, &zero_lq, sizeof(lq)))
++ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index 3cf4ecc..621b84f 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -1821,9 +1821,9 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+ if (pdev) {
+ pci_iounmap(pdev, card->pci_mmap);
+ pci_iounmap(pdev, card->pci_mmap1);
+-
+- pci_release_regions(pdev);
+ pci_disable_device(pdev);
++ pci_release_region(pdev, 2);
++ pci_release_region(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
+ }
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 6d4a531..363a5c6 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -664,15 +664,11 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+ error = platform_pci_set_power_state(dev, state);
+ if (!error)
+ pci_update_current_state(dev, state);
+- /* Fall back to PCI_D0 if native PM is not supported */
+- if (!dev->pm_cap)
+- dev->current_state = PCI_D0;
+- } else {
++ } else
+ error = -ENODEV;
+- /* Fall back to PCI_D0 if native PM is not supported */
+- if (!dev->pm_cap)
+- dev->current_state = PCI_D0;
+- }
++
++ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
++ dev->current_state = PCI_D0;
+
+ return error;
+ }
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 05beb6c..e3eed18 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -805,9 +805,8 @@ static int cmos_suspend(struct device *dev)
+ mask = RTC_IRQMASK;
+ tmp &= ~mask;
+ CMOS_WRITE(tmp, RTC_CONTROL);
++ hpet_mask_rtc_irq_bit(mask);
+
+- /* shut down hpet emulation - we don't need it for alarm */
+- hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
+ cmos_checkintr(cmos, tmp);
+ }
+ spin_unlock_irq(&rtc_lock);
+@@ -872,6 +871,7 @@ static int cmos_resume(struct device *dev)
+ rtc_update_irq(cmos->rtc, 1, mask);
+ tmp &= ~RTC_AIE;
+ hpet_mask_rtc_irq_bit(RTC_AIE);
++ hpet_rtc_timer_init();
+ } while (mask & RTC_AIE);
+ spin_unlock_irq(&rtc_lock);
+ }
+diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
+index 0b54a91..a56a15e 100644
+--- a/drivers/s390/char/sclp_cmd.c
++++ b/drivers/s390/char/sclp_cmd.c
+@@ -509,6 +509,8 @@ static void __init sclp_add_standby_memory(void)
+ add_memory_merged(0);
+ }
+
++#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
++
+ static void __init insert_increment(u16 rn, int standby, int assigned)
+ {
+ struct memory_increment *incr, *new_incr;
+@@ -521,7 +523,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
+ new_incr->rn = rn;
+ new_incr->standby = standby;
+ if (!standby)
+- new_incr->usecount = 1;
++ new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
+ last_rn = 0;
+ prev = &sclp_mem_list;
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index d19b879..4735928 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -669,6 +669,9 @@ static int ptmx_open(struct inode *inode, struct file *filp)
+
+ nonseekable_open(inode, filp);
+
++ /* We refuse fsnotify events on ptmx, since it's a shared resource */
++ filp->f_mode |= FMODE_NONOTIFY;
++
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return retval;
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index af5ffb9..488214a 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1901,6 +1901,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
++ put_device(tty_dev);
++
+ if (console_suspend_enabled || !uart_console(uport))
+ uport->suspended = 1;
+
+@@ -1966,9 +1968,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+ disable_irq_wake(uport->irq);
+ uport->irq_wake = 0;
+ }
++ put_device(tty_dev);
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
++ put_device(tty_dev);
+ uport->suspended = 0;
+
+ /*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 05085be..3f35e42 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -940,6 +940,14 @@ void start_tty(struct tty_struct *tty)
+
+ EXPORT_SYMBOL(start_tty);
+
++/* We limit tty time update visibility to every 8 seconds or so. */
++static void tty_update_time(struct timespec *time)
++{
++ unsigned long sec = get_seconds() & ~7;
++ if ((long)(sec - time->tv_sec) > 0)
++ time->tv_sec = sec;
++}
++
+ /**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+@@ -976,8 +984,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ else
+ i = -EIO;
+ tty_ldisc_deref(ld);
++
+ if (i > 0)
+- inode->i_atime = current_fs_time(inode->i_sb);
++ tty_update_time(&inode->i_atime);
++
+ return i;
+ }
+
+@@ -1079,8 +1089,8 @@ static inline ssize_t do_tty_write(
+ cond_resched();
+ }
+ if (written) {
+- struct inode *inode = file->f_path.dentry->d_inode;
+- inode->i_mtime = current_fs_time(inode->i_sb);
++ struct inode *inode = file->f_path.dentry->d_inode;
++ tty_update_time(&inode->i_mtime);
+ ret = written;
+ }
+ out:
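tty_update_time() added above coarsens tty timestamps: the current time is rounded down to a multiple of 8 seconds, and the inode time only ever moves forward. The arithmetic can be exercised on its own; this stand-alone demo is illustrative, not kernel code:

/* Stand-alone demo of the 8-second coarsening in tty_update_time(). */
#include <stdio.h>
#include <time.h>

static void update_time_demo(time_t *stored, time_t now)
{
	time_t sec = now & ~(time_t)7;	/* round down to a multiple of 8 */

	if (sec - *stored > 0)		/* move forward only */
		*stored = sec;
}

int main(void)
{
	time_t stamp = 0;

	for (time_t now = 100; now < 115; now++) {
		update_time_demo(&stamp, now);
		printf("now=%3ld stamp=%3ld\n", (long)now, (long)stamp);
	}
	return 0;	/* stamp only changes when now crosses 104, 112, ... */
}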
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index a9df218..22f770a 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -643,6 +643,8 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+ index &= 0xff;
+ switch (requesttype & USB_RECIP_MASK) {
+ case USB_RECIP_ENDPOINT:
++ if ((index & ~USB_DIR_IN) == 0)
++ return 0;
+ ret = findintfep(ps->dev, index);
+ if (ret >= 0)
+ ret = checkintf(ps, ret);
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index ac0d75a..9f7003e 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = {
+ { APPLEDISPLAY_DEVICE(0x9219) },
+ { APPLEDISPLAY_DEVICE(0x921c) },
+ { APPLEDISPLAY_DEVICE(0x921d) },
++ { APPLEDISPLAY_DEVICE(0x9236) },
+
+ /* Terminating entry */
+ { }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 06394e5a..51d1712 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -195,6 +195,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+@@ -876,7 +877,9 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+- { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
++ { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
+ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+@@ -1816,8 +1819,11 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ }
+
+ /*
+- * First and second port on STMCLiteadaptors is reserved for JTAG interface
+- * and the forth port for pio
++ * First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's
++ * ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and
++ * can be accessed from userspace.
++ * The next two ports are enabled as UARTs by default, where port 2 is
++ * a conventional RS-232 UART.
+ */
+ static int ftdi_stmclite_probe(struct usb_serial *serial)
+ {
+@@ -1826,12 +1832,13 @@ static int ftdi_stmclite_probe(struct usb_serial *serial)
+
+ dbg("%s", __func__);
+
+- if (interface == udev->actconfig->interface[2])
+- return 0;
+-
+- dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
++ if (interface == udev->actconfig->interface[0] ||
++ interface == udev->actconfig->interface[1]) {
++ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
++ return -ENODEV;
++ }
+
+- return -ENODEV;
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 809c03a..2f86008 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -74,6 +74,7 @@
+ #define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
+ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+ #define FTDI_OPENDCC_GBM_PID 0xBFDC
++#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD
+
+ /* NZR SEM 16+ USB (http://www.nzr.de) */
+ #define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
+@@ -1150,7 +1151,8 @@
+ * STMicroelectonics
+ */
+ #define ST_VID 0x0483
+-#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
++#define ST_STMCLT_2232_PID 0x3746
++#define ST_STMCLT_4232_PID 0x3747
+
+ /*
+ * Papouch products (http://www.papouch.com/)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4418538..8513f51 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -347,6 +347,7 @@ static void option_instat_callback(struct urb *urb);
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
++#define OLIVETTI_PRODUCT_OLICARD145 0xc003
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+@@ -1273,6 +1274,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+@@ -1350,6 +1352,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
+index c844718..7341ce2 100644
+--- a/drivers/usb/storage/cypress_atacb.c
++++ b/drivers/usb/storage/cypress_atacb.c
+@@ -248,14 +248,26 @@ static int cypress_probe(struct usb_interface *intf,
+ {
+ struct us_data *us;
+ int result;
++ struct usb_device *device;
+
+ result = usb_stor_probe1(&us, intf, id,
+ (id - cypress_usb_ids) + cypress_unusual_dev_list);
+ if (result)
+ return result;
+
+- us->protocol_name = "Transparent SCSI with Cypress ATACB";
+- us->proto_handler = cypress_atacb_passthrough;
++ /* Among CY7C68300 chips, the A revision does not support Cypress ATACB.
++ * Filter out this revision via its default EEPROM descriptor values.
++ */
++ device = interface_to_usbdev(intf);
++ if (device->descriptor.iManufacturer != 0x38 ||
++ device->descriptor.iProduct != 0x4e ||
++ device->descriptor.iSerialNumber != 0x64) {
++ us->protocol_name = "Transparent SCSI with Cypress ATACB";
++ us->proto_handler = cypress_atacb_passthrough;
++ } else {
++ us->protocol_name = "Transparent SCSI";
++ us->proto_handler = usb_stor_transparent_scsi_command;
++ }
+
+ result = usb_stor_probe2(us);
+ return result;
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 7a36dff..6b4fb5c 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -1229,6 +1229,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ finished:
+
+ fbcon_free_font(p, free_font);
++ if (free_font)
++ vc->vc_font.data = NULL;
+
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index babbb07..0a22808 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1350,15 +1350,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+ {
+ struct fb_info *info = file_fb_info(file);
+ struct fb_ops *fb;
+- unsigned long off;
++ unsigned long mmio_pgoff;
+ unsigned long start;
+ u32 len;
+
+ if (!info)
+ return -ENODEV;
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+- return -EINVAL;
+- off = vma->vm_pgoff << PAGE_SHIFT;
+ fb = info->fbops;
+ if (!fb)
+ return -ENODEV;
+@@ -1370,33 +1367,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+ return res;
+ }
+
+- /* frame buffer memory */
++ /*
++ * Ugh. This can be either the frame buffer mapping, or
++ * if pgoff points past it, the mmio mapping.
++ */
+ start = info->fix.smem_start;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
+- if (off >= len) {
+- /* memory mapped io */
+- off -= len;
+- if (info->var.accel_flags) {
+- mutex_unlock(&info->mm_lock);
+- return -EINVAL;
+- }
++ len = info->fix.smem_len;
++ mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
++ if (vma->vm_pgoff >= mmio_pgoff) {
++ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
++ len = info->fix.mmio_len;
+ }
+ mutex_unlock(&info->mm_lock);
+- start &= PAGE_MASK;
+- if ((vma->vm_end - vma->vm_start + off) > len)
+- return -EINVAL;
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+- /* This is an IO map - tell maydump to skip this VMA */
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+- fb_pgprotect(file, vma, off);
+- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+- return -EAGAIN;
+- return 0;
++ fb_pgprotect(file, vma, start);
++
++ return vm_iomap_memory(vma, start, len);
+ }
+
+ static int
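The rewritten fb_mmap() picks between video memory and register space purely from vma->vm_pgoff: offsets below the page-aligned framebuffer size map smem, anything at or beyond it maps mmio. The split can be checked in isolation; all values below are invented for illustration:

/* Stand-alone check of the fb_mmap() pgoff split; values are invented. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long smem_start = 0xd0000800UL;	/* deliberately unaligned */
	unsigned long smem_len   = 8UL << 20;		/* 8 MiB of video memory */
	unsigned long mmio_pgoff =
		PAGE_ALIGN((smem_start & ~PAGE_MASK) + smem_len) >> PAGE_SHIFT;
	unsigned long probe[] = { 0, mmio_pgoff - 1, mmio_pgoff };
	int i;

	for (i = 0; i < 3; i++)
		printf("pgoff %4lu -> %s\n", probe[i],
		       probe[i] >= mmio_pgoff ? "mmio" : "framebuffer");
	return 0;
}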
+diff --git a/fs/aio.c b/fs/aio.c
+index 3b65ee7..8cdd8ea 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1112,9 +1112,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
+ spin_unlock(&info->ring_lock);
+
+ out:
+- kunmap_atomic(ring, KM_USER0);
+ dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
+ (unsigned long)ring->head, (unsigned long)ring->tail);
++ kunmap_atomic(ring, KM_USER0);
+ return ret;
+ }
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index e923bf4..d322929 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1176,8 +1176,10 @@ void shrink_dcache_parent(struct dentry * parent)
+ LIST_HEAD(dispose);
+ int found;
+
+- while ((found = select_parent(parent, &dispose)) != 0)
++ while ((found = select_parent(parent, &dispose)) != 0) {
+ shrink_dentry_list(&dispose);
++ cond_resched();
++ }
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+
+diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
+index 9ed1bb1..5459168 100644
+--- a/fs/ext4/Kconfig
++++ b/fs/ext4/Kconfig
+@@ -82,4 +82,5 @@ config EXT4_DEBUG
+ Enables run-time debugging support for the ext4 filesystem.
+
+ If you select Y here, then you will be able to turn on debugging
+- with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug"
++ with a command such as:
++ echo 1 > /sys/module/ext4/parameters/mballoc_debug
+diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
+index bb6c7d8..a8d03a4 100644
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -260,8 +260,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ if (journal->j_flags & JBD2_BARRIER &&
+ !jbd2_trans_will_send_data_barrier(journal, commit_tid))
+ needs_barrier = true;
+- jbd2_log_start_commit(journal, commit_tid);
+- ret = jbd2_log_wait_commit(journal, commit_tid);
++ ret = jbd2_complete_transaction(journal, commit_tid);
+ if (needs_barrier)
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ out:
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3270ffd..025b4b6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -147,8 +147,7 @@ void ext4_evict_inode(struct inode *inode)
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+- jbd2_log_start_commit(journal, commit_tid);
+- jbd2_log_wait_commit(journal, commit_tid);
++ jbd2_complete_transaction(journal, commit_tid);
+ filemap_write_and_wait(&inode->i_data);
+ }
+ truncate_inode_pages(&inode->i_data, 0);
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 4765190..73c0bd7 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -276,5 +276,5 @@ const struct file_operations fscache_stats_fops = {
+ .open = fscache_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = single_release,
+ };
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index d751f04..ab9463a 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -326,7 +326,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ int space_left = 0;
+ int first_tag = 0;
+ int tag_flag;
+- int i, to_free = 0;
++ int i;
+ int tag_bytes = journal_tag_bytes(journal);
+ struct buffer_head *cbh = NULL; /* For transactional checksums */
+ __u32 crc32_sum = ~0;
+@@ -996,7 +996,7 @@ restart_loop:
+ journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
+ spin_unlock(&journal->j_history_lock);
+
+- commit_transaction->t_state = T_FINISHED;
++ commit_transaction->t_state = T_COMMIT_CALLBACK;
+ J_ASSERT(commit_transaction == journal->j_committing_transaction);
+ journal->j_commit_sequence = commit_transaction->t_tid;
+ journal->j_committing_transaction = NULL;
+@@ -1011,38 +1011,44 @@ restart_loop:
+ journal->j_average_commit_time*3) / 4;
+ else
+ journal->j_average_commit_time = commit_time;
++
+ write_unlock(&journal->j_state_lock);
+
+- if (commit_transaction->t_checkpoint_list == NULL &&
+- commit_transaction->t_checkpoint_io_list == NULL) {
+- __jbd2_journal_drop_transaction(journal, commit_transaction);
+- to_free = 1;
++ if (journal->j_checkpoint_transactions == NULL) {
++ journal->j_checkpoint_transactions = commit_transaction;
++ commit_transaction->t_cpnext = commit_transaction;
++ commit_transaction->t_cpprev = commit_transaction;
+ } else {
+- if (journal->j_checkpoint_transactions == NULL) {
+- journal->j_checkpoint_transactions = commit_transaction;
+- commit_transaction->t_cpnext = commit_transaction;
+- commit_transaction->t_cpprev = commit_transaction;
+- } else {
+- commit_transaction->t_cpnext =
+- journal->j_checkpoint_transactions;
+- commit_transaction->t_cpprev =
+- commit_transaction->t_cpnext->t_cpprev;
+- commit_transaction->t_cpnext->t_cpprev =
+- commit_transaction;
+- commit_transaction->t_cpprev->t_cpnext =
++ commit_transaction->t_cpnext =
++ journal->j_checkpoint_transactions;
++ commit_transaction->t_cpprev =
++ commit_transaction->t_cpnext->t_cpprev;
++ commit_transaction->t_cpnext->t_cpprev =
++ commit_transaction;
++ commit_transaction->t_cpprev->t_cpnext =
+ commit_transaction;
+- }
+ }
+ spin_unlock(&journal->j_list_lock);
+-
++ /* Drop all spinlocks because the commit callback may block.
++ * __journal_remove_checkpoint() cannot destroy the transaction
++ * under us because it is not yet marked T_FINISHED */
+ if (journal->j_commit_callback)
+ journal->j_commit_callback(journal, commit_transaction);
+
+ trace_jbd2_end_commit(journal, commit_transaction);
+ jbd_debug(1, "JBD2: commit %d complete, head %d\n",
+ journal->j_commit_sequence, journal->j_tail_sequence);
+- if (to_free)
+- kfree(commit_transaction);
+
++ write_lock(&journal->j_state_lock);
++ spin_lock(&journal->j_list_lock);
++ commit_transaction->t_state = T_FINISHED;
++ /* Recheck checkpoint lists after j_list_lock was dropped */
++ if (commit_transaction->t_checkpoint_list == NULL &&
++ commit_transaction->t_checkpoint_io_list == NULL) {
++ __jbd2_journal_drop_transaction(journal, commit_transaction);
++ kfree(commit_transaction);
++ }
++ spin_unlock(&journal->j_list_lock);
++ write_unlock(&journal->j_state_lock);
+ wake_up(&journal->j_wait_done_commit);
+ }
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 0fa0123..17b04fc 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -663,6 +663,37 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
+ }
+
+ /*
++ * When this function returns the transaction corresponding to tid
++ * will be completed. If the transaction is currently running, start
++ * committing that transaction before waiting for it to complete. If
++ * the transaction id is stale, it is by definition already completed,
++ * so just return SUCCESS.
++ */
++int jbd2_complete_transaction(journal_t *journal, tid_t tid)
++{
++ int need_to_wait = 1;
++
++ read_lock(&journal->j_state_lock);
++ if (journal->j_running_transaction &&
++ journal->j_running_transaction->t_tid == tid) {
++ if (journal->j_commit_request != tid) {
++ /* transaction not yet started, so request it */
++ read_unlock(&journal->j_state_lock);
++ jbd2_log_start_commit(journal, tid);
++ goto wait_commit;
++ }
++ } else if (!(journal->j_committing_transaction &&
++ journal->j_committing_transaction->t_tid == tid))
++ need_to_wait = 0;
++ read_unlock(&journal->j_state_lock);
++ if (!need_to_wait)
++ return 0;
++wait_commit:
++ return jbd2_log_wait_commit(journal, tid);
++}
++EXPORT_SYMBOL(jbd2_complete_transaction);
++
++/*
+ * Log buffer allocation routines:
+ */
+
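jbd2_complete_transaction() collapses the start-commit/wait-commit pair that ext4 used to open-code, and skips both steps when the tid has already committed. A hypothetical caller in an fsync-style path (error handling and barriers elided):

/* Sketch: filesystem code waiting on a specific transaction. */
#include <linux/jbd2.h>

static int example_fsync_commit(journal_t *journal, tid_t commit_tid)
{
	/*
	 * The pre-patch equivalent always poked the commit thread:
	 *
	 *	jbd2_log_start_commit(journal, commit_tid);
	 *	ret = jbd2_log_wait_commit(journal, commit_tid);
	 *
	 * The helper starts the commit only if commit_tid is still the
	 * running transaction, and waits only if it has not completed.
	 */
	return jbd2_complete_transaction(journal, commit_tid);
}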
+diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
+index 8d4ea83..de88922 100644
+--- a/fs/lockd/clntlock.c
++++ b/fs/lockd/clntlock.c
+@@ -141,6 +141,9 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
+ timeout);
+ if (ret < 0)
+ return -ERESTARTSYS;
++ /* Reset the lock status after a server reboot so we resend */
++ if (block->b_status == nlm_lck_denied_grace_period)
++ block->b_status = nlm_lck_blocked;
+ req->a_res.status = block->b_status;
+ return 0;
+ }
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index a3a0987..8392cb8 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,9 +551,6 @@ again:
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ if (status < 0)
+ break;
+- /* Resend the blocking lock request after a server reboot */
+- if (resp->status == nlm_lck_denied_grace_period)
+- continue;
+ if (resp->status != nlm_lck_blocked)
+ break;
+ }
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index fe5c5fb..08921b8 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -880,14 +880,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+
+ nfs4_lock_state();
+ status = nfs4_preprocess_stateid_op(cstate, stateid, WR_STATE, &filp);
+- if (filp)
+- get_file(filp);
+- nfs4_unlock_state();
+-
+ if (status) {
++ nfs4_unlock_state();
+ dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
+ return status;
+ }
++ if (filp)
++ get_file(filp);
++ nfs4_unlock_state();
+
+ cnt = write->wr_buflen;
+ write->wr_how_written = write->wr_stable_how;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 7d189dc..4cef99f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -188,13 +188,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
+ {
+ if (atomic_dec_and_test(&fp->fi_access[oflag])) {
+ nfs4_file_put_fd(fp, oflag);
+- /*
+- * It's also safe to get rid of the RDWR open *if*
+- * we no longer have need of the other kind of access
+- * or if we already have the other kind of open:
+- */
+- if (fp->fi_fds[1-oflag]
+- || atomic_read(&fp->fi_access[1 - oflag]) == 0)
++ if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
+ nfs4_file_put_fd(fp, O_RDWR);
+ }
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 24afa96..ade5316 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -360,10 +360,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ all 32 bits of 'nseconds'. */
+ READ_BUF(12);
+ len += 12;
+- READ32(dummy32);
+- if (dummy32)
+- return nfserr_inval;
+- READ32(iattr->ia_atime.tv_sec);
++ READ64(iattr->ia_atime.tv_sec);
+ READ32(iattr->ia_atime.tv_nsec);
+ if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+@@ -386,10 +383,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ all 32 bits of 'nseconds'. */
+ READ_BUF(12);
+ len += 12;
+- READ32(dummy32);
+- if (dummy32)
+- return nfserr_inval;
+- READ32(iattr->ia_mtime.tv_sec);
++ READ64(iattr->ia_mtime.tv_sec);
+ READ32(iattr->ia_mtime.tv_nsec);
+ if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+@@ -2374,8 +2368,7 @@ out_acl:
+ if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.atime.tv_sec);
++ WRITE64((s64)stat.atime.tv_sec);
+ WRITE32(stat.atime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
+@@ -2388,15 +2381,13 @@ out_acl:
+ if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.ctime.tv_sec);
++ WRITE64((s64)stat.ctime.tv_sec);
+ WRITE32(stat.ctime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.mtime.tv_sec);
++ WRITE64((s64)stat.mtime.tv_sec);
+ WRITE32(stat.mtime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 6f292dd..f255d37 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -577,7 +577,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ int add = (arg & IN_MASK_ADD);
+ int ret;
+
+- /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+@@ -628,7 +627,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+
+- /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+@@ -757,6 +755,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
+ int ret, fput_needed;
+ unsigned flags = 0;
+
++ /* don't allow invalid bits: we don't want flags set */
++ if (unlikely(!(mask & ALL_INOTIFY_BITS)))
++ return -EINVAL;
++
+ filp = fget_light(fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
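With the check hoisted into the syscall, a watch whose mask carries no valid IN_* bits now fails immediately with EINVAL rather than being quietly accepted. A small userspace program shows the post-fix behaviour (illustration only):

/* Userspace demo: inotify_add_watch() with an all-zero event mask.
 * With the fix above the kernel rejects it with EINVAL. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	int fd = inotify_init();
	int wd;

	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}
	wd = inotify_add_watch(fd, "/tmp", 0);	/* no IN_* bits set */
	if (wd < 0)
		printf("rejected as expected: %s\n", strerror(errno));
	else
		printf("accepted (pre-fix behaviour), wd=%d\n", wd);
	close(fd);
	return 0;
}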
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 3899e24..e756bc4 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -977,6 +977,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ enum kobj_ns_type type;
+ const void *ns;
+ ino_t ino;
++ loff_t off;
+
+ type = sysfs_ns_type(parent_sd);
+ ns = sysfs_info(dentry->d_sb)->ns[type];
+@@ -999,6 +1000,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ return 0;
+ }
+ mutex_lock(&sysfs_mutex);
++ off = filp->f_pos;
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+ pos;
+ pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
+@@ -1010,19 +1012,24 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ len = strlen(name);
+ ino = pos->s_ino;
+ type = dt_type(pos);
+- filp->f_pos = ino;
++ off = filp->f_pos = ino;
+ filp->private_data = sysfs_get(pos);
+
+ mutex_unlock(&sysfs_mutex);
+- ret = filldir(dirent, name, len, filp->f_pos, ino, type);
++ ret = filldir(dirent, name, len, off, ino, type);
+ mutex_lock(&sysfs_mutex);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&sysfs_mutex);
+- if ((filp->f_pos > 1) && !pos) { /* EOF */
+- filp->f_pos = INT_MAX;
++
++ /* don't reference last entry if its refcount is dropped */
++ if (!pos) {
+ filp->private_data = NULL;
++
++ /* EOF: set f_pos to INT_MAX unless it changed meanwhile or is 0 or 1 */
++ if (off == filp->f_pos && off > 1)
++ filp->f_pos = INT_MAX;
+ }
+ return 0;
+ }
+diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
+index 8a297a5..497c6cc 100644
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -42,8 +42,8 @@ struct ipc_namespace {
+
+ size_t shm_ctlmax;
+ size_t shm_ctlall;
++ unsigned long shm_tot;
+ int shm_ctlmni;
+- int shm_tot;
+ /*
+ * Defines whether IPC_RMID is forced for _all_ shm segments regardless
+ * of shmctl()
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 2092ea2..a153ed5 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -470,6 +470,7 @@ struct transaction_s
+ T_COMMIT,
+ T_COMMIT_DFLUSH,
+ T_COMMIT_JFLUSH,
++ T_COMMIT_CALLBACK,
+ T_FINISHED
+ } t_state;
+
+@@ -1165,6 +1166,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+ int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+ int jbd2_journal_force_commit_nested(journal_t *journal);
+ int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
++int jbd2_complete_transaction(journal_t *journal, tid_t tid);
+ int jbd2_log_do_checkpoint(journal_t *journal);
+ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 4baadd1..d0493f6 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1509,6 +1509,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
++
+
+ struct page *follow_page(struct vm_area_struct *, unsigned long address,
+ unsigned int foll_flags);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 00ca32b..8c43fd1 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -225,9 +225,9 @@ struct netdev_hw_addr {
+ #define NETDEV_HW_ADDR_T_SLAVE 3
+ #define NETDEV_HW_ADDR_T_UNICAST 4
+ #define NETDEV_HW_ADDR_T_MULTICAST 5
+- bool synced;
+ bool global_use;
+ int refcount;
++ int synced;
+ struct rcu_head rcu_head;
+ };
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index da65890..efe50af 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2367,6 +2367,13 @@ static inline void nf_reset(struct sk_buff *skb)
+ #endif
+ }
+
++static inline void nf_reset_trace(struct sk_buff *skb)
++{
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
++ skb->nf_trace = 0;
++#endif
++}
++
+ /* Note: This doesn't put any conntrack and bridge info in dst. */
+ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ {
+diff --git a/ipc/shm.c b/ipc/shm.c
+index b76be5b..326a20b 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -450,7 +450,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ size_t size = params->u.size;
+ int error;
+ struct shmid_kernel *shp;
+- int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
++ size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ struct file * file;
+ char name[13];
+ int id;
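The shm hunks widen the page counters because an int page count wraps once a segment (or the namespace-wide shm_tot total) reaches 2^31 pages, i.e. 8 TiB with 4 KiB pages. A stand-alone illustration of the wrap on a 64-bit machine:

/* Demo of the wrap fixed by widening numpages and shm_tot. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
	unsigned long long size = 1ULL << 43;	/* an 8 TiB request */
	int narrow = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long long wide = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("int:           %d pages\n", narrow);	/* typically wraps */
	printf("unsigned long: %llu pages\n", wide);	/* 2147483648 */
	return 0;
}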
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 31fdc48..0caf1f8 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -608,9 +608,9 @@ void audit_trim_trees(void)
+ }
+ spin_unlock(&hash_lock);
+ trim_marked(tree);
+- put_tree(tree);
+ drop_collected_mounts(root_mnt);
+ skip_it:
++ put_tree(tree);
+ mutex_lock(&audit_filter_mutex);
+ }
+ list_del(&cursor);
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index c0739f8..d2a01fe 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2029,7 +2029,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+ if (!group)
+ return -ENOMEM;
+ /* pre-allocate to guarantee space while iterating in rcu read-side. */
+- retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
++ retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
+ if (retval)
+ goto out_free_group_list;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7d1f05e..9f21915 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5164,7 +5164,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
+
+ static int perf_swevent_init(struct perf_event *event)
+ {
+- int event_id = event->attr.config;
++ u64 event_id = event->attr.config;
+
+ if (event->attr.type != PERF_TYPE_SOFTWARE)
+ return -ENOENT;
+@@ -5756,6 +5756,7 @@ skip_type:
+ if (pmu->pmu_cpu_context)
+ goto got_cpu_context;
+
++ ret = -ENOMEM;
+ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+ if (!pmu->pmu_cpu_context)
+ goto free_dev;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index e4cee8d..60f7e32 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -298,6 +298,10 @@ ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
+ } else {
+ unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+
++ /* Make sure nsec fits into long */
++ if (unlikely(nsec > KTIME_SEC_MAX))
++ return (ktime_t){ .tv64 = KTIME_MAX };
++
+ tmp = ktime_set((long)nsec, rem);
+ }
+
+@@ -1308,6 +1312,8 @@ retry:
+
+ expires = ktime_sub(hrtimer_get_expires(timer),
+ base->offset);
++ if (expires.tv64 < 0)
++ expires.tv64 = KTIME_MAX;
+ if (expires.tv64 < expires_next.tv64)
+ expires_next = expires;
+ break;
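The new ktime_sub_ns() check matters on 32-bit configurations, where ktime_set() takes the seconds as a long: a large enough nanosecond delta yields a seconds value past KTIME_SEC_MAX, so the result is clamped to KTIME_MAX instead of being silently truncated. The constants below restate the kernel's definitions as assumptions, for a stand-alone illustration:

/* Illustration of the overflow ktime_sub_ns() now guards against.
 * KTIME_SEC_MAX is assumed to be LONG_MAX on a 32-bit kernel. */
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define KTIME_SEC_MAX	2147483647ULL	/* 32-bit LONG_MAX */

int main(void)
{
	unsigned long long nsec = 4000000000ULL * NSEC_PER_SEC; /* ~127 years */
	unsigned long long sec = nsec / NSEC_PER_SEC;

	if (sec > KTIME_SEC_MAX)
		puts("seconds exceed KTIME_SEC_MAX: clamp to KTIME_MAX");
	else
		printf("fits: %llu s\n", sec);
	return 0;
}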
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index cd068b2..c3509fb 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -66,6 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
+ */
+ int tick_check_broadcast_device(struct clock_event_device *dev)
+ {
++ struct clock_event_device *cur = tick_broadcast_device.evtdev;
++
+ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+@@ -73,6 +75,8 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
+ return 0;
+
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
++ if (cur)
++ cur->event_handler = clockevents_handle_noop;
+ tick_broadcast_device.evtdev = dev;
+ if (!cpumask_empty(tick_get_broadcast_mask()))
+ tick_broadcast_start_periodic(dev);
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index da6c9ec..ead79bc 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -323,6 +323,7 @@ static void tick_shutdown(unsigned int *cpup)
+ */
+ dev->mode = CLOCK_EVT_MODE_UNUSED;
+ clockevents_exchange_device(dev, NULL);
++ dev->event_handler = clockevents_handle_noop;
+ td->evtdev = NULL;
+ }
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 5527211..24b3759 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -554,7 +554,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+
+ pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+- for (i = 0; i < pages; i++) {
++ for (i = 1; i < pages; i++) {
+ pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pg->next)
+ goto out_free;
+@@ -3303,7 +3303,8 @@ out:
+ if (fail)
+ return -EINVAL;
+
+- ftrace_graph_filter_enabled = 1;
++ ftrace_graph_filter_enabled = !!(*idx);
++
+ return 0;
+ }
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17edb14..0ec6c34 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4563,6 +4563,8 @@ static __init int tracer_init_debugfs(void)
+ trace_access_lock_init();
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("tracing_enabled", 0644, d_tracer,
+ &global_trace, &tracing_ctrl_fops);
+@@ -4696,36 +4698,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
+ iter->cpu_file = TRACE_PIPE_ALL_CPU;
+ }
+
+-static void
+-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
++void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+ {
+- static arch_spinlock_t ftrace_dump_lock =
+- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+ /* use static because iter can be a bit big for the stack */
+ static struct trace_iterator iter;
++ static atomic_t dump_running;
+ unsigned int old_userobj;
+- static int dump_ran;
+ unsigned long flags;
+ int cnt = 0, cpu;
+
+- /* only one dump */
+- local_irq_save(flags);
+- arch_spin_lock(&ftrace_dump_lock);
+- if (dump_ran)
+- goto out;
+-
+- dump_ran = 1;
++ /* Only allow one dump user at a time. */
++ if (atomic_inc_return(&dump_running) != 1) {
++ atomic_dec(&dump_running);
++ return;
++ }
+
++ /*
++ * Always turn off tracing when we dump.
++ * We don't need to show trace output of what happens
++ * between multiple crashes.
++ *
++ * If the user does a sysrq-z, then they can re-enable
++ * tracing with echo 1 > tracing_on.
++ */
+ tracing_off();
+
+- /* Did function tracer already get disabled? */
+- if (ftrace_is_dead()) {
+- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+- printk("# MAY BE MISSING FUNCTION EVENTS\n");
+- }
+-
+- if (disable_tracing)
+- ftrace_kill();
++ local_irq_save(flags);
+
+ trace_init_global_iter(&iter);
+
+@@ -4758,6 +4756,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+
+ printk(KERN_TRACE "Dumping ftrace buffer:\n");
+
++ /* Did function tracer already get disabled? */
++ if (ftrace_is_dead()) {
++ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
++ printk("# MAY BE MISSING FUNCTION EVENTS\n");
++ }
++
+ /*
+ * We need to stop all tracing on all CPUS to read the
+ * the next buffer. This is a bit expensive, but is
+@@ -4796,26 +4800,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+ printk(KERN_TRACE "---------------------------------\n");
+
+ out_enable:
+- /* Re-enable tracing if requested */
+- if (!disable_tracing) {
+- trace_flags |= old_userobj;
++ trace_flags |= old_userobj;
+
+- for_each_tracing_cpu(cpu) {
+- atomic_dec(&iter.tr->data[cpu]->disabled);
+- }
+- tracing_on();
++ for_each_tracing_cpu(cpu) {
++ atomic_dec(&iter.tr->data[cpu]->disabled);
+ }
+-
+- out:
+- arch_spin_unlock(&ftrace_dump_lock);
++ atomic_dec(&dump_running);
+ local_irq_restore(flags);
+ }
+-
+-/* By default: disable tracing after the dump */
+-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+-{
+- __ftrace_dump(true, oops_dump_mode);
+-}
++EXPORT_SYMBOL_GPL(ftrace_dump);
+
+ __init static int tracer_alloc_buffers(void)
+ {
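The ftrace_dump() rewrite swaps the arch spinlock for a non-blocking atomic_inc_return() guard: the caller that raises the counter to 1 wins, and every other concurrent caller undoes its increment and returns. The same idiom in portable C11, for illustration:

/* C11 rendering of the "only one dumper at a time" guard. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

static void example_dump(void)
{
	/* Equivalent of: if (atomic_inc_return(&dump_running) != 1) */
	if (atomic_fetch_add(&dump_running, 1) + 1 != 1) {
		atomic_fetch_sub(&dump_running, 1);
		return;			/* someone else is already dumping */
	}

	puts("dumping...");		/* exclusive section, never sleeps */

	atomic_fetch_sub(&dump_running, 1);
}

int main(void)
{
	example_dump();
	return 0;
}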
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 288541f..09fd98a 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
+ /* Maximum number of functions to trace before diagnosing a hang */
+ #define GRAPH_MAX_FUNC_TEST 100000000
+
+-static void
+-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
+ static unsigned int graph_hang_thresh;
+
+ /* Wrap the real function entry probe to avoid possible hanging */
+@@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+ if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+ ftrace_graph_stop();
+ printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+- if (ftrace_dump_on_oops)
+- __ftrace_dump(false, DUMP_ALL);
++ if (ftrace_dump_on_oops) {
++ ftrace_dump(DUMP_ALL);
++ /* ftrace_dump() disables tracing */
++ tracing_on();
++ }
+ return 0;
+ }
+
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index 77575b3..c5b20a3 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -17,13 +17,24 @@
+
+ #define STACK_TRACE_ENTRIES 500
+
++#ifdef CC_USING_FENTRY
++# define fentry 1
++#else
++# define fentry 0
++#endif
++
+ static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
+ { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
+ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+
++/*
++ * Reserve one entry for the passed in ip. This will allow
++ * us to remove most or all of the stack size overhead
++ * added by the stack tracer itself.
++ */
+ static struct stack_trace max_stack_trace = {
+- .max_entries = STACK_TRACE_ENTRIES,
+- .entries = stack_dump_trace,
++ .max_entries = STACK_TRACE_ENTRIES - 1,
++ .entries = &stack_dump_trace[1],
+ };
+
+ static unsigned long max_stack_size;
+@@ -37,25 +48,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
+ int stack_tracer_enabled;
+ static int last_stack_tracer_enabled;
+
+-static inline void check_stack(void)
++static inline void
++check_stack(unsigned long ip, unsigned long *stack)
+ {
+ unsigned long this_size, flags;
+ unsigned long *p, *top, *start;
++ static int tracer_frame;
++ int frame_size = ACCESS_ONCE(tracer_frame);
+ int i;
+
+- this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
++ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+ this_size = THREAD_SIZE - this_size;
++ /* Remove the frame of the tracer */
++ this_size -= frame_size;
+
+ if (this_size <= max_stack_size)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(&this_size))
++ if (!object_is_on_stack(stack))
+ return;
+
+ local_irq_save(flags);
+ arch_spin_lock(&max_stack_lock);
+
++ /* In case another CPU set the tracer_frame on us */
++ if (unlikely(!frame_size))
++ this_size -= tracer_frame;
++
+ /* a race could have already updated it */
+ if (this_size <= max_stack_size)
+ goto out;
+@@ -68,10 +88,18 @@ static inline void check_stack(void)
+ save_stack_trace(&max_stack_trace);
+
+ /*
++ * Add the passed in ip from the function tracer.
++ * Searching for this on the stack will skip over
++ * most of the overhead from the stack tracer itself.
++ */
++ stack_dump_trace[0] = ip;
++ max_stack_trace.nr_entries++;
++
++ /*
+ * Now find where in the stack these are.
+ */
+ i = 0;
+- start = &this_size;
++ start = stack;
+ top = (unsigned long *)
+ (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
+
+@@ -95,6 +123,18 @@ static inline void check_stack(void)
+ found = 1;
+ /* Start the search from here */
+ start = p + 1;
++ /*
++ * We do not want to show the overhead
++ * of the stack tracer stack in the
++ * max stack. If we haven't figured
++ * out what that is, then figure it out
++ * now.
++ */
++ if (unlikely(!tracer_frame) && i == 1) {
++ tracer_frame = (p - stack) *
++ sizeof(unsigned long);
++ max_stack_size -= tracer_frame;
++ }
+ }
+ }
+
+@@ -110,6 +150,7 @@ static inline void check_stack(void)
+ static void
+ stack_trace_call(unsigned long ip, unsigned long parent_ip)
+ {
++ unsigned long stack;
+ int cpu;
+
+ if (unlikely(!ftrace_enabled || stack_trace_disabled))
+@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
+ if (per_cpu(trace_active, cpu)++ != 0)
+ goto out;
+
+- check_stack();
++ /*
++ * When fentry is used, the traced function does not get
++ * its stack frame set up, and we lose the parent.
++ * The ip is pretty useless because the function tracer
++ * was called before that function set up its stack frame.
++ * In this case, we use the parent ip.
++ *
++ * By adding the return address of either the parent ip
++ * or the current ip we can disregard most of the stack usage
++ * caused by the stack tracer itself.
++ *
++ * The function tracer always reports the address of where the
++ * mcount call was, but the stack will hold the return address.
++ */
++ if (fentry)
++ ip = parent_ip;
++ else
++ ip += MCOUNT_INSN_SIZE;
++
++ check_stack(ip, &stack);
+
+ out:
+ per_cpu(trace_active, cpu)--;
+@@ -351,6 +411,8 @@ static __init int stack_trace_init(void)
+ struct dentry *d_tracer;
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("stack_max_size", 0644, d_tracer,
+ &max_stack_size, &stack_max_size_fops);
+diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
+index 96cffb2..847f88a 100644
+--- a/kernel/trace/trace_stat.c
++++ b/kernel/trace/trace_stat.c
+@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
+ struct dentry *d_tracing;
+
+ d_tracing = tracing_init_dentry();
++ if (!d_tracing)
++ return 0;
+
+ stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+ if (!stat_dir)
+diff --git a/mm/memory.c b/mm/memory.c
+index 4f2add1..d5f913b 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2309,6 +2309,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+
++/**
++ * vm_iomap_memory - remap memory to userspace
++ * @vma: user vma to map to
++ * @start: start of area
++ * @len: size of area
++ *
++ * This is a simplified io_remap_pfn_range() for common driver use. The
++ * driver just needs to give us the physical memory range to be mapped,
++ * and we'll figure out the rest from the vma information.
++ *
++ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
++ * whatever write-combining details or similar.
++ */
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
++{
++ unsigned long vm_len, pfn, pages;
++
++ /* Check that the physical memory area passed in looks valid */
++ if (start + len < start)
++ return -EINVAL;
++ /*
++ * You *really* shouldn't map things that aren't page-aligned,
++ * but we've historically allowed it because IO memory might
++ * just have smaller alignment.
++ */
++ len += start & ~PAGE_MASK;
++ pfn = start >> PAGE_SHIFT;
++ pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
++ if (pfn + pages < pfn)
++ return -EINVAL;
++
++ /* We start the mapping 'vm_pgoff' pages into the area */
++ if (vma->vm_pgoff > pages)
++ return -EINVAL;
++ pfn += vma->vm_pgoff;
++ pages -= vma->vm_pgoff;
++
++ /* Can we fit all of the mapping? */
++ vm_len = vma->vm_end - vma->vm_start;
++ if (vm_len >> PAGE_SHIFT > pages)
++ return -EINVAL;
++
++ /* Ok, let it rip */
++ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_iomap_memory);
++
+ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ pte_fn_t fn, void *data)
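With vm_iomap_memory() in place, a driver's .mmap handler reduces to handing over the aperture's physical base and length; the helper does the vm_pgoff and size validation that fb_mmap() used to open-code. A hypothetical driver (the example_dev structure and its fields are invented for the sketch):

/* Sketch: a driver mmap handler built on the new helper. */
#include <linux/fs.h>
#include <linux/mm.h>

struct example_dev {
	phys_addr_t	phys_base;	/* start of the device aperture */
	unsigned long	phys_len;	/* its size in bytes */
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *dev = file->private_data;

	/* Optionally adjust caching before mapping, e.g. write-combine: */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* All pgoff/length sanity checks happen inside the helper. */
	return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
}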
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 0ca06e8..43b6bfe 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -500,6 +500,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
++ msg->msg_namelen = 0;
++
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+ if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
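This hunk and the recvmsg changes that follow (ax25, bluetooth, caif, irda, iucv, llc, netrom) all plug the same leak: when no source address is produced, msg_namelen must be zeroed, or the socket layer copies stale stack bytes out to the caller's address buffer. The essence of the pattern, as a simplified sketch not taken verbatim from any one protocol:

/* Simplified sketch of the recvmsg msg_namelen rule. */
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_recvmsg(struct msghdr *msg,
			   struct sockaddr_storage *src, bool have_addr)
{
	if (have_addr && msg->msg_name) {
		memcpy(msg->msg_name, src, sizeof(*src));
		msg->msg_namelen = sizeof(*src);
	} else {
		/* No address: without this, the caller-supplied namelen
		 * is trusted and uninitialized kernel memory leaks out. */
		msg->msg_namelen = 0;
	}
	return 0;
}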
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index b04a6ef..86ac37f 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1641,6 +1641,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
+
++ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+ &digi, NULL, NULL);
+ sax->sax25_family = AF_AX25;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 062124c..838f113 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -245,6 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
++ msg->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -252,8 +254,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ return err;
+ }
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 14c4864..82ce164 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -627,6 +627,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
++ msg->msg_namelen = 0;
+ return 0;
+ }
+
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index a986280..53a8e37 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -320,6 +320,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
++ m->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 720aea0..8e455b8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1619,6 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
++ nf_reset_trace(skb);
+ return netif_rx(skb);
+ }
+ EXPORT_SYMBOL_GPL(dev_forward_skb);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index 0387da0..cd09414 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,7 +57,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->global_use = global;
+- ha->synced = false;
++ ha->synced = 0;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+ return 0;
+@@ -155,7 +155,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ addr_len, ha->type);
+ if (err)
+ break;
+- ha->synced = true;
++ ha->synced++;
+ ha->refcount++;
+ } else if (ha->refcount == 1) {
+ __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+@@ -176,7 +176,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ if (ha->synced) {
+ __hw_addr_del(to_list, ha->addr,
+ addr_len, ha->type);
+- ha->synced = false;
++ ha->synced--;
+ __hw_addr_del(from_list, ha->addr,
+ addr_len, ha->type);
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3b5e680..5b7d5f2 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1064,7 +1064,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
+- if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+
+ if (tb[IFLA_EXT_MASK])
+@@ -1907,7 +1907,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ u32 ext_filter_mask = 0;
+ u16 min_ifinfo_dump_size = 0;
+
+- if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 530787b..238fc3b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -137,8 +137,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ /* skb is pure payload to encrypt */
+
+- err = -ENOMEM;
+-
+ esp = x->data;
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+@@ -174,8 +172,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
+- if (!tmp)
++ if (!tmp) {
++ err = -ENOMEM;
+ goto error;
++ }
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b2cfe83..8f441b2 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -251,8 +251,7 @@ static void ip_expire(unsigned long arg)
+ if (!head->dev)
+ goto out_rcu_unlock;
+
+- /* skb dst is stale, drop it, and perform route lookup again */
+- skb_dst_drop(head);
++ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+@@ -518,8 +517,16 @@ found:
+ qp->q.last_in |= INET_FRAG_FIRST_IN;
+
+ if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+- qp->q.meat == qp->q.len)
+- return ip_frag_reasm(qp, prev, dev);
++ qp->q.meat == qp->q.len) {
++ unsigned long orefdst = skb->_skb_refdst;
++
++ skb->_skb_refdst = 0UL;
++ err = ip_frag_reasm(qp, prev, dev);
++ skb->_skb_refdst = orefdst;
++ return err;
++ }
++
++ skb_dst_drop(skb);
+
+ write_lock(&ip4_frags.lock);
+ list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 769c0e9..8a1bed2 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -347,8 +347,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ * hasn't changed since we received the original syn, but I see
+ * no easy way to do this.
+ */
+- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
++ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
++ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+ inet_sk_flowi_flags(sk),
+ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+ ireq->loc_addr, th->source, th->dest);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 3124e17..872b41d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -115,6 +115,7 @@ int sysctl_tcp_abc __read_mostly;
+ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
+ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
+ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
++#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+
+ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
+ #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
+@@ -3723,6 +3724,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
+ }
+ }
+
++static void tcp_store_ts_recent(struct tcp_sock *tp)
++{
++ tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
++ tp->rx_opt.ts_recent_stamp = get_seconds();
++}
++
++static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
++{
++ if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
++ /* PAWS bug workaround wrt. ACK frames, the PAWS discard
++ * extra check below makes sure this can only happen
++ * for pure ACK frames. -DaveM
++ *
++ * Not only, also it occurs for expired timestamps.
++ */
++
++ if (tcp_paws_check(&tp->rx_opt, 0))
++ tcp_store_ts_recent(tp);
++ }
++}
++
+ /* This routine deals with incoming acks, but not outgoing ones. */
+ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ {
+@@ -3771,6 +3793,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ prior_fackets = tp->fackets_out;
+ prior_in_flight = tcp_packets_in_flight(tp);
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ if (flag & FLAG_UPDATE_TS_RECENT)
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+ /* Window is constant, pure forward advance.
+ * No more checks are required.
+@@ -4061,27 +4089,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+ EXPORT_SYMBOL(tcp_parse_md5sig_option);
+ #endif
+
+-static inline void tcp_store_ts_recent(struct tcp_sock *tp)
+-{
+- tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+- tp->rx_opt.ts_recent_stamp = get_seconds();
+-}
+-
+-static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+-{
+- if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+- /* PAWS bug workaround wrt. ACK frames, the PAWS discard
+- * extra check below makes sure this can only happen
+- * for pure ACK frames. -DaveM
+- *
+- * Not only, also it occurs for expired timestamps.
+- */
+-
+- if (tcp_paws_check(&tp->rx_opt, 0))
+- tcp_store_ts_recent(tp);
+- }
+-}
+-
+ /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
+ *
+ * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
+@@ -5552,14 +5559,10 @@ slow_path:
+ return 0;
+
+ step5:
+- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
++ if (th->ack &&
++ tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
+ goto discard;
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+ /* Process urgent data. */
+@@ -5923,7 +5926,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ /* step 5: check the ACK field */
+ if (th->ack) {
+- int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
++ int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
++ FLAG_UPDATE_TS_RECENT) > 0;
+
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+@@ -6030,11 +6034,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ } else
+ goto discard;
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
+
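
The tcp_input.c hunks above move the ts_recent update out of the callers and into tcp_ack(), gated on the new FLAG_UPDATE_TS_RECENT bit, so the remembered peer timestamp is only replaced once the ACK is known to be acceptable; previously an out-of-window ACK could still rewrite ts_recent and stall PAWS. The guard relies on wrap-safe serial-number arithmetic. A minimal userspace sketch, assuming tcp_paws_check(&tp->rx_opt, 0) reduces to a signed 32-bit difference test:

    /* Hedged sketch of the wrap-safe comparison behind the guard: store
     * the new timestamp only when it is not older than ts_recent, so a
     * wrapped 32-bit timestamp clock still compares correctly. */
    #include <stdint.h>
    #include <stdio.h>

    static int ts_not_older(uint32_t ts_recent, uint32_t rcv_tsval)
    {
            return (int32_t)(ts_recent - rcv_tsval) <= 0;
    }

    int main(void)
    {
            printf("%d\n", ts_not_older(100, 200));       /* 1: newer, store  */
            printf("%d\n", ts_not_older(200, 100));       /* 0: older, ignore */
            printf("%d\n", ts_not_older(0xffffffffu, 5)); /* 1: wrapped clock */
            return 0;
    }
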
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 8589c2d..d84033b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2404,6 +2404,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
+ static void init_loopback(struct net_device *dev)
+ {
+ struct inet6_dev *idev;
++ struct net_device *sp_dev;
++ struct inet6_ifaddr *sp_ifa;
++ struct rt6_info *sp_rt;
+
+ /* ::1 */
+
+@@ -2415,6 +2418,30 @@ static void init_loopback(struct net_device *dev)
+ }
+
+ add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
++
++ /* Add routes to other interface's IPv6 addresses */
++ for_each_netdev(dev_net(dev), sp_dev) {
++ if (!strcmp(sp_dev->name, dev->name))
++ continue;
++
++ idev = __in6_dev_get(sp_dev);
++ if (!idev)
++ continue;
++
++ read_lock_bh(&idev->lock);
++ list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
++
++ if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
++ continue;
++
++ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
++
++ /* Failure cases are ignored */
++ if (!IS_ERR(sp_rt))
++ ip6_ins_rt(sp_rt);
++ }
++ read_unlock_bh(&idev->lock);
++ }
+ }
+
+ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 2b0a4ca..411fe2c 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -386,8 +386,17 @@ found:
+ }
+
+ if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+- fq->q.meat == fq->q.len)
+- return ip6_frag_reasm(fq, prev, dev);
++ fq->q.meat == fq->q.len) {
++ int res;
++ unsigned long orefdst = skb->_skb_refdst;
++
++ skb->_skb_refdst = 0UL;
++ res = ip6_frag_reasm(fq, prev, dev);
++ skb->_skb_refdst = orefdst;
++ return res;
++ }
++
++ skb_dst_drop(skb);
+
+ write_lock(&ip6_frags.lock);
+ list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
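
The reassembly hunk parks the fragment's dst reference in a local while ip6_frag_reasm() runs and drops the dst of fragments that stay queued, so a half-finished reassembly queue no longer pins route cache entries. The shape of the fix, with everything but the reference juggling elided:

    /* Park the reference, run reassembly without it, hand it back. */
    unsigned long orefdst = skb->_skb_refdst;   /* borrow the dst reference */

    skb->_skb_refdst = 0UL;
    res = ip6_frag_reasm(fq, prev, dev);
    skb->_skb_refdst = orefdst;                 /* restore for the caller */
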
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index f4b49c5..91821e9 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
++ msg->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
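
This af_irda hunk, and the matching one-liners for iucv, llc and tipc below, fix one class of bug: a recvmsg() handler that returns without writing a peer address must still set msg_namelen to 0, or the socket core copies a stale length (and stale kernel bytes) back to userspace. From the caller's side the contract looks like this, in a hedged userspace sketch with illustrative fd and buffer handling:

    #include <string.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <sys/socket.h>

    ssize_t recv_with_addr(int fd, void *buf, size_t len,
                           struct sockaddr_storage *addr, socklen_t *alen)
    {
            struct iovec iov = { .iov_base = buf, .iov_len = len };
            struct msghdr msg;
            ssize_t n;

            memset(&msg, 0, sizeof(msg));
            msg.msg_name = addr;
            msg.msg_namelen = sizeof(*addr);        /* capacity going in */
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;

            n = recvmsg(fd, &msg, 0);
            if (n >= 0)
                    *alen = msg.msg_namelen;        /* length coming out; stale
                                                     * if the handler never set it */
            return n;
    }
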
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index cf98d62..e836140 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1356,6 +1356,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int blen;
+ int err = 0;
+
++ msg->msg_namelen = 0;
++
+ if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 99a60d5..e5565c7 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int target; /* Read at least this many bytes */
+ long timeo;
+
++ msg->msg_namelen = 0;
++
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index f156382..3df7c5a 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1178,6 +1178,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ if (sax != NULL) {
++ memset(sax, 0, sizeof(*sax));
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index f9ea925..1f96fb9 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+ if (srose != NULL) {
++ memset(srose, 0, msg->msg_namelen);
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
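
The netrom and rose hunks above close the other half of the same leak: even when the address is filled in, assigning it field by field leaves struct padding and unwritten members holding stale kernel bytes that then travel out via msg_name. Zeroing the whole structure first is the standard cure; a minimal sketch with a hypothetical layout:

    #include <string.h>

    struct full_sockaddr_example {          /* hypothetical layout */
            unsigned short family;
            char           call[7];
            char           pad[3];          /* padding: never assigned below */
    };

    void fill_addr(struct full_sockaddr_example *sa)
    {
            memset(sa, 0, sizeof(*sa));     /* without this, pad[] (and any
                                             * skipped member) leaks old bytes */
            sa->family = 6;                 /* AF_NETROM's value, illustrative */
            memcpy(sa->call, "NOCALL", 7);  /* 6 chars + NUL */
    }
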
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 599f67a..b7cddb9 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -963,8 +963,11 @@ cbq_dequeue(struct Qdisc *sch)
+ cbq_update(q);
+ if ((incr -= incr2) < 0)
+ incr = 0;
++ q->now += incr;
++ } else {
++ if (now > q->now)
++ q->now = now;
+ }
+- q->now += incr;
+ q->now_rt = now;
+
+ for (;;) {
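
The sch_cbq hunk keeps the qdisc's virtual clock monotonic: q->now deliberately runs ahead of wall time while the link is busy, and the old unconditional q->now += incr could let the clock jump backwards after long idle periods, skewing every subsequent rate computation. A sketch of the invariant, with types simplified:

    typedef long long psched_time_t;

    static void cbq_advance_clock(psched_time_t *q_now, psched_time_t now,
                                  psched_time_t incr, int updated)
    {
            if (updated)            /* cbq_update() ran: credit the service time */
                    *q_now += incr;
            else if (now > *q_now)  /* otherwise catch up with real time only if
                                     * it moved forward; never rewind the clock */
                    *q_now = now;
    }
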
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index bf81204..333926d 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
+ return;
+
+ if (atomic_dec_and_test(&key->refcnt)) {
+- kfree(key);
++ kzfree(key);
+ SCTP_DBG_OBJCNT_DEC(keys);
+ }
+ }
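
Switching sctp_auth_key_put() to kzfree() scrubs authentication key material before the slab object is recycled; plain kfree() would leave the key bytes readable in freed memory. In kernels of this vintage kzfree() amounts to the following (hedged sketch of mm/util.c):

    void kzfree(const void *p)
    {
            size_t ks;

            if (unlikely(ZERO_OR_NULL_PTR(p)))
                    return;
            ks = ksize(p);                  /* true allocation size, >= request */
            memset((void *)p, 0, ks);       /* scrub key bytes before release */
            kfree(p);
    }
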
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 42b8324..fdf34af 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -829,6 +829,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+ if (addr) {
+ addr->family = AF_TIPC;
+ addr->addrtype = TIPC_ADDR_ID;
++ memset(&addr->addr, 0, sizeof(addr->addr));
+ addr->addr.id.ref = msg_origport(msg);
+ addr->addr.id.node = msg_orignode(msg);
+ addr->addr.name.domain = 0; /* could leave uninitialized */
+@@ -948,6 +949,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
++ /* will be updated in set_orig_addr() if needed */
++ m->msg_namelen = 0;
++
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1074,6 +1078,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
++ /* will be updated in set_orig_addr() if needed */
++ m->msg_namelen = 0;
++
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 18978b6..5611563 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1956,7 +1956,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+ (UNIXCB(skb).cred != siocb->scm->cred))
+ break;
+- } else {
++ } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+ /* Copy credentials */
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+ check_creds = 1;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 0b08905..21958cd 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -853,7 +853,7 @@ static void handle_channel(struct wiphy *wiphy,
+ return;
+
+ REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
+- chan->flags = IEEE80211_CHAN_DISABLED;
++ chan->flags |= IEEE80211_CHAN_DISABLED;
+ return;
+ }
+
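
The one-character reg.c change matters more than it looks: plain assignment wiped every other flag already set on the channel while disabling it, instead of merely adding the disabled bit. A worked example with hypothetical flag values:

    #include <stdio.h>

    #define CHAN_DISABLED 0x01      /* stand-ins for IEEE80211_CHAN_* bits */
    #define CHAN_NO_IBSS  0x02

    int main(void)
    {
            unsigned int flags = CHAN_NO_IBSS;

            flags |= CHAN_DISABLED;         /* fixed: 0x03, NO_IBSS preserved */
            printf("0x%02x\n", flags);

            flags = CHAN_DISABLED;          /* old bug: 0x01, NO_IBSS wiped */
            printf("0x%02x\n", flags);
            return 0;
    }
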
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 7ada40e..638600b 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3204,18 +3204,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
+ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+ {
+- long size;
+- unsigned long offset;
++	struct snd_pcm_runtime *runtime = substream->runtime;
+
+ area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
+- area->vm_flags |= VM_IO;
+- size = area->vm_end - area->vm_start;
+- offset = area->vm_pgoff << PAGE_SHIFT;
+- if (io_remap_pfn_range(area, area->vm_start,
+- (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
+- size, area->vm_page_prot))
+- return -EAGAIN;
+- return 0;
++ return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
+ }
+
+ EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
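
The snd_pcm_lib_mmap_iomem() rewrite is a hardening fix as much as a cleanup: the open-coded io_remap_pfn_range() trusted the user-supplied mmap offset and length, while vm_iomap_memory() validates the vma against the buffer before remapping. An abridged sketch of the checks, modeled on mm/memory.c of this era with the overflow tests elided:

    int vm_iomap_memory_sketch(struct vm_area_struct *vma,
                               phys_addr_t start, unsigned long len)
    {
            unsigned long pfn    = start >> PAGE_SHIFT;
            unsigned long pages  = (len + (start & ~PAGE_MASK) + ~PAGE_MASK)
                                            >> PAGE_SHIFT;
            unsigned long vm_len = vma->vm_end - vma->vm_start;

            if (vma->vm_pgoff > pages)              /* offset past the buffer */
                    return -EINVAL;
            pfn   += vma->vm_pgoff;
            pages -= vma->vm_pgoff;
            if (vm_len >> PAGE_SHIFT > pages)       /* mapping longer than buffer */
                    return -EINVAL;
            return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len,
                                      vma->vm_page_prot);
    }
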
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index ebbf63c..b7cf246 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -2007,7 +2007,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
+ ret);
+ goto err_access;
+ }
+- dev_info(codec->dev, "revision %c\n", ret + 'A');
++ dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A');
+
+ snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
+
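
The max98088 change fixes the printed revision letter; the revision register evidently reports 0x40 for rev A, 0x41 for rev B, and so on (inferred from the fix itself, not from the datasheet), so the raw value must be rebased before being mapped onto 'A':

    int ret = 0x42;                     /* hypothetical register value */
    char rev = ret - 0x40 + 'A';        /* 0x42 - 0x40 + 'A' == 'C' */
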
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 566acb3..acb7fac 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -611,7 +611,9 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ int err = -ENODEV;
+
+ down_read(&chip->shutdown_rwsem);
+- if (!chip->shutdown && !chip->probing)
++ if (chip->probing)
++ err = 0;
++ else if (!chip->shutdown)
+ err = usb_autopm_get_interface(chip->pm_intf);
+ up_read(&chip->shutdown_rwsem);
+
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 665e297..2b7559c 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -73,6 +73,7 @@ struct snd_usb_substream {
+ unsigned int fill_max: 1; /* fill max packet size always */
+ unsigned int txfr_quirk:1; /* allow sub-frame alignment */
+ unsigned int fmt_type; /* USB audio format type (1-3) */
++ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
+
+ unsigned int running: 1; /* running status */
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 9ab2b3e..5ebe8c4 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -458,7 +458,7 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
+ stride = runtime->frame_bits >> 3;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+- cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
++ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj;
+ if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
+ snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
+ // continue;
+@@ -898,6 +898,7 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
+ subs->speed = snd_usb_get_speed(subs->dev);
+ if (subs->speed >= USB_SPEED_HIGH)
+ subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
++ subs->pkt_offset_adj = 0;
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 34b9bb7..e5fee18 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -126,7 +126,6 @@ struct snd_usb_midi {
+ struct snd_usb_midi_in_endpoint *in;
+ } endpoints[MIDI_MAX_ENDPOINTS];
+ unsigned long input_triggered;
+- bool autopm_reference;
+ unsigned int opened[2];
+ unsigned char disconnected;
+ unsigned char input_running;
+@@ -1040,7 +1039,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct snd_kcontrol *ctl;
+- int err;
+
+ down_read(&umidi->disc_rwsem);
+ if (umidi->disconnected) {
+@@ -1051,13 +1049,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ mutex_lock(&umidi->mutex);
+ if (open) {
+ if (!umidi->opened[0] && !umidi->opened[1]) {
+- err = usb_autopm_get_interface(umidi->iface);
+- umidi->autopm_reference = err >= 0;
+- if (err < 0 && err != -EACCES) {
+- mutex_unlock(&umidi->mutex);
+- up_read(&umidi->disc_rwsem);
+- return -EIO;
+- }
+ if (umidi->roland_load_ctl) {
+ ctl = umidi->roland_load_ctl;
+ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+@@ -1080,8 +1071,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+ }
+- if (umidi->autopm_reference)
+- usb_autopm_put_interface(umidi->iface);
+ }
+ }
+ mutex_unlock(&umidi->mutex);
+@@ -2256,6 +2245,8 @@ int snd_usbmidi_create(struct snd_card *card,
+ return err;
+ }
+
++ usb_autopm_get_interface_no_resume(umidi->iface);
++
+ list_add_tail(&umidi->list, midi_list);
+ return 0;
+ }
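
The usb/midi hunks drop the per-open autopm get/put, which could try to resume the device from substream_open() and fail, in favour of one reference taken at creation time. usb_autopm_get_interface_no_resume() bumps the PM usage count without waking the hardware, so autosuspend simply stays off for the interface's lifetime:

    /* Lifetime rule after the change (sketch): one PM reference, taken
     * without resuming the device, pins the interface from create to
     * teardown; opens and closes no longer touch the usage count. The
     * matching release happens in teardown code outside these hunks. */
    usb_autopm_get_interface_no_resume(umidi->iface);   /* at create */
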
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index dfbd65d..42eeee8 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -744,6 +744,7 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
+ break;
+ }
+ snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
++ subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
+ }
+
+ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
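
Together with the card.h and endpoint.c hunks above, this gives each substream a pkt_offset_adj that capture uses to skip leading junk in every isochronous packet; the E-Mu quirk sets it to 4 at 176.4 kHz and above, where those devices prepend four extra bytes per packet. Effect on the copy loop in retire_capture_urb(), in outline:

    /* Data now starts pkt_offset_adj bytes into each isoc packet: */
    cp = (unsigned char *)urb->transfer_buffer
            + urb->iso_frame_desc[i].offset
            + subs->pkt_offset_adj;     /* 4 on E-Mu at >= 176.4 kHz, else 0 */
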
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 5ff8010..33a335b 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -168,6 +168,14 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+ if (!csep && altsd->bNumEndpoints >= 2)
+ csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
+
++ /*
++ * If we can't locate the USB_DT_CS_ENDPOINT descriptor in the extra
++ * bytes after the first endpoint, go search the entire interface.
++ * Some devices have it directly *before* the standard endpoint.
++ */
++ if (!csep)
++ csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
++
+ if (!csep || csep->bLength < 7 ||
+ csep->bDescriptorSubtype != UAC_EP_GENERAL) {
+ snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"