Diffstat (limited to '3.2.59/1057_linux-3.2.58.patch')
-rw-r--r--   3.2.59/1057_linux-3.2.58.patch   3567
1 files changed, 3567 insertions, 0 deletions
diff --git a/3.2.59/1057_linux-3.2.58.patch b/3.2.59/1057_linux-3.2.58.patch
new file mode 100644
index 0000000..db5723a
--- /dev/null
+++ b/3.2.59/1057_linux-3.2.58.patch
@@ -0,0 +1,3567 @@
+diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
+index b15e29f..90aae856 100644
+--- a/Documentation/video4linux/gspca.txt
++++ b/Documentation/video4linux/gspca.txt
+@@ -55,6 +55,7 @@ zc3xx 0458:700f Genius VideoCam Web V2
+ sonixj 0458:7025 Genius Eye 311Q
+ sn9c20x 0458:7029 Genius Look 320s
+ sonixj 0458:702e Genius Slim 310 NB
++sn9c20x 0458:7045 Genius Look 1320 V2
+ sn9c20x 0458:704a Genius Slim 1320
+ sn9c20x 0458:704c Genius i-Look 1321
+ sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
+diff --git a/Makefile b/Makefile
+index c92db9b..d59b394 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 57
++SUBLEVEL = 58
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
+index 1d2ef5a..40736da 100644
+--- a/arch/alpha/lib/csum_partial_copy.c
++++ b/arch/alpha/lib/csum_partial_copy.c
+@@ -373,11 +373,6 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+ {
+- __wsum checksum;
+- mm_segment_t oldfs = get_fs();
+- set_fs(KERNEL_DS);
+- checksum = csum_partial_copy_from_user((__force const void __user *)src,
+- dst, len, sum, NULL);
+- set_fs(oldfs);
+- return checksum;
++ return csum_partial_copy_from_user((__force const void __user *)src,
++ dst, len, sum, NULL);
+ }
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 253cc86..aefd459 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -3,11 +3,6 @@
+
+ #ifdef __KERNEL__
+
+-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
+-/* ARM doesn't provide unprivileged exclusive memory accessors */
+-#include <asm-generic/futex.h>
+-#else
+-
+ #include <linux/futex.h>
+ #include <linux/uaccess.h>
+ #include <asm/errno.h>
+@@ -163,6 +158,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ return ret;
+ }
+
+-#endif /* !(CPU_USE_DOMAINS && SMP) */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_ARM_FUTEX_H */
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 470457e..1cb80c4 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -123,6 +123,7 @@
+ #define L_PTE_USER (_AT(pteval_t, 1) << 8)
+ #define L_PTE_XN (_AT(pteval_t, 1) << 9)
+ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
++#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
+ /*
+ * These are the memory types, defined to be compatible with
+@@ -138,6 +139,7 @@
+ #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
+ #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
+ #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
++#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
+ #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+
+ #endif /* _ASM_PGTABLE_2LEVEL_H */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9b419ab..fcbac3c 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -74,7 +74,7 @@ extern pgprot_t pgprot_kernel;
+
+ #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
+
+-#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
++#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
+ #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
+ #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
+ #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+@@ -84,7 +84,7 @@ extern pgprot_t pgprot_kernel;
+ #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
+ #define PAGE_KERNEL_EXEC pgprot_kernel
+
+-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
++#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
+ #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+ #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+ #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+@@ -279,7 +279,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
++ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 67f75a0..4e1ef6e 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -458,7 +458,6 @@ config CPU_32v5
+ config CPU_32v6
+ bool
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
+- select CPU_USE_DOMAINS if CPU_V6 && MMU
+
+ config CPU_32v6K
+ bool
+@@ -652,7 +651,7 @@ config ARM_THUMBEE
+
+ config SWP_EMULATE
+ bool "Emulate SWP/SWPB instructions"
+- depends on !CPU_USE_DOMAINS && CPU_V7
++ depends on CPU_V7
+ select HAVE_PROC_CPU if PROC_FS
+ default y if SMP
+ help
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 9e28fdb..082fa18 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -426,6 +426,14 @@ static void __init build_mem_type_table(void)
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+ /*
++ * We don't use domains on ARMv6 (since this causes problems with
++ * v6/v7 kernels), so we must use a separate memory type for user
++ * r/o, kernel r/w to map the vectors page.
++ */
++ if (cpu_arch == CPU_ARCH_ARMv6)
++ vecs_pgprot |= L_PTE_MT_VECTORS;
++
++ /*
+ * ARMv6 and above have extended page tables.
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index 307a4de..8a3edd4 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -106,13 +106,9 @@
+ * 100x 1 0 1 r/o no acc
+ * 10x0 1 0 1 r/o no acc
+ * 1011 0 0 1 r/w no acc
+- * 110x 0 1 0 r/w r/o
+- * 11x0 0 1 0 r/w r/o
+- * 1111 0 1 1 r/w r/w
+- *
+- * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ * 110x 1 1 1 r/o r/o
+ * 11x0 1 1 1 r/o r/o
++ * 1111 0 1 1 r/w r/w
+ */
+ .macro armv6_mt_table pfx
+ \pfx\()_mt_table:
+@@ -131,7 +127,7 @@
+ .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+- .long 0x00 @ unused
++ .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
+ .endm
+
+ .macro armv6_set_pte_ext pfx
+@@ -152,20 +148,21 @@
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+-#ifdef CONFIG_CPU_USE_DOMAINS
+- @ allow kernel read/write access to read-only user pages
+ tstne r3, #PTE_EXT_APX
+- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+-#endif
++
++ @ user read-only -> kernel read-only
++ bicne r3, r3, #PTE_EXT_AP0
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+- orr r3, r3, r2
++ eor r3, r3, r2
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
+ moveq r3, #0
++ tstne r1, #L_PTE_NONE
++ movne r3, #0
+
+ str r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 19d21ff..43c6981 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -160,17 +160,14 @@ ENTRY(cpu_v7_set_pte_ext)
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+-#ifdef CONFIG_CPU_USE_DOMAINS
+- @ allow kernel read/write access to read-only user pages
+- tstne r3, #PTE_EXT_APX
+- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+-#endif
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
++ eorne r1, r1, #L_PTE_NONE
++ tstne r1, #L_PTE_NONE
+ moveq r3, #0
+
+ ARM( str r3, [r0, #2048]! )
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index f8a751c..5bf34ec 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -44,6 +44,7 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
++ jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
+index 694158b..3a6528c 100644
+--- a/arch/sh/kernel/dumpstack.c
++++ b/arch/sh/kernel/dumpstack.c
+@@ -80,7 +80,7 @@ static int print_trace_stack(void *data, char *name)
+ */
+ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+- printk(data);
++ printk("%s", (char *)data);
+ printk_address(addr, reliable);
+ }
+
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 87537e2..88d442d 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -24,7 +24,7 @@ config SPARC
+ select HAVE_IRQ_WORK
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
+- select HAVE_ARCH_JUMP_LABEL
++ select HAVE_ARCH_JUMP_LABEL if SPARC64
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_SHOW
+ select USE_GENERIC_SMP_HELPERS if SMP
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 3e1449f..6d6c731 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -267,8 +267,8 @@ extern long __strnlen_user(const char __user *, long len);
+
+ #define strlen_user __strlen_user
+ #define strnlen_user __strnlen_user
+-#define __copy_to_user_inatomic ___copy_to_user
+-#define __copy_from_user_inatomic ___copy_from_user
++#define __copy_to_user_inatomic __copy_to_user
++#define __copy_from_user_inatomic __copy_from_user
+
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index 31111e3..656b5b6 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -487,8 +487,8 @@ static void __devinit apb_fake_ranges(struct pci_dev *dev,
+ pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
+ apb_calc_first_last(map, &first, &last);
+ res = bus->resource[1];
+- res->start = (first << 21);
+- res->end = (last << 21) + ((1 << 21) - 1);
++ res->start = (first << 29);
++ res->end = (last << 29) + ((1 << 29) - 1);
+ res->flags = IORESOURCE_MEM;
+ pci_resource_adjust(res, &pbm->mem_space);
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 817187d..557212c 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -184,7 +184,8 @@ linux_sparc_syscall32:
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+ srl %i5, 0, %o5 ! IEU1
+- ba,a,pt %xcc, 3f
++ ba,pt %xcc, 3f
++ sra %o0, 0, %o0
+
+ /* Linux native system calls enter here... */
+ .align 32
+@@ -212,7 +213,6 @@ linux_sparc_syscall:
+ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+- sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 646d192..1a3cf6e 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -18,6 +18,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/hyperv.h>
+ #include <asm/mshyperv.h>
++#include <asm/timer.h>
+
+ struct ms_hyperv_info ms_hyperv;
+ EXPORT_SYMBOL_GPL(ms_hyperv);
+@@ -70,6 +71,11 @@ static void __init ms_hyperv_init_platform(void)
+
+ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+ clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
++
++#ifdef CONFIG_X86_IO_APIC
++ no_timer_check = 1;
++#endif
++
+ }
+
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index ea69726..4ac4531 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -230,6 +230,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ }
+ }
+
++ /*
++ * On x86-64 we do not support 16-bit segments due to
++ * IRET leaking the high bits of the kernel stack address.
++ */
++#ifdef CONFIG_X86_64
++ if (!ldt_info.seg_32bit) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
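The effect of the check added above is visible from userspace through the modify_ldt(2) system call: on an x86-64 kernel carrying this change, installing a 16-bit (seg_32bit == 0) descriptor is refused with EINVAL. A minimal probe sketch under that assumption (not part of the patch):

/* probe_16bit_ldt.c - hedged sketch: expects EINVAL from x86-64 kernels
 * that reject 16-bit LDT segments, as in the write_ldt() change above. */
#include <asm/ldt.h>        /* struct user_desc */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number = 0;
	d.base_addr    = 0;
	d.limit        = 0xffff;
	d.seg_32bit    = 0;     /* 16-bit segment: rejected by the check above */
	d.useable      = 1;

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) == -1)
		printf("modify_ldt: %s (EINVAL expected on x86-64)\n",
		       strerror(errno));
	else
		printf("modify_ldt succeeded (kernel without the check?)\n");
	return 0;
}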
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a219c89..ec494ff 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2077,7 +2077,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+ if (!req->bio)
+ return false;
+
+- trace_block_rq_complete(req->q, req);
++ trace_block_rq_complete(req->q, req, nr_bytes);
+
+ /*
+ * For fs requests, rq is just carrier of independent bio's
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index 92ce302..263f899 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
+
+ static inline int read_all_bytes(struct si_sm_data *bt)
+ {
+- unsigned char i;
++ unsigned int i;
+
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
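My reading of the type change above: the loop counter is compared against a message length that can exceed 255, and an 8-bit counter wraps back to 0 before ever reaching such a length, so the loop never terminates. A stand-alone illustration of that wrap-around (hypothetical demo, not driver code):

/* An unsigned char wraps at 256, so "c < len" never becomes false
 * for len > 255; a wider counter finishes normally. */
#include <stdio.h>

int main(void)
{
	unsigned int len = 300;      /* a length an 8-bit counter cannot reach */
	unsigned char c = 0;
	unsigned int steps = 0;

	while (c < len && steps < 1000) {  /* guard so the demo terminates */
		c++;                       /* 255 + 1 wraps back to 0 */
		steps++;
	}
	printf("8-bit counter: stopped only by the guard after %u steps\n", steps);

	unsigned int i = 0;
	while (i < len)
		i++;                       /* terminates normally */
	printf("32-bit counter: finished after %u steps\n", i);
	return 0;
}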
+diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
+index b3379d6..2c6b671 100644
+--- a/drivers/cpufreq/powernow-k6.c
++++ b/drivers/cpufreq/powernow-k6.c
+@@ -25,41 +25,108 @@
+ static unsigned int busfreq; /* FSB, in 10 kHz */
+ static unsigned int max_multiplier;
+
++static unsigned int param_busfreq = 0;
++static unsigned int param_max_multiplier = 0;
++
++module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
++MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
++
++module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
++MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
+
+ /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
+ static struct cpufreq_frequency_table clock_ratio[] = {
+- {45, /* 000 -> 4.5x */ 0},
++ {60, /* 110 -> 6.0x */ 0},
++ {55, /* 011 -> 5.5x */ 0},
+ {50, /* 001 -> 5.0x */ 0},
++ {45, /* 000 -> 4.5x */ 0},
+ {40, /* 010 -> 4.0x */ 0},
+- {55, /* 011 -> 5.5x */ 0},
+- {20, /* 100 -> 2.0x */ 0},
+- {30, /* 101 -> 3.0x */ 0},
+- {60, /* 110 -> 6.0x */ 0},
+ {35, /* 111 -> 3.5x */ 0},
++ {30, /* 101 -> 3.0x */ 0},
++ {20, /* 100 -> 2.0x */ 0},
+ {0, CPUFREQ_TABLE_END}
+ };
+
++static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
++static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
++
++static const struct {
++ unsigned freq;
++ unsigned mult;
++} usual_frequency_table[] = {
++ { 400000, 40 }, // 100 * 4
++ { 450000, 45 }, // 100 * 4.5
++ { 475000, 50 }, // 95 * 5
++ { 500000, 50 }, // 100 * 5
++ { 506250, 45 }, // 112.5 * 4.5
++ { 533500, 55 }, // 97 * 5.5
++ { 550000, 55 }, // 100 * 5.5
++ { 562500, 50 }, // 112.5 * 5
++ { 570000, 60 }, // 95 * 6
++ { 600000, 60 }, // 100 * 6
++ { 618750, 55 }, // 112.5 * 5.5
++ { 660000, 55 }, // 120 * 5.5
++ { 675000, 60 }, // 112.5 * 6
++ { 720000, 60 }, // 120 * 6
++};
++
++#define FREQ_RANGE 3000
+
+ /**
+ * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
+ *
+- * Returns the current setting of the frequency multiplier. Core clock
++ * Returns the current setting of the frequency multiplier. Core clock
+ * speed is frequency of the Front-Side Bus multiplied with this value.
+ */
+ static int powernow_k6_get_cpu_multiplier(void)
+ {
+- u64 invalue = 0;
++ unsigned long invalue = 0;
+ u32 msrval;
+
++ local_irq_disable();
++
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+- return clock_ratio[(invalue >> 5)&7].index;
++ local_irq_enable();
++
++ return clock_ratio[register_to_index[(invalue >> 5)&7]].index;
+ }
+
++static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
++{
++ unsigned long outvalue, invalue;
++ unsigned long msrval;
++ unsigned long cr0;
++
++ /* we now need to transform best_i to the BVC format, see AMD#23446 */
++
++ /*
++ * The processor doesn't respond to inquiry cycles while changing the
++ * frequency, so we must disable cache.
++ */
++ local_irq_disable();
++ cr0 = read_cr0();
++ write_cr0(cr0 | X86_CR0_CD);
++ wbinvd();
++
++ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
++
++ msrval = POWERNOW_IOPORT + 0x1;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
++ invalue = inl(POWERNOW_IOPORT + 0x8);
++ invalue = invalue & 0x1f;
++ outvalue = outvalue | invalue;
++ outl(outvalue, (POWERNOW_IOPORT + 0x8));
++ msrval = POWERNOW_IOPORT + 0x0;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++
++ write_cr0(cr0);
++ local_irq_enable();
++}
+
+ /**
+ * powernow_k6_set_state - set the PowerNow! multiplier
+@@ -69,8 +136,6 @@ static int powernow_k6_get_cpu_multiplier(void)
+ */
+ static void powernow_k6_set_state(unsigned int best_i)
+ {
+- unsigned long outvalue = 0, invalue = 0;
+- unsigned long msrval;
+ struct cpufreq_freqs freqs;
+
+ if (clock_ratio[best_i].index > max_multiplier) {
+@@ -84,18 +149,7 @@ static void powernow_k6_set_state(unsigned int best_i)
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+- /* we now need to transform best_i to the BVC format, see AMD#23446 */
+-
+- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
+-
+- msrval = POWERNOW_IOPORT + 0x1;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+- invalue = inl(POWERNOW_IOPORT + 0x8);
+- invalue = invalue & 0xf;
+- outvalue = outvalue | invalue;
+- outl(outvalue , (POWERNOW_IOPORT + 0x8));
+- msrval = POWERNOW_IOPORT + 0x0;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++ powernow_k6_set_cpu_multiplier(best_i);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+@@ -140,18 +194,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
+ return 0;
+ }
+
+-
+ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ {
+ unsigned int i, f;
+ int result;
++ unsigned khz;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+- /* get frequencies */
+- max_multiplier = powernow_k6_get_cpu_multiplier();
+- busfreq = cpu_khz / max_multiplier;
++ max_multiplier = 0;
++ khz = cpu_khz;
++ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
++ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
++ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
++ khz = usual_frequency_table[i].freq;
++ max_multiplier = usual_frequency_table[i].mult;
++ break;
++ }
++ }
++ if (param_max_multiplier) {
++ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
++ if (clock_ratio[i].index == param_max_multiplier) {
++ max_multiplier = param_max_multiplier;
++ goto have_max_multiplier;
++ }
++ }
++ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
++ return -EINVAL;
++ }
++
++ if (!max_multiplier) {
++ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
++ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
++ return -EOPNOTSUPP;
++ }
++
++have_max_multiplier:
++ param_max_multiplier = max_multiplier;
++
++ if (param_busfreq) {
++ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
++ busfreq = param_busfreq / 10;
++ goto have_busfreq;
++ }
++ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
++ return -EINVAL;
++ }
++
++ busfreq = khz / max_multiplier;
++have_busfreq:
++ param_busfreq = busfreq * 10;
+
+ /* table init */
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+@@ -163,7 +256,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.transition_latency = 200000;
++ policy->cpuinfo.transition_latency = 500000;
+ policy->cur = busfreq * max_multiplier;
+
+ result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
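The two lookup tables introduced above, index_to_register[] and register_to_index[], are meant to be inverse permutations between the driver's clock_ratio[] index and the 3-bit BVC multiplier code from AMD#23446. A quick self-contained check of that property, using the table values verbatim (everything else here is illustrative):

/* Verifies the powernow-k6 tables are mutual inverses and prints
 * multiplier (x10) -> BVC register code. */
#include <stdio.h>

static const unsigned char index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
static const unsigned char register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
static const int clock_ratio_x10[8] = { 60, 55, 50, 45, 40, 35, 30, 20 };

int main(void)
{
	int ok = 1;

	for (int i = 0; i < 8; i++) {
		if (register_to_index[index_to_register[i]] != i)
			ok = 0;
		printf("multiplier %d.%d -> register code %u\n",
		       clock_ratio_x10[i] / 10, clock_ratio_x10[i] % 10,
		       index_to_register[i]);
	}
	printf("tables are inverse permutations: %s\n", ok ? "yes" : "no");
	return 0;
}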
+diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
+index 385c58e..0f8114d 100644
+--- a/drivers/gpio/gpio-mxs.c
++++ b/drivers/gpio/gpio-mxs.c
+@@ -167,7 +167,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
+ ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port->id);
+
+- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
++ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
++ IRQ_NOREQUEST, 0);
+ }
+
+ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 2ea8a96..27999d9 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8946,6 +8946,12 @@ struct intel_quirk intel_quirks[] = {
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
++ /* Acer Aspire 4736Z */
++ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
++
++ /* Acer Aspire 5336 */
++ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
++
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 12041fa..b221f2b 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1599,9 +1599,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
+ /*
+ * If the device type is not TV, continue.
+ */
+- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+- p_child->device_type != DEVICE_TYPE_TV)
++ switch (p_child->device_type) {
++ case DEVICE_TYPE_INT_TV:
++ case DEVICE_TYPE_TV:
++ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
++ break;
++ default:
+ continue;
++ }
+ /* Only when the addin_offset is non-zero, it is regarded
+ * as present.
+ */
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 63e7143..3291ab8 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -738,6 +738,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
++ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 34e51a1..907c26f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+ }
+
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
+- info->fix.line_length,
++ var->xres * var->bits_per_pixel/8,
+ var->yoffset + var->yres)) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ int ret;
+
++ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
++
+ ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
++ vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 810658e..d01edf3 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -485,7 +485,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ /* Make sure all reads are done before we update the read index since */
+ /* the writer may start writing to the read area once the read index */
+ /*is updated */
+- smp_mb();
++ mb();
+
+ /* Update the read index */
+ hv_set_next_read_location(inring_info, next_read_location);
+diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
+index d9b0ebc..6eeb84d 100644
+--- a/drivers/infiniband/hw/ehca/ehca_cq.c
++++ b/drivers/infiniband/hw/ehca/ehca_cq.c
+@@ -296,6 +296,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
+ (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ ehca_err(device, "Copy to udata failed.");
++ cq = ERR_PTR(-EFAULT);
+ goto create_cq_exit4;
+ }
+ }
+diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
+index 714293b..e2f9a51 100644
+--- a/drivers/infiniband/hw/ipath/ipath_diag.c
++++ b/drivers/infiniband/hw/ipath/ipath_diag.c
+@@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ size_t count, loff_t *off)
+ {
+ u32 __iomem *piobuf;
+- u32 plen, clen, pbufn;
++ u32 plen, pbufn, maxlen_reserve;
+ struct ipath_diag_pkt odp;
+ struct ipath_diag_xpkt dp;
+ u32 *tmpbuf = NULL;
+@@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ u64 val;
+ u32 l_state, lt_state; /* LinkState, LinkTrainingState */
+
+- if (count < sizeof(odp)) {
+- ret = -EINVAL;
+- goto bail;
+- }
+
+ if (count == sizeof(dp)) {
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+- } else if (copy_from_user(&odp, data, sizeof(odp))) {
+- ret = -EFAULT;
++ } else if (count == sizeof(odp)) {
++ if (copy_from_user(&odp, data, sizeof(odp))) {
++ ret = -EFAULT;
++ goto bail;
++ }
++ } else {
++ ret = -EINVAL;
+ goto bail;
+ }
+
+- /*
+- * Due to padding/alignment issues (lessened with new struct)
+- * the old and new structs are the same length. We need to
+- * disambiguate them, which we can do because odp.len has never
+- * been less than the total of LRH+BTH+DETH so far, while
+- * dp.unit (same offset) unit is unlikely to get that high.
+- * Similarly, dp.data, the pointer to user at the same offset
+- * as odp.unit, is almost certainly at least one (512byte)page
+- * "above" NULL. The if-block below can be omitted if compatibility
+- * between a new driver and older diagnostic code is unimportant.
+- * compatibility the other direction (new diags, old driver) is
+- * handled in the diagnostic code, with a warning.
+- */
+- if (dp.unit >= 20 && dp.data < 512) {
+- /* very probable version mismatch. Fix it up */
+- memcpy(&odp, &dp, sizeof(odp));
+- /* We got a legacy dp, copy elements to dp */
+- dp.unit = odp.unit;
+- dp.data = odp.data;
+- dp.len = odp.len;
+- dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
+- }
+-
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+- clen = dp.len >> 2;
++ plen = dp.len >> 2;
+
+ dd = ipath_lookup(dp.unit);
+ if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
+@@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ goto bail;
+ }
+
+- /* need total length before first word written */
+- /* +1 word is for the qword padding */
+- plen = sizeof(u32) + dp.len;
+-
+- if ((plen + 4) > dd->ipath_ibmaxlen) {
++ /*
++ * need total length before first word written, plus 2 Dwords. One Dword
++ * is for padding so we get the full user data when not aligned on
++ * a word boundary. The other Dword is to make sure we have room for the
++ * ICRC which gets tacked on later.
++ */
++ maxlen_reserve = 2 * sizeof(u32);
++ if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
+ ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
+- plen - 4, dd->ipath_ibmaxlen);
++ dp.len, dd->ipath_ibmaxlen);
+ ret = -EINVAL;
+- goto bail; /* before writing pbc */
++ goto bail;
+ }
++
++ plen = sizeof(u32) + dp.len;
++
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
+@@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ */
+ if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+ ipath_flush_wc();
+- __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
++ __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
+ ipath_flush_wc();
+- __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
++ __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
+ } else
+- __iowrite32_copy(piobuf + 2, tmpbuf, clen);
++ __iowrite32_copy(piobuf + 2, tmpbuf, plen);
+
+ ipath_flush_wc();
+
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index 5b71d43..42dde06 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+
+ if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+ mthca_free_cq(to_mdev(ibdev), cq);
++ err = -EFAULT;
+ goto err_free;
+ }
+
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index b0471b4..330eb6e 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1183,7 +1183,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
+- return NULL;
++ return ERR_PTR(-EFAULT);
+ }
+ if (req.user_wqe_buffers) {
+ virt_wqs = 1;
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index 4df80fb..75ca5d2 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
+ static void
+ isdnloop_fake_err(isdnloop_card * card)
+ {
+- char buf[60];
++ char buf[64];
+
+- sprintf(buf, "E%s", card->omsg);
++ snprintf(buf, sizeof(buf), "E%s", card->omsg);
+ isdnloop_fake(card, buf, -1);
+ isdnloop_fake(card, "NAK", -1);
+ }
+@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card * card)
+ case 7:
+ /* 0x;EAZ */
+ p += 3;
++ if (strlen(p) >= sizeof(card->eazlist[0]))
++ break;
+ strcpy(card->eazlist[ch - 1], p);
+ break;
+ case 8:
+@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
+ return -EBUSY;
+ if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
+ return -EFAULT;
++
++ for (i = 0; i < 3; i++) {
++ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
++ return -EINVAL;
++ }
++
+ spin_lock_irqsave(&card->isdnloop_lock, flags);
+ switch (sdef.ptype) {
+ case ISDN_PTYPE_EURO:
+@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
+ {
+ ulong a;
+ int i;
+- char cbuf[60];
++ char cbuf[80];
+ isdn_ctrl cmd;
+ isdnloop_cdef cdef;
+
+@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
+ break;
+ if ((c->arg & 255) < ISDNLOOP_BCH) {
+ char *p;
+- char dial[50];
+ char dcode[4];
+
+ a = c->arg;
+@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
+ } else
+ /* Normal Dial */
+ strcpy(dcode, "CAL");
+- strcpy(dial, p);
+- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+- dcode, dial, c->parm.setup.si1,
+- c->parm.setup.si2, c->parm.setup.eazmsn);
++ snprintf(cbuf, sizeof(cbuf),
++ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
++ dcode, p, c->parm.setup.si1,
++ c->parm.setup.si2, c->parm.setup.eazmsn);
+ i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
+ }
+ break;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 2c9dd2c..80f8bd5 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1298,9 +1298,9 @@ static void process_deferred_bios(struct pool *pool)
+ */
+ if (ensure_next_mapping(pool)) {
+ spin_lock_irqsave(&pool->lock, flags);
++ bio_list_add(&pool->deferred_bios, bio);
+ bio_list_merge(&pool->deferred_bios, &bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+-
+ break;
+ }
+ process_bio(tc, bio);
+diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
+index 86e07a1..509e202 100644
+--- a/drivers/media/video/gspca/sn9c20x.c
++++ b/drivers/media/video/gspca/sn9c20x.c
+@@ -2521,6 +2521,7 @@ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
+ {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
+ {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
++ {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)},
+ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
+diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
+index b015e8e..af5c040 100644
+--- a/drivers/media/video/uvc/uvc_video.c
++++ b/drivers/media/video/uvc/uvc_video.c
+@@ -1267,7 +1267,25 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
+
+ if (!enable) {
+ uvc_uninit_video(stream, 1);
+- usb_set_interface(stream->dev->udev, stream->intfnum, 0);
++ if (stream->intf->num_altsetting > 1) {
++ usb_set_interface(stream->dev->udev,
++ stream->intfnum, 0);
++ } else {
++ /* UVC doesn't specify how to inform a bulk-based device
++ * when the video stream is stopped. Windows sends a
++ * CLEAR_FEATURE(HALT) request to the video streaming
++ * bulk endpoint, mimic the same behaviour.
++ */
++ unsigned int epnum = stream->header.bEndpointAddress
++ & USB_ENDPOINT_NUMBER_MASK;
++ unsigned int dir = stream->header.bEndpointAddress
++ & USB_ENDPOINT_DIR_MASK;
++ unsigned int pipe;
++
++ pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
++ usb_clear_halt(stream->dev->udev, pipe);
++ }
++
+ uvc_queue_enable(&stream->queue, 0);
+ return 0;
+ }
+diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
+index e017dc8..f035dd3 100644
+--- a/drivers/mfd/88pm860x-i2c.c
++++ b/drivers/mfd/88pm860x-i2c.c
+@@ -290,6 +290,12 @@ static int __devinit pm860x_probe(struct i2c_client *client,
+ chip->companion_addr = pdata->companion_addr;
+ chip->companion = i2c_new_dummy(chip->client->adapter,
+ chip->companion_addr);
++ if (!chip->companion) {
++ dev_err(&client->dev,
++ "Failed to allocate I2C companion device\n");
++ kfree(chip);
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->companion, chip);
+ }
+
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index f1391c2..b2b6916 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -772,9 +772,6 @@ config MFD_INTEL_MSIC
+ Passage) chip. This chip embeds audio, battery, GPIO, etc.
+ devices used in Intel Medfield platforms.
+
+-endmenu
+-endif
+-
+ menu "Multimedia Capabilities Port drivers"
+ depends on ARCH_SA1100
+
+@@ -797,3 +794,6 @@ config MCP_UCB1200_TS
+ depends on MCP_UCB1200 && INPUT
+
+ endmenu
++
++endmenu
++endif
+diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
+index 0219115..90b450c 100644
+--- a/drivers/mfd/max8925-i2c.c
++++ b/drivers/mfd/max8925-i2c.c
+@@ -156,9 +156,18 @@ static int __devinit max8925_probe(struct i2c_client *client,
+ mutex_init(&chip->io_lock);
+
+ chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
++ if (!chip->rtc) {
++ dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->rtc, chip);
+
+ chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
++ if (!chip->adc) {
++ dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
++ i2c_unregister_device(chip->rtc);
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->adc, chip);
+
+ max8925_device_init(chip, pdata);
+diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
+index 5be53ae..1926a54 100644
+--- a/drivers/mfd/max8997.c
++++ b/drivers/mfd/max8997.c
+@@ -148,10 +148,26 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
+ mutex_init(&max8997->iolock);
+
+ max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
++ if (!max8997->rtc) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max8997->rtc, max8997);
++
+ max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
++ if (!max8997->haptic) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
++ ret = -ENODEV;
++ goto err_i2c_haptic;
++ }
+ i2c_set_clientdata(max8997->haptic, max8997);
++
+ max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
++ if (!max8997->muic) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
++ ret = -ENODEV;
++ goto err_i2c_muic;
++ }
+ i2c_set_clientdata(max8997->muic, max8997);
+
+ pm_runtime_set_active(max8997->dev);
+@@ -178,7 +194,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
+ err_mfd:
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
++err_i2c_muic:
+ i2c_unregister_device(max8997->haptic);
++err_i2c_haptic:
+ i2c_unregister_device(max8997->rtc);
+ err:
+ kfree(max8997);
+diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
+index de4096a..2fa6a28 100644
+--- a/drivers/mfd/max8998.c
++++ b/drivers/mfd/max8998.c
+@@ -152,6 +152,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
+ mutex_init(&max8998->iolock);
+
+ max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
++ if (!max8998->rtc) {
++ dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max8998->rtc, max8998);
+
+ max8998_irq_init(max8998);
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 2e88af1..a0ba5ac 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1390,7 +1390,7 @@ int ath_cabq_update(struct ath_softc *sc)
+ else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
+ sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
+
+- qi.tqi_readyTime = (cur_conf->beacon_interval *
++ qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
+ sc->config.cabqReadytime) / 100;
+ ath_txq_update(sc, qnum, &qi);
+
+diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
+index b17d9b6..0490c7c 100644
+--- a/drivers/net/wireless/b43/phy_n.c
++++ b/drivers/net/wireless/b43/phy_n.c
+@@ -3937,22 +3937,22 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 old_band_5ghz;
+- u32 tmp32;
++ u16 tmp16;
+
+ old_band_5ghz =
+ b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
+ if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
+- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
++ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
+ b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
+ b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+ } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
++ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
+ b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
+ }
+
+ b43_chantab_phy_upload(dev, e);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 94d35ad..4a36973 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -246,13 +246,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, bt_runtime_config);
+
++ mutex_lock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+- return;
++ goto out;
+
+ /* dont send host command if rf-kill is on */
+ if (!iwl_is_ready_rf(priv->shrd))
+- return;
++ goto out;
++
+ iwlagn_send_advance_bt_config(priv);
++out:
++ mutex_unlock(&priv->shrd->mutex);
+ }
+
+ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+index c474486..503c160 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+@@ -924,7 +924,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 tmp_byte = 0;
+-
++ unsigned long flags;
+ bool rtstatus = true;
+ u8 tmp_u1b;
+ int err = false;
+@@ -936,6 +936,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ rtlpci->being_init_adapter = true;
+
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, reenable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe doing so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+
+ /* 1. MAC Initialize */
+@@ -969,7 +979,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
+ if (rtl92s_phy_mac_config(hw) != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("MAC Config failed\n"));
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* Make sure BB/RF write OK. We should prevent enter IPS. radio off. */
+@@ -979,7 +990,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
+ if (rtl92s_phy_bb_config(hw) != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, ("BB Config failed\n"));
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */
+@@ -1015,7 +1027,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ if (rtl92s_phy_rf_config(hw) != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("RF Config failed\n"));
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* After read predefined TXT, we must set BB/MAC/RF
+@@ -1089,8 +1102,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
+ rtl92s_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpci->being_init_adapter = false;
+-
+ return err;
+ }
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 9a4626c..b2528f6 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -346,8 +346,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+ * into multiple copies tend to give large frags their
+ * own buffers as before.
+ */
+- if ((offset + size > MAX_BUFFER_OFFSET) &&
+- (size <= MAX_BUFFER_OFFSET) && offset && !head)
++ BUG_ON(size > MAX_BUFFER_OFFSET);
++ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ return true;
+
+ return false;
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+index 21a6769..38a99d2 100644
+--- a/drivers/scsi/isci/port_config.c
++++ b/drivers/scsi/isci/port_config.c
+@@ -610,6 +610,13 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already the part of the port */
++ u32 port_state = iport->sm.current_state_id;
++
++ /* if the PORT'S state is resetting then the link up is from
++ * port hard reset in this case, we need to tell the port
++ * that link up is recieved
++ */
++ BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 60a530f..e294d11 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -1390,7 +1390,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+- ret = -ENODEV;
++ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
+index c44e41a..c2731ca 100644
+--- a/drivers/staging/serqt_usb2/serqt_usb2.c
++++ b/drivers/staging/serqt_usb2/serqt_usb2.c
+@@ -772,7 +772,7 @@ static int qt_startup(struct usb_serial *serial)
+ goto startup_error;
+ }
+
+- switch (serial->dev->descriptor.idProduct) {
++ switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
+ case QUATECH_DSU100:
+ case QUATECH_QSU100:
+ case QUATECH_ESU100A:
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 4a88eea..ab5dd16 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -2358,6 +2358,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ {
+ struct iscsi_cmd *cmd;
+ struct iscsi_conn *conn_p;
++ bool found = false;
+
+ /*
+ * Only send a Asynchronous Message on connections whos network
+@@ -2366,11 +2367,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+ iscsit_inc_conn_usage_count(conn_p);
++ found = true;
+ break;
+ }
+ }
+
+- if (!conn_p)
++ if (!found)
+ return;
+
+ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index ab0a3fa..b328011 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -72,6 +72,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+
+ if (tport) {
+ tport->tpg = tpg;
++ tpg->tport = tport;
+ return tport;
+ }
+
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index b6b2d18..7b97e7e 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -31,6 +31,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/major.h>
++#include <linux/atomic.h>
+ #include <linux/sysrq.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
+@@ -70,6 +71,9 @@ static struct task_struct *hvc_task;
+ /* Picks up late kicks after list walk but before schedule() */
+ static int hvc_kicked;
+
++/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
++static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
++
+ static int hvc_init(void);
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+@@ -825,7 +829,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
+ int i;
+
+ /* We wait until a driver actually comes along */
+- if (!hvc_driver) {
++ if (atomic_inc_not_zero(&hvc_needs_init)) {
+ int err = hvc_init();
+ if (err)
+ return ERR_PTR(err);
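The atomic_inc_not_zero() change above turns hvc_init() into a race-free run-once: the counter starts at -1, so the first caller increments it to 0 and performs the init, while every later caller sees 0 and is refused the increment. A userspace C11 analogue of that pattern (an illustration, not the kernel primitive):

/* Run-once gate modelled on the hvc_needs_init pattern above:
 * the counter starts at -1; only the caller that moves it -1 -> 0
 * runs the init.  C11 stdatomic sketch. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int needs_init = -1;

static bool inc_not_zero(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur != 0) {
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return true;    /* we performed the increment */
	}
	return false;                   /* already 0: someone else won */
}

static void init_once(void)
{
	printf("init ran\n");
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		if (inc_not_zero(&needs_init))
			init_once();    /* printed exactly once */
		else
			printf("init skipped\n");
	}
	return 0;
}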
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 3f35e42..446df6b 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1222,9 +1222,9 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
+ *
+ * Locking: None
+ */
+-static void tty_line_name(struct tty_driver *driver, int index, char *p)
++static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+ {
+- sprintf(p, "%s%d", driver->name, index + driver->name_base);
++ return sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ }
+
+ /**
+@@ -3321,9 +3321,19 @@ static ssize_t show_cons_active(struct device *dev,
+ if (i >= ARRAY_SIZE(cs))
+ break;
+ }
+- while (i--)
+- count += sprintf(buf + count, "%s%d%c",
+- cs[i]->name, cs[i]->index, i ? ' ':'\n');
++ while (i--) {
++ int index = cs[i]->index;
++ struct tty_driver *drv = cs[i]->device(cs[i], &index);
++
++ /* don't resolve tty0 as some programs depend on it */
++ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
++ count += tty_line_name(drv, index, buf + count);
++ else
++ count += sprintf(buf + count, "%s%d",
++ cs[i]->name, cs[i]->index);
++
++ count += sprintf(buf + count, "%c", i ? ' ':'\n');
++ }
+ console_unlock();
+
+ return count;
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 4795c0c..ae2b763 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -716,15 +716,15 @@ struct dwc3_event_depevt {
+ * 12 - VndrDevTstRcved
+ * @reserved15_12: Reserved, not used
+ * @event_info: Information about this event
+- * @reserved31_24: Reserved, not used
++ * @reserved31_25: Reserved, not used
+ */
+ struct dwc3_event_devt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 type:4;
+ u32 reserved15_12:4;
+- u32 event_info:8;
+- u32 reserved31_24:8;
++ u32 event_info:9;
++ u32 reserved31_25:7;
+ } __packed;
+
+ /**
+diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
+index 271a9d8..b299c32 100644
+--- a/drivers/usb/gadget/atmel_usba_udc.c
++++ b/drivers/usb/gadget/atmel_usba_udc.c
+@@ -1875,12 +1875,13 @@ static int atmel_usba_stop(struct usb_gadget_driver *driver)
+
+ driver->unbind(&udc->gadget);
+ udc->gadget.dev.driver = NULL;
+- udc->driver = NULL;
+
+ clk_disable(udc->hclk);
+ clk_disable(udc->pclk);
+
+- DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
++ DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
++
++ udc->driver = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 5c58128..7ef84c1 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -319,9 +319,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ r = -ENOBUFS;
+ goto err;
+ }
+- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
++ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num);
++ if (unlikely(r < 0))
++ goto err;
++
++ d = r;
+ if (d == vq->num) {
+ r = 0;
+ goto err;
+@@ -346,6 +350,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ *iovcount = seg;
+ if (unlikely(log))
+ *log_num = nlogs;
++
++ /* Detect overrun */
++ if (unlikely(datalen > 0)) {
++ r = UIO_MAXIOV + 1;
++ goto err;
++ }
+ return headcount;
+ err:
+ vhost_discard_vq_desc(vq, headcount);
+@@ -400,6 +410,14 @@ static void handle_rx(struct vhost_net *net)
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+ break;
++ /* On overrun, truncate and discard */
++ if (unlikely(headcount > UIO_MAXIOV)) {
++ msg.msg_iovlen = 1;
++ err = sock->ops->recvmsg(NULL, sock, &msg,
++ 1, MSG_DONTWAIT | MSG_TRUNC);
++ pr_debug("Discarded rx packet: len %zd\n", sock_len);
++ continue;
++ }
+ /* OK, now we need to know about added descriptors. */
+ if (!headcount) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
+index e45833c..182bd68 100644
+--- a/drivers/video/aty/mach64_accel.c
++++ b/drivers/video/aty/mach64_accel.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <asm/unaligned.h>
+ #include <linux/fb.h>
+ #include <video/mach64.h>
+ #include "atyfb.h"
+@@ -419,7 +420,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ u32 *pbitmap, dwords = (src_bytes + 3) / 4;
+ for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) {
+ wait_for_fifo(1, par);
+- aty_st_le32(HOST_DATA0, le32_to_cpup(pbitmap), par);
++ aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
+ }
+ }
+
+diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
+index 46f72ed..4b87318 100644
+--- a/drivers/video/aty/mach64_cursor.c
++++ b/drivers/video/aty/mach64_cursor.c
+@@ -5,6 +5,7 @@
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/string.h>
++#include "../fb_draw.h"
+
+ #include <asm/io.h>
+
+@@ -157,24 +158,33 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
++ u16 l = 0xaaaa;
+ b = *src++;
+ m = *msk++;
+ switch (cursor->rop) {
+ case ROP_XOR:
+ // Upper 4 bits of mask data
+- fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++);
++ l = cursor_bits_lookup[(b ^ m) >> 4] |
+ // Lower 4 bits of mask
+- fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f],
+- dst++);
++ (cursor_bits_lookup[(b ^ m) & 0x0f] << 8);
+ break;
+ case ROP_COPY:
+ // Upper 4 bits of mask data
+- fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++);
++ l = cursor_bits_lookup[(b & m) >> 4] |
+ // Lower 4 bits of mask
+- fb_writeb(cursor_bits_lookup[(b & m) & 0x0f],
+- dst++);
++ (cursor_bits_lookup[(b & m) & 0x0f] << 8);
+ break;
+ }
++ /*
++ * If cursor size is not a multiple of 8 characters
++ * we must pad it with transparent pattern (0xaaaa).
++ */
++ if ((j + 1) * 8 > cursor->image.width) {
++ l = comp(l, 0xaaaa,
++ (1 << ((cursor->image.width & 7) * 2)) - 1);
++ }
++ fb_writeb(l & 0xff, dst++);
++ fb_writeb(l >> 8, dst++);
+ }
+ dst += offset;
+ }
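The padding added above relies on the frame-buffer helper comp(a, b, mask), which keeps bits of a where the mask is set and bits of b elsewhere; 0xaaaa is the 2-bit-per-pixel "transparent" pattern. A small sketch of how a 12-pixel-wide cursor row gets its upper pixels forced transparent, with comp() restated locally on the assumption that it matches drivers/video/fb_draw.h:

/* For image.width = 12, the second byte of a row covers pixels 8..15,
 * so pixels 12..15 must become the transparent pattern (0b10 per pixel). */
#include <stdio.h>

static unsigned long comp(unsigned long set, unsigned long unset,
			  unsigned long mask)
{
	return ((set ^ unset) & mask) ^ unset;  /* set where mask=1, else unset */
}

int main(void)
{
	unsigned int width = 12;        /* cursor width in pixels */
	unsigned int j = 1;             /* second source byte of the row */
	unsigned int l = 0x1234;        /* some computed cursor word */

	if ((j + 1) * 8 > width) {
		unsigned long mask = (1UL << ((width & 7) * 2)) - 1; /* low 4 pixels */
		l = comp(l, 0xaaaa, mask);
	}
	printf("padded word: 0x%04x (upper pixels transparent)\n", l);
	return 0;
}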
+diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c
+index bb5a96b..bcb5723 100644
+--- a/drivers/video/cfbcopyarea.c
++++ b/drivers/video/cfbcopyarea.c
+@@ -43,13 +43,22 @@
+ */
+
+ static void
+-bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+- const unsigned long __iomem *src, int src_idx, int bits,
++bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
++ const unsigned long __iomem *src, unsigned src_idx, int bits,
+ unsigned n, u32 bswapmask)
+ {
+ unsigned long first, last;
+ int const shift = dst_idx-src_idx;
+- int left, right;
++
++#if 0
++ /*
++ * If you suspect bug in this function, compare it with this simple
++ * memmove implementation.
++ */
++ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ return;
++#endif
+
+ first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
+@@ -98,9 +107,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long d0, d1;
+ int m;
+
+- right = shift & (bits - 1);
+- left = -shift & (bits - 1);
+- bswapmask &= shift;
++ int const left = shift & (bits - 1);
++ int const right = -shift & (bits - 1);
+
+ if (dst_idx+n <= bits) {
+ // Single destination word
+@@ -110,15 +118,15 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ if (shift > 0) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else if (src_idx+n <= bits) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src + 1);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0<<left | d1>>right;
++ d0 = d0 >> right | d1 << left;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+@@ -135,60 +143,59 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ if (shift > 0) {
+ // Single source word
+ d1 = d0;
+- d0 >>= right;
+- dst++;
++ d0 <<= left;
+ n -= bits - dst_idx;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src++);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+
+- d0 = d0<<left | d1>>right;
+- dst++;
++ d0 = d0 >> right | d1 << left;
+ n -= bits - dst_idx;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+ d0 = d1;
++ dst++;
+
+ // Main chunk
+ m = n % bits;
+ n /= bits;
+ while ((n >= 4) && !bswapmask) {
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = FB_READL(src++);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0 << left | d1 >> right;
++ d0 = d0 >> right | d1 << left;
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(d0, dst++);
+ d0 = d1;
+ }
+
+ // Trailing bits
+- if (last) {
+- if (m <= right) {
++ if (m) {
++ if (m <= bits - right) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src);
+ d1 = fb_rev_pixels_in_long(d1,
+ bswapmask);
+- d0 = d0<<left | d1>>right;
++ d0 = d0 >> right | d1 << left;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+@@ -202,43 +209,46 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ */
+
+ static void
+-bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+- const unsigned long __iomem *src, int src_idx, int bits,
++bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
++ const unsigned long __iomem *src, unsigned src_idx, int bits,
+ unsigned n, u32 bswapmask)
+ {
+ unsigned long first, last;
+ int shift;
+
+- dst += (n-1)/bits;
+- src += (n-1)/bits;
+- if ((n-1) % bits) {
+- dst_idx += (n-1) % bits;
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= bits - 1;
+- src_idx += (n-1) % bits;
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= bits - 1;
+- }
++#if 0
++ /*
++ * If you suspect bug in this function, compare it with this simple
++ * memmove implementation.
++ */
++ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ return;
++#endif
++
++ dst += (dst_idx + n - 1) / bits;
++ src += (src_idx + n - 1) / bits;
++ dst_idx = (dst_idx + n - 1) % bits;
++ src_idx = (src_idx + n - 1) % bits;
+
+ shift = dst_idx-src_idx;
+
+- first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
+- last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
+- bswapmask);
++ first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask);
++ last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask);
+
+ if (!shift) {
+ // Same alignment for source and dest
+
+ if ((unsigned long)dst_idx+1 >= n) {
+ // Single word
+- if (last)
+- first &= last;
+- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
++ if (first)
++ last &= first;
++ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
+ } else {
+ // Multiple destination words
+
+ // Leading bits
+- if (first != ~0UL) {
++ if (first) {
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
+ dst--;
+ src--;
+@@ -262,7 +272,7 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ FB_WRITEL(FB_READL(src--), dst--);
+
+ // Trailing bits
+- if (last)
++ if (last != -1UL)
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
+ }
+ } else {
+@@ -270,29 +280,28 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long d0, d1;
+ int m;
+
+- int const left = -shift & (bits-1);
+- int const right = shift & (bits-1);
+- bswapmask &= shift;
++ int const left = shift & (bits-1);
++ int const right = -shift & (bits-1);
+
+ if ((unsigned long)dst_idx+1 >= n) {
+ // Single destination word
+- if (last)
+- first &= last;
++ if (first)
++ last &= first;
+ d0 = FB_READL(src);
+ if (shift < 0) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else if (1+(unsigned long)src_idx >= n) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src - 1);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
++ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+ } else {
+ // Multiple destination words
+ /** We must always remember the last value read, because in case
+@@ -307,12 +316,12 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ if (shift < 0) {
+ // Single source word
+ d1 = d0;
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src--);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+@@ -325,39 +334,39 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ n /= bits;
+ while ((n >= 4) && !bswapmask) {
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = FB_READL(src--);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0 >> right | d1 << left;
++ d0 = d0 << left | d1 >> right;
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(d0, dst--);
+ d0 = d1;
+ }
+
+ // Trailing bits
+- if (last) {
+- if (m <= left) {
++ if (m) {
++ if (m <= bits - left) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src);
+ d1 = fb_rev_pixels_in_long(d1,
+ bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+@@ -371,9 +380,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+ u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
+ u32 height = area->height, width = area->width;
+ unsigned long const bits_per_line = p->fix.line_length*8u;
+- unsigned long __iomem *dst = NULL, *src = NULL;
++ unsigned long __iomem *base = NULL;
+ int bits = BITS_PER_LONG, bytes = bits >> 3;
+- int dst_idx = 0, src_idx = 0, rev_copy = 0;
++ unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
+ u32 bswapmask = fb_compute_bswapmask(p);
+
+ if (p->state != FBINFO_STATE_RUNNING)
+@@ -389,7 +398,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+
+ // split the base of the framebuffer into a long-aligned address and the
+ // index of the first bit
+- dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
++ base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
+ dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
+ // add offset of source and target area
+ dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
+@@ -402,20 +411,14 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+ while (height--) {
+ dst_idx -= bits_per_line;
+ src_idx -= bits_per_line;
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= (bytes - 1);
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= (bytes - 1);
+- bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
++ bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
++ base + (src_idx / bits), src_idx % bits, bits,
+ width*p->var.bits_per_pixel, bswapmask);
+ }
+ } else {
+ while (height--) {
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= (bytes - 1);
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= (bytes - 1);
+- bitcpy(p, dst, dst_idx, src, src_idx, bits,
++ bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
++ base + (src_idx / bits), src_idx % bits, bits,
+ width*p->var.bits_per_pixel, bswapmask);
+ dst_idx += bits_per_line;
+ src_idx += bits_per_line;
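+
The rewritten bitcpy() above assembles each destination word from two neighbouring source words with d0 >> right | d1 << left whenever source and destination are not co-aligned, with left and right recomputed so that left + right equals the word size. A minimal userspace sketch of just that combine step, assuming pixel bit 0 sits in the least significant bit of each 32-bit word; the driver's real code additionally goes through FB_READL/FB_WRITEL and fb_rev_pixels_in_long() for byte-swapped layouts:

#include <inttypes.h>
#include <stdio.h>

/*
 * Combine two consecutive 32-bit source words into one destination word
 * when the copied span starts "right" bits into s0 (0 < right < 32).
 * This mirrors the "d0 >> right | d1 << left" step used by bitcpy() above.
 */
static uint32_t combine(uint32_t s0, uint32_t s1, unsigned int right)
{
        unsigned int left = 32 - right;

        return (s0 >> right) | (s1 << left);
}

int main(void)
{
        /*
         * Take bits 8..39 of the source stream {s0, s1}: the result is the
         * upper 24 bits of s0 followed by the lowest 8 bits of s1.
         */
        uint32_t s0 = 0x44332211, s1 = 0x88776655;

        printf("0x%08" PRIx32 "\n", combine(s0, s1, 8)); /* prints 0x55443322 */
        return 0;
}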
+diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
+index 8335a6f..0d5cb85 100644
+--- a/drivers/video/matrox/matroxfb_accel.c
++++ b/drivers/video/matrox/matroxfb_accel.c
+@@ -192,10 +192,18 @@ void matrox_cfbX_init(struct matrox_fb_info *minfo)
+ minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO;
+ if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC;
+ minfo->accel.m_opmode = mopmode;
++ minfo->accel.m_access = maccess;
++ minfo->accel.m_pitch = mpitch;
+ }
+
+ EXPORT_SYMBOL(matrox_cfbX_init);
+
++static void matrox_accel_restore_maccess(struct matrox_fb_info *minfo)
++{
++ mga_outl(M_MACCESS, minfo->accel.m_access);
++ mga_outl(M_PITCH, minfo->accel.m_pitch);
++}
++
+ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ int sx, int dy, int dx, int height, int width)
+ {
+@@ -207,7 +215,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ CRITBEGIN
+
+ if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
+- mga_fifo(2);
++ mga_fifo(4);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
+ M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_AR5, vxres);
+@@ -215,7 +224,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ start = sy*vxres+sx+curr_ydstorg(minfo);
+ end = start+width;
+ } else {
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_SGN, 5);
+ mga_outl(M_AR5, -vxres);
+@@ -224,7 +234,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ start = end+width;
+ dy += height-1;
+ }
+- mga_fifo(4);
++ mga_fifo(6);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_AR0, end);
+ mga_outl(M_AR3, start);
+ mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
+@@ -246,7 +257,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ CRITBEGIN
+
+ if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
+- mga_fifo(2);
++ mga_fifo(4);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
+ M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_AR5, vxres);
+@@ -254,7 +266,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ start = sy*vxres+sx+curr_ydstorg(minfo);
+ end = start+width;
+ } else {
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_SGN, 5);
+ mga_outl(M_AR5, -vxres);
+@@ -263,7 +276,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ start = end+width;
+ dy += height-1;
+ }
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_AR0, end);
+ mga_outl(M_AR3, start);
+ mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
+@@ -298,7 +312,8 @@ static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color,
+
+ CRITBEGIN
+
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE);
+ mga_outl(M_FCOL, color);
+ mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
+@@ -341,7 +356,8 @@ static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx,
+ width >>= 1;
+ sx >>= 1;
+ if (width) {
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2);
+ mga_outl(M_FCOL, bgx);
+ mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
+@@ -415,7 +431,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+
+ CRITBEGIN
+
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ if (easy)
+ mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE);
+ else
+@@ -425,7 +442,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+ fxbndry = ((xx + width - 1) << 16) | xx;
+ mmio = minfo->mmio.vbase;
+
+- mga_fifo(6);
++ mga_fifo(8);
++ matrox_accel_restore_maccess(minfo);
+ mga_writel(mmio, M_FXBNDRY, fxbndry);
+ mga_writel(mmio, M_AR0, ar0);
+ mga_writel(mmio, M_AR3, 0);
+diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
+index 11ed57b..556d96c 100644
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -307,6 +307,8 @@ struct matrox_accel_data {
+ #endif
+ u_int32_t m_dwg_rect;
+ u_int32_t m_opmode;
++ u_int32_t m_access;
++ u_int32_t m_pitch;
+ };
+
+ struct v4l2_queryctrl;
+diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
+index aba7686..ac2cf6d 100644
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -1146,222 +1146,57 @@ copyarea_line_32bpp(struct fb_info *info, u32 dy, u32 sy,
+ __raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+ }
+
+-/* The general case of forward copy in 8bpp mode. */
++/* The (almost) general case of backward copy in 8bpp mode. */
+ static inline void
+-copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+- u32 height, u32 width, u32 line_length)
++copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
++ u32 height, u32 width, u32 line_length,
++ const struct fb_copyarea *area)
+ {
+ struct tga_par *par = (struct tga_par *) info->par;
+- unsigned long i, copied, left;
+- unsigned long dpos, spos, dalign, salign, yincr;
+- u32 smask_first, dmask_first, dmask_last;
+- int pixel_shift, need_prime, need_second;
+- unsigned long n64, n32, xincr_first;
++ unsigned i, yincr;
++ int depos, sepos, backward, last_step, step;
++ u32 mask_last;
++ unsigned n32;
+ void __iomem *tga_regs;
+ void __iomem *tga_fb;
+
+- yincr = line_length;
+- if (dy > sy) {
+- dy += height - 1;
+- sy += height - 1;
+- yincr = -yincr;
+- }
+-
+- /* Compute the offsets and alignments in the frame buffer.
+- More than anything else, these control how we do copies. */
+- dpos = dy * line_length + dx;
+- spos = sy * line_length + sx;
+- dalign = dpos & 7;
+- salign = spos & 7;
+- dpos &= -8;
+- spos &= -8;
+-
+- /* Compute the value for the PIXELSHIFT register. This controls
+- both non-co-aligned source and destination and copy direction. */
+- if (dalign >= salign)
+- pixel_shift = dalign - salign;
+- else
+- pixel_shift = 8 - (salign - dalign);
+-
+- /* Figure out if we need an additional priming step for the
+- residue register. */
+- need_prime = (salign > dalign);
+- if (need_prime)
+- dpos -= 8;
+-
+- /* Begin by copying the leading unaligned destination. Copy enough
+- to make the next destination address 32-byte aligned. */
+- copied = 32 - (dalign + (dpos & 31));
+- if (copied == 32)
+- copied = 0;
+- xincr_first = (copied + 7) & -8;
+- smask_first = dmask_first = (1ul << copied) - 1;
+- smask_first <<= salign;
+- dmask_first <<= dalign + need_prime*8;
+- if (need_prime && copied > 24)
+- copied -= 8;
+- left = width - copied;
+-
+- /* Care for small copies. */
+- if (copied > width) {
+- u32 t;
+- t = (1ul << width) - 1;
+- t <<= dalign + need_prime*8;
+- dmask_first &= t;
+- left = 0;
+- }
+-
+- /* Attempt to use 64-byte copies. This is only possible if the
+- source and destination are co-aligned at 64 bytes. */
+- n64 = need_second = 0;
+- if ((dpos & 63) == (spos & 63)
+- && (height == 1 || line_length % 64 == 0)) {
+- /* We may need a 32-byte copy to ensure 64 byte alignment. */
+- need_second = (dpos + xincr_first) & 63;
+- if ((need_second & 32) != need_second)
+- printk(KERN_ERR "tgafb: need_second wrong\n");
+- if (left >= need_second + 64) {
+- left -= need_second;
+- n64 = left / 64;
+- left %= 64;
+- } else
+- need_second = 0;
+- }
+-
+- /* Copy trailing full 32-byte sections. This will be the main
+- loop if the 64 byte loop can't be used. */
+- n32 = left / 32;
+- left %= 32;
+-
+- /* Copy the trailing unaligned destination. */
+- dmask_last = (1ul << left) - 1;
+-
+- tga_regs = par->tga_regs_base;
+- tga_fb = par->tga_fb_base;
+-
+- /* Set up the MODE and PIXELSHIFT registers. */
+- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG);
+- __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG);
+- wmb();
+-
+- for (i = 0; i < height; ++i) {
+- unsigned long j;
+- void __iomem *sfb;
+- void __iomem *dfb;
+-
+- sfb = tga_fb + spos;
+- dfb = tga_fb + dpos;
+- if (dmask_first) {
+- __raw_writel(smask_first, sfb);
+- wmb();
+- __raw_writel(dmask_first, dfb);
+- wmb();
+- sfb += xincr_first;
+- dfb += xincr_first;
+- }
+-
+- if (need_second) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(0xffffffff, dfb);
+- wmb();
+- sfb += 32;
+- dfb += 32;
+- }
+-
+- if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63))
+- printk(KERN_ERR
+- "tgafb: misaligned copy64 (s:%p, d:%p)\n",
+- sfb, dfb);
+-
+- for (j = 0; j < n64; ++j) {
+- __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
+- wmb();
+- __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
+- wmb();
+- sfb += 64;
+- dfb += 64;
+- }
+-
+- for (j = 0; j < n32; ++j) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(0xffffffff, dfb);
+- wmb();
+- sfb += 32;
+- dfb += 32;
+- }
+-
+- if (dmask_last) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(dmask_last, dfb);
+- wmb();
+- }
+-
+- spos += yincr;
+- dpos += yincr;
++ /* Do acceleration only if we are aligned on 8 pixels */
++ if ((dx | sx | width) & 7) {
++ cfb_copyarea(info, area);
++ return;
+ }
+
+- /* Reset the MODE register to normal. */
+- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+-}
+-
+-/* The (almost) general case of backward copy in 8bpp mode. */
+-static inline void
+-copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+- u32 height, u32 width, u32 line_length,
+- const struct fb_copyarea *area)
+-{
+- struct tga_par *par = (struct tga_par *) info->par;
+- unsigned long i, left, yincr;
+- unsigned long depos, sepos, dealign, sealign;
+- u32 mask_first, mask_last;
+- unsigned long n32;
+- void __iomem *tga_regs;
+- void __iomem *tga_fb;
+-
+ yincr = line_length;
+ if (dy > sy) {
+ dy += height - 1;
+ sy += height - 1;
+ yincr = -yincr;
+ }
++ backward = dy == sy && dx > sx && dx < sx + width;
+
+ /* Compute the offsets and alignments in the frame buffer.
+ More than anything else, these control how we do copies. */
+- depos = dy * line_length + dx + width;
+- sepos = sy * line_length + sx + width;
+- dealign = depos & 7;
+- sealign = sepos & 7;
+-
+- /* ??? The documentation appears to be incorrect (or very
+- misleading) wrt how pixel shifting works in backward copy
+- mode, i.e. when PIXELSHIFT is negative. I give up for now.
+- Do handle the common case of co-aligned backward copies,
+- but frob everything else back on generic code. */
+- if (dealign != sealign) {
+- cfb_copyarea(info, area);
+- return;
+- }
+-
+- /* We begin the copy with the trailing pixels of the
+- unaligned destination. */
+- mask_first = (1ul << dealign) - 1;
+- left = width - dealign;
+-
+- /* Care for small copies. */
+- if (dealign > width) {
+- mask_first ^= (1ul << (dealign - width)) - 1;
+- left = 0;
+- }
++ depos = dy * line_length + dx;
++ sepos = sy * line_length + sx;
++ if (backward)
++ depos += width, sepos += width;
+
+ /* Next copy full words at a time. */
+- n32 = left / 32;
+- left %= 32;
++ n32 = width / 32;
++ last_step = width % 32;
+
+ /* Finally copy the unaligned head of the span. */
+- mask_last = -1 << (32 - left);
++ mask_last = (1ul << last_step) - 1;
++
++ if (!backward) {
++ step = 32;
++ last_step = 32;
++ } else {
++ step = -32;
++ last_step = -last_step;
++ sepos -= 32;
++ depos -= 32;
++ }
+
+ tga_regs = par->tga_regs_base;
+ tga_fb = par->tga_fb_base;
+@@ -1378,25 +1213,33 @@ copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+
+ sfb = tga_fb + sepos;
+ dfb = tga_fb + depos;
+- if (mask_first) {
+- __raw_writel(mask_first, sfb);
+- wmb();
+- __raw_writel(mask_first, dfb);
+- wmb();
+- }
+
+- for (j = 0; j < n32; ++j) {
+- sfb -= 32;
+- dfb -= 32;
++ for (j = 0; j < n32; j++) {
++ if (j < 2 && j + 1 < n32 && !backward &&
++ !(((unsigned long)sfb | (unsigned long)dfb) & 63)) {
++ do {
++ __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
++ wmb();
++ __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
++ wmb();
++ sfb += 64;
++ dfb += 64;
++ j += 2;
++ } while (j + 1 < n32);
++ j--;
++ continue;
++ }
+ __raw_writel(0xffffffff, sfb);
+ wmb();
+ __raw_writel(0xffffffff, dfb);
+ wmb();
++ sfb += step;
++ dfb += step;
+ }
+
+ if (mask_last) {
+- sfb -= 32;
+- dfb -= 32;
++ sfb += last_step - step;
++ dfb += last_step - step;
+ __raw_writel(mask_last, sfb);
+ wmb();
+ __raw_writel(mask_last, dfb);
+@@ -1457,14 +1300,9 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+ else if (bpp == 32)
+ cfb_copyarea(info, area);
+
+- /* Detect overlapping source and destination that requires
+- a backward copy. */
+- else if (dy == sy && dx > sx && dx < sx + width)
+- copyarea_backward_8bpp(info, dx, dy, sx, sy, height,
+- width, line_length, area);
+ else
+- copyarea_foreward_8bpp(info, dx, dy, sx, sy, height,
+- width, line_length);
++ copyarea_8bpp(info, dx, dy, sx, sy, height,
++ width, line_length, area);
+ }
+
+
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 94fd738..28153fb 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -271,6 +271,12 @@ static int balloon(void *_vballoon)
+ else if (diff < 0)
+ leak_balloon(vb, -diff);
+ update_balloon_size(vb);
++
++ /*
++ * For large balloon changes, we could spend a lot of time
++ * and always have work to do. Be nice if preempt disabled.
++ */
++ cond_resched();
+ }
+ return 0;
+ }
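+
The single added line above is a textbook use of cond_resched(): a kernel thread that may stay busy for a long stretch should offer the scheduler an explicit preemption point, which matters most on non-preemptible kernels. A minimal sketch of that pattern as a hypothetical demo module (not part of this patch, and not the actual virtio_balloon thread):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *unused)
{
        while (!kthread_should_stop()) {
                /*
                 * msleep() stands in for one batch of work here; real
                 * batches (like a large balloon resize) can run for a long
                 * time without sleeping, which is why the explicit
                 * preemption point below matters.
                 */
                msleep(100);
                cond_resched();
        }
        return 0;
}

static int __init demo_init(void)
{
        worker = kthread_run(worker_fn, NULL, "cond_resched_demo");
        return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");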
+diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
+index 40788c9..73705af 100644
+--- a/drivers/w1/w1_netlink.c
++++ b/drivers/w1/w1_netlink.c
+@@ -54,28 +54,29 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
+ struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
+ struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
+ int avail;
++ u64 *data;
+
+ /* update kernel slave list */
+ w1_slave_found(dev, rn);
+
+ avail = dev->priv_size - cmd->len;
+
+- if (avail > 8) {
+- u64 *data = (void *)(cmd + 1) + cmd->len;
++ if (avail < 8) {
++ msg->ack++;
++ cn_netlink_send(msg, 0, GFP_KERNEL);
+
+- *data = rn;
+- cmd->len += 8;
+- hdr->len += 8;
+- msg->len += 8;
+- return;
++ msg->len = sizeof(struct w1_netlink_msg) +
++ sizeof(struct w1_netlink_cmd);
++ hdr->len = sizeof(struct w1_netlink_cmd);
++ cmd->len = 0;
+ }
+
+- msg->ack++;
+- cn_netlink_send(msg, 0, GFP_KERNEL);
++ data = (void *)(cmd + 1) + cmd->len;
+
+- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+- hdr->len = sizeof(struct w1_netlink_cmd);
+- cmd->len = 0;
++ *data = rn;
++ cmd->len += 8;
++ hdr->len += 8;
++ msg->len += 8;
+ }
+
+ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 6b2a724..cfdf6fe 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2731,6 +2731,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
+ /* send down all the barriers */
+ head = &info->fs_devices->devices;
+ list_for_each_entry_rcu(dev, head, dev_list) {
++ if (dev->missing)
++ continue;
+ if (!dev->bdev) {
+ errors++;
+ continue;
+@@ -2745,6 +2747,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
+
+ /* wait for all the barriers */
+ list_for_each_entry_rcu(dev, head, dev_list) {
++ if (dev->missing)
++ continue;
+ if (!dev->bdev) {
+ errors++;
+ continue;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 81376d9..292e847 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -460,7 +460,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *info = root->fs_info;
+ int count = 0;
+
+- if (--trans->use_count) {
++ if (trans->use_count > 1) {
++ trans->use_count--;
+ trans->block_rsv = trans->orig_rsv;
+ return 0;
+ }
+@@ -494,17 +495,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ }
+
+ if (lock && cur_trans->blocked && !cur_trans->in_commit) {
+- if (throttle) {
+- /*
+- * We may race with somebody else here so end up having
+- * to call end_transaction on ourselves again, so inc
+- * our use_count.
+- */
+- trans->use_count++;
++ if (throttle)
+ return btrfs_commit_transaction(trans, root);
+- } else {
++ else
+ wake_up_process(info->transaction_kthread);
+- }
+ }
+
+ WARN_ON(cur_trans != info->running_transaction);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index bf35fe0..834d9a1 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2372,6 +2372,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ ex_ee_block = le32_to_cpu(ex->ee_block);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
+
++ /*
++ * If we're starting with an extent other than the last one in the
++ * node, we need to see if it shares a cluster with the extent to
++ * the right (towards the end of the file). If its leftmost cluster
++ * is this extent's rightmost cluster and it is not cluster aligned,
++ * we'll mark it as a partial that is not to be deallocated.
++ */
++
++ if (ex != EXT_LAST_EXTENT(eh)) {
++ ext4_fsblk_t current_pblk, right_pblk;
++ long long current_cluster, right_cluster;
++
++ current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
++ current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
++ right_pblk = ext4_ext_pblock(ex + 1);
++ right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
++ if (current_cluster == right_cluster &&
++ EXT4_PBLK_COFF(sbi, right_pblk))
++ *partial_cluster = -right_cluster;
++ }
++
+ trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+
+ while (ex >= EXT_FIRST_EXTENT(eh) &&
+diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
+index 16a5047..406d9cc 100644
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen)
+ {
+- short positions[256];
++ unsigned short positions[256];
+ int outpos = 0;
+ int pos=0;
+
+@@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen)
+ {
+- short positions[256];
++ unsigned short positions[256];
+ int outpos = 0;
+ int pos=0;
+
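Both hunks above only widen the positions[] table from short to unsigned short. The table records, per byte value, the offset at which that byte was last seen, and offsets are never negative, so the change appears to guard against offsets above 32767 wrapping negative for large nodes (the csize widening in nodelist.h just below raises exactly that size limit). A small userspace illustration of the wrap, assuming the usual 16-bit short:

#include <stdio.h>

int main(void)
{
        /*
         * A buffer offset is never negative, but stuffing it into a signed
         * 16-bit short wraps as soon as it passes 32767.
         */
        int offset = 40000;
        short as_signed = (short)offset;                 /* implementation-defined; -25536 on the usual ABIs */
        unsigned short as_unsigned = (unsigned short)offset; /* well-defined: 40000 */

        printf("signed short:   %d\n", as_signed);
        printf("unsigned short: %u\n", as_unsigned);
        return 0;
}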
+diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
+index e4619b0..fa35ff7 100644
+--- a/fs/jffs2/nodelist.h
++++ b/fs/jffs2/nodelist.h
+@@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info
+ uint32_t version;
+ uint32_t data_crc;
+ uint32_t partial_crc;
+- uint16_t csize;
++ uint32_t csize;
+ uint16_t overlapped;
+ };
+
+diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
+index 694aa5b..145ba39 100644
+--- a/fs/jffs2/nodemgmt.c
++++ b/fs/jffs2/nodemgmt.c
+@@ -128,6 +128,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ spin_unlock(&c->erase_completion_lock);
+
+ schedule();
++ remove_wait_queue(&c->erase_wait, &wait);
+ } else
+ spin_unlock(&c->erase_completion_lock);
+ } else if (ret)
+@@ -158,19 +159,24 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *len, uint32_t sumsize)
+ {
+- int ret = -EAGAIN;
++ int ret;
+ minsize = PAD(minsize);
+
+ D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
+
+- spin_lock(&c->erase_completion_lock);
+- while(ret == -EAGAIN) {
++ while (true) {
++ spin_lock(&c->erase_completion_lock);
+ ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
+ if (ret) {
+ D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
+ }
++ spin_unlock(&c->erase_completion_lock);
++
++ if (ret == -EAGAIN)
++ cond_resched();
++ else
++ break;
+ }
+- spin_unlock(&c->erase_completion_lock);
+ if (!ret)
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index e065497..315a1ba 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1224,6 +1224,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
+ /* If op is non-idempotent */
+ if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
+ plen = opdesc->op_rsize_bop(rqstp, op);
++ /*
++ * If there's still another operation, make sure
++ * we'll have space to at least encode an error:
++ */
++ if (resp->opcnt < args->opcnt)
++ plen += COMPOUND_ERR_SLACK_SPACE;
+ op->status = nfsd4_check_resp_size(resp, plen);
+ }
+
+@@ -1381,7 +1387,8 @@ static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *o
+
+ static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+ {
+- return (op_encode_hdr_size + 2 + 1024) * sizeof(__be32);
++ return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
++ sizeof(__be32);
+ }
+
+ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
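+
The two nfs4proc.c hunks above work together: nfsd4_proc_compound() now reserves COMPOUND_ERR_SLACK_SPACE whenever another operation follows, so a later op can at least encode an error status, and the setclientid reply-size estimate shrinks from a blanket 1024 XDR words to just what the 8-byte verifier needs. XDR_QUADLEN() rounds a byte count up to 4-byte XDR words; a standalone sketch of the arithmetic (the op_encode_hdr_size value is illustrative only):

#include <stdio.h>

/* XDR encodes everything in 4-byte quads; this mirrors the kernel macro. */
#define XDR_QUADLEN(l)          (((l) + 3) >> 2)
#define NFS4_VERIFIER_SIZE      8

int main(void)
{
        unsigned int op_encode_hdr_size = 2;    /* illustrative value only */

        /* Old estimate: header + 2 words + a blanket 1024 words. */
        printf("old: %u bytes\n", (op_encode_hdr_size + 2 + 1024) * 4);

        /* New estimate: header + 2 words + exactly enough for the verifier. */
        printf("new: %u bytes\n",
               (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) * 4);
        return 0;
}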
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ade5316..4835b90 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2413,6 +2413,8 @@ out_acl:
+ WRITE64(stat.ino);
+ }
+ if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
++ if ((buflen -= 16) < 0)
++ goto out_resource;
+ WRITE32(3);
+ WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
+ WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 6a66fc0..11e1888 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -406,6 +406,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ int ftype = 0;
+ __be32 err;
+ int host_err;
++ bool get_write_count;
+ int size_change = 0;
+
+ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+@@ -413,10 +414,18 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ if (iap->ia_valid & ATTR_SIZE)
+ ftype = S_IFREG;
+
++ /* Callers that do fh_verify should do the fh_want_write: */
++ get_write_count = !fhp->fh_dentry;
++
+ /* Get inode */
+ err = fh_verify(rqstp, fhp, ftype, accmode);
+ if (err)
+ goto out;
++ if (get_write_count) {
++ host_err = fh_want_write(fhp);
++ if (host_err)
++ return nfserrno(host_err);
++ }
+
+ dentry = fhp->fh_dentry;
+ inode = dentry->d_inode;
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 85d4d42..c7fe962 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -108,4 +108,14 @@ struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
+ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
+ #endif
+
++static inline int fh_want_write(struct svc_fh *fh)
++{
++ return mnt_want_write(fh->fh_export->ex_path.mnt);
++}
++
++static inline void fh_drop_write(struct svc_fh *fh)
++{
++ mnt_drop_write(fh->fh_export->ex_path.mnt);
++}
++
+ #endif /* LINUX_NFSD_VFS_H */
+diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
+index 5d18ad1..4f66e00 100644
+--- a/fs/ocfs2/buffer_head_io.c
++++ b/fs/ocfs2/buffer_head_io.c
+@@ -90,7 +90,6 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
+ * information for this bh as it's not marked locally
+ * uptodate. */
+ ret = -EIO;
+- put_bh(bh);
+ mlog_errno(ret);
+ }
+
+@@ -420,7 +419,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
+
+ if (!buffer_uptodate(bh)) {
+ ret = -EIO;
+- put_bh(bh);
+ mlog_errno(ret);
+ }
+
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 01ebfd0..d15b071 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -540,7 +540,10 @@ master_here:
+ /* success! see if any other nodes need recovery */
+ mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
+ dlm->name, dlm->reco.dead_node, dlm->node_num);
+- dlm_reset_recovery(dlm);
++ spin_lock(&dlm->spinlock);
++ __dlm_reset_recovery(dlm);
++ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
++ spin_unlock(&dlm->spinlock);
+ }
+ dlm_end_recovery(dlm);
+
+@@ -698,6 +701,14 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
+ if (all_nodes_done) {
+ int ret;
+
++ /* Set this flag on recovery master to avoid
++ * a new recovery for another dead node start
++ * before the recovery is not done. That may
++ * cause recovery hung.*/
++ spin_lock(&dlm->spinlock);
++ dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
++ spin_unlock(&dlm->spinlock);
++
+ /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
+ * just send a finalize message to everyone and
+ * clean up */
+@@ -1752,13 +1763,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres)
+ {
+ struct dlm_migratable_lock *ml;
+- struct list_head *queue;
++ struct list_head *queue, *iter;
+ struct list_head *tmpq = NULL;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ int ret = 0;
+ int i, j, bad;
+- struct dlm_lock *lock = NULL;
++ struct dlm_lock *lock;
+ u8 from = O2NM_MAX_NODES;
+ unsigned int added = 0;
+ __be64 c;
+@@ -1793,14 +1804,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ /* MIGRATION ONLY! */
+ BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
+
++ lock = NULL;
+ spin_lock(&res->spinlock);
+ for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
+ tmpq = dlm_list_idx_to_ptr(res, j);
+- list_for_each_entry(lock, tmpq, list) {
+- if (lock->ml.cookie != ml->cookie)
+- lock = NULL;
+- else
++ list_for_each(iter, tmpq) {
++ lock = list_entry(iter,
++ struct dlm_lock, list);
++ if (lock->ml.cookie == ml->cookie)
+ break;
++ lock = NULL;
+ }
+ if (lock)
+ break;
+@@ -2870,8 +2883,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ BUG();
+ }
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
++ __dlm_reset_recovery(dlm);
+ spin_unlock(&dlm->spinlock);
+- dlm_reset_recovery(dlm);
+ dlm_kick_recovery_thread(dlm);
+ break;
+ default:
+diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
+index 133e935..8048eea 100644
+--- a/fs/reiserfs/dir.c
++++ b/fs/reiserfs/dir.c
+@@ -128,6 +128,7 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
+ char *d_name;
+ off_t d_off;
+ ino_t d_ino;
++ loff_t cur_pos = deh_offset(deh);
+
+ if (!de_visible(deh))
+ /* it is hidden entry */
+@@ -200,8 +201,9 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
+ if (local_buf != small_buf) {
+ kfree(local_buf);
+ }
+- // next entry should be looked for with such offset
+- next_pos = deh_offset(deh) + 1;
++
++ /* deh_offset(deh) may be invalid now. */
++ next_pos = cur_pos + 1;
+
+ if (item_moved(&tmp_ih, &path_to_entry)) {
+ goto research;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c17fdfb..cb34ff4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1690,6 +1690,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+ }
+
+
++static int pid_alive(const struct task_struct *p);
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++ pid_t pid = 0;
++
++ rcu_read_lock();
++ if (pid_alive(tsk))
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++ rcu_read_unlock();
++
++ return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++ return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+ {
+@@ -1727,7 +1745,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ */
+-static inline int pid_alive(struct task_struct *p)
++static inline int pid_alive(const struct task_struct *p)
+ {
+ return p->pids[PIDTYPE_PID].pid != NULL;
+ }
+diff --git a/include/trace/events/block.h b/include/trace/events/block.h
+index 05c5e61..048e265 100644
+--- a/include/trace/events/block.h
++++ b/include/trace/events/block.h
+@@ -81,6 +81,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
++ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+@@ -88,11 +89,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
++TRACE_EVENT(block_rq_complete,
+
+- TP_PROTO(struct request_queue *q, struct request *rq),
++ TP_PROTO(struct request_queue *q, struct request *rq,
++ unsigned int nr_bytes),
+
+- TP_ARGS(q, rq)
++ TP_ARGS(q, rq, nr_bytes),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( int, errors )
++ __array( char, rwbs, RWBS_LEN )
++ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
++ __entry->sector = blk_rq_pos(rq);
++ __entry->nr_sector = nr_bytes >> 9;
++ __entry->errors = rq->errors;
++
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
++ blk_dump_cmd(__get_str(cmd), rq);
++ ),
++
++ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->rwbs, __get_str(cmd),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->errors)
+ );
+
+ DECLARE_EVENT_CLASS(block_rq,
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 47b7fc1..aeac7cc 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -473,7 +473,7 @@ static int audit_filter_rules(struct task_struct *tsk,
+ case AUDIT_PPID:
+ if (ctx) {
+ if (!ctx->ppid)
+- ctx->ppid = sys_getppid();
++ ctx->ppid = task_ppid_nr(tsk);
+ result = audit_comparator(ctx->ppid, f->op, f->val);
+ }
+ break;
+@@ -1335,7 +1335,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
+ /* tsk == current */
+ context->pid = tsk->pid;
+ if (!context->ppid)
+- context->ppid = sys_getppid();
++ context->ppid = task_ppid_nr(tsk);
+ cred = current_cred();
+ context->uid = cred->uid;
+ context->gid = cred->gid;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 234e152..fde15f9 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -734,9 +734,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+ struct list_head *dead)
+ {
+ list_move_tail(&p->sibling, &p->real_parent->children);
+-
+- if (p->exit_state == EXIT_DEAD)
+- return;
+ /*
+ * If this is a threaded reparent there is no need to
+ * notify anyone anything has happened.
+@@ -744,9 +741,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+ if (same_thread_group(p->real_parent, father))
+ return;
+
+- /* We don't want people slaying init. */
++ /*
++ * We don't want people slaying init.
++ *
++ * Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
++ * can change ->exit_state to EXIT_ZOMBIE. If this is the final
++ * state, do_notify_parent() was already called and ->exit_signal
++ * doesn't matter.
++ */
+ p->exit_signal = SIGCHLD;
+
++ if (p->exit_state == EXIT_DEAD)
++ return;
++
+ /* If it has exited notify the new parent about this child's death. */
+ if (!p->ptrace &&
+ p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 16fc34a..92cac05 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -699,6 +699,7 @@ void blk_trace_shutdown(struct request_queue *q)
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q: queue the io is for
+ * @rq: the source request
++ * @nr_bytes: number of completed bytes
+ * @what: the action
+ *
+ * Description:
+@@ -706,7 +707,7 @@ void blk_trace_shutdown(struct request_queue *q)
+ *
+ **/
+ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+- u32 what)
++ unsigned int nr_bytes, u32 what)
+ {
+ struct blk_trace *bt = q->blk_trace;
+
+@@ -715,11 +716,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ what |= BLK_TC_ACT(BLK_TC_PC);
+- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
++ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
+ what, rq->errors, rq->cmd_len, rq->cmd);
+ } else {
+ what |= BLK_TC_ACT(BLK_TC_FS);
+- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
++ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
+ rq->cmd_flags, what, rq->errors, 0, NULL);
+ }
+ }
+@@ -727,33 +728,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+ static void blk_add_trace_rq_abort(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_ABORT);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
+ }
+
+ static void blk_add_trace_rq_insert(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
+ }
+
+ static void blk_add_trace_rq_issue(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
+ }
+
+ static void blk_add_trace_rq_requeue(void *ignore,
+ struct request_queue *q,
+ struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
+ }
+
+ static void blk_add_trace_rq_complete(void *ignore,
+ struct request_queue *q,
+- struct request *rq)
++ struct request *rq,
++ unsigned int nr_bytes)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
++ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
+ }
+
+ /**
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index a8408b6..190ae10 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -299,9 +299,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
+ */
+ int nla_strcmp(const struct nlattr *nla, const char *str)
+ {
+- int len = strlen(str) + 1;
+- int d = nla_len(nla) - len;
++ int len = strlen(str);
++ char *buf = nla_data(nla);
++ int attrlen = nla_len(nla);
++ int d;
+
++ if (attrlen > 0 && buf[attrlen - 1] == '\0')
++ attrlen--;
++
++ d = attrlen - len;
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
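The new nla_strcmp() above accepts string attributes both with and without a trailing NUL byte, instead of insisting that the terminator be part of the payload. A userspace rendering of the same comparison with the netlink plumbing stripped out (attr_strcmp() and the sample payloads are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

/*
 * Compare a length-delimited attribute payload, which may or may not carry
 * a trailing NUL byte, against a C string, following the same logic as the
 * patched nla_strcmp() above.
 */
static int attr_strcmp(const char *buf, int attrlen, const char *str)
{
        int len = strlen(str);
        int d;

        if (attrlen > 0 && buf[attrlen - 1] == '\0')
                attrlen--;

        d = attrlen - len;
        if (d == 0)
                d = memcmp(buf, str, len);
        return d;
}

int main(void)
{
        printf("%d\n", attr_strcmp("eth0", 4, "eth0"));  /* 0: payload without NUL */
        printf("%d\n", attr_strcmp("eth0", 5, "eth0"));  /* 0: payload with trailing NUL */
        printf("%d\n", attr_strcmp("eth1", 4, "eth0"));  /* nonzero: different name */
        return 0;
}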
+diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
+index f8a3f1a..33459e0 100644
+--- a/lib/percpu_counter.c
++++ b/lib/percpu_counter.c
+@@ -166,7 +166,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
+ struct percpu_counter *fbc;
+
+ compute_batch_value();
+- if (action != CPU_DEAD)
++ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return NOTIFY_OK;
+
+ cpu = (unsigned long)hcpu;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3a5aae2..d399f5f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1447,6 +1447,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ while (min_count < persistent_huge_pages(h)) {
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
+ break;
++ cond_resched_lock(&hugetlb_lock);
+ }
+ while (count < persistent_huge_pages(h)) {
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 4f4f53b..39b3a7d 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -78,6 +78,7 @@ void __clear_page_mlock(struct page *page)
+ */
+ void mlock_vma_page(struct page *page)
+ {
++ /* Serialize with page migration */
+ BUG_ON(!PageLocked(page));
+
+ if (!TestSetPageMlocked(page)) {
+@@ -105,6 +106,7 @@ void mlock_vma_page(struct page *page)
+ */
+ void munlock_vma_page(struct page *page)
+ {
++ /* For try_to_munlock() and to serialize with page migration */
+ BUG_ON(!PageLocked(page));
+
+ if (TestClearPageMlocked(page)) {
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 52a2f36..9ac405b 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1385,9 +1385,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
+ BUG_ON(!page || PageAnon(page));
+
+ if (locked_vma) {
+- mlock_vma_page(page); /* no-op if already mlocked */
+- if (page == check_page)
++ if (page == check_page) {
++ /* we know we have check_page locked */
++ mlock_vma_page(page);
+ ret = SWAP_MLOCK;
++ } else if (trylock_page(page)) {
++ /*
++ * If we can lock the page, perform mlock.
++ * Otherwise leave the page alone, it will be
++ * eventually encountered again later.
++ */
++ mlock_vma_page(page);
++ unlock_page(page);
++ }
+ continue; /* don't unmap */
+ }
+
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 48a62d8..c43a788 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -529,6 +529,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
+ {
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
++ if (saddr == NULL)
++ saddr = dev->dev_addr;
++
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+ }
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 2157984..398a297 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1138,6 +1138,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+
+ br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
+
++ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
++ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
+ err = -EINVAL;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 5d41293..b9edff0 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -900,8 +900,11 @@ retry:
+ * Lifetime is greater than REGEN_ADVANCE time units. In particular,
+ * an implementation must not create a temporary address with a zero
+ * Preferred Lifetime.
++ * Use age calculation as in addrconf_verify to avoid unnecessary
++ * temporary addresses being generated.
+ */
+- if (tmp_prefered_lft <= regen_advance) {
++ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
++ if (tmp_prefered_lft <= regen_advance + age) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index d505453..ceced67 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -499,7 +499,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ np->tclass, NULL, &fl6, (struct rt6_info*)dst,
+ MSG_DONTWAIT, np->dontfrag);
+ if (err) {
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+ ip6_flush_pending_frames(sk);
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index cd4b529..7871cc6 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1191,21 +1191,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt,
+- bool pmtuprobe)
++ unsigned int orig_mtu)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+- *mtu = *mtu - rt->dst.header_len;
++ *mtu = orig_mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = min(*mtu, pmtuprobe ?
+- rt->dst.dev->mtu :
+- dst_mtu(rt->dst.path));
++ *mtu = orig_mtu;
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1222,7 +1220,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen, mtu;
++ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+@@ -1307,6 +1305,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ dst_exthdrlen = 0;
+ mtu = cork->fragsize;
+ }
++ orig_mtu = mtu;
+
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+
+@@ -1389,8 +1388,7 @@ alloc_new_skb:
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt,
+- np->pmtudisc ==
+- IPV6_PMTUDISC_PROBE);
++ orig_mtu);
+
+ skb_prev = skb;
+
+@@ -1660,8 +1658,8 @@ int ip6_push_pending_frames(struct sock *sk)
+ if (proto == IPPROTO_ICMPV6) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
++ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ }
+
+ err = ip6_local_out(skb);
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index d20a9be..4f12b66 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1435,11 +1435,12 @@ static void mld_sendpack(struct sk_buff *skb)
+ dst_output);
+ out:
+ if (!err) {
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+- IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+- } else
+- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
++ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
++ } else {
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ }
+
+ rcu_read_unlock();
+ return;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 9a4f437..39e11f9 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1250,7 +1250,7 @@ int ip6_route_add(struct fib6_config *cfg)
+ goto out;
+ }
+
+- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
++ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
+
+ if (rt == NULL) {
+ err = -ENOMEM;
+diff --git a/net/rds/iw.c b/net/rds/iw.c
+index 7826d46..5899356 100644
+--- a/net/rds/iw.c
++++ b/net/rds/iw.c
+@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support IB devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_RNIC)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 0121e0a..c95a3f2 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1366,8 +1366,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
+ BUG_ON(!list_empty(&chunk->list));
+ list_del_init(&chunk->transmitted_list);
+
+- /* Free the chunk skb data and the SCTP_chunk stub itself. */
+- dev_kfree_skb(chunk->skb);
++ consume_skb(chunk->skb);
++ consume_skb(chunk->auth_chunk);
+
+ SCTP_DBG_OBJCNT_DEC(chunk);
+ kmem_cache_free(sctp_chunk_cachep, chunk);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index f131caf..5ac33b6 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -749,7 +749,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!sctp_auth_enable || !new_asoc->peer.auth_capable) {
+- kfree_skb(chunk->auth_chunk);
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ }
+@@ -764,10 +763,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+ auth.transport = chunk->transport;
+
+ ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
+-
+- /* We can now safely free the auth_chunk clone */
+- kfree_skb(chunk->auth_chunk);
+-
+ if (ret != SCTP_IERROR_NO_ERROR) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+diff --git a/net/socket.c b/net/socket.c
+index d4faade..3faa358 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1884,6 +1884,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ {
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
++
++ if (kmsg->msg_namelen < 0)
++ return -EINVAL;
++
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 54fc90b..8705ee3 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1771,8 +1771,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
++ if (unlikely(err)) {
++ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++ */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+@@ -1887,6 +1890,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct unix_sock *u = unix_sk(sk);
+ struct sockaddr_un *sunaddr = msg->msg_name;
+ int copied = 0;
++ int noblock = flags & MSG_DONTWAIT;
+ int check_creds = 0;
+ int target;
+ int err = 0;
+@@ -1901,7 +1905,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
++ timeo = sock_rcvtimeo(sk, noblock);
+
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+@@ -1913,8 +1917,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(timeo);
++ if (unlikely(err)) {
++ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++ */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 4fa7939..69477ff 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1328,15 +1328,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
+ isec->sid = sbsec->sid;
+
+ if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
+- if (opt_dentry) {
+- isec->sclass = inode_mode_to_security_class(inode->i_mode);
+- rc = selinux_proc_get_sid(opt_dentry,
+- isec->sclass,
+- &sid);
+- if (rc)
+- goto out_unlock;
+- isec->sid = sid;
+- }
++ /* We must have a dentry to determine the label on
++ * procfs inodes */
++ if (opt_dentry)
++ /* Called from d_instantiate or
++ * d_splice_alias. */
++ dentry = dget(opt_dentry);
++ else
++ /* Called from selinux_complete_init, try to
++ * find a dentry. */
++ dentry = d_find_alias(inode);
++ /*
++ * This can be hit on boot when a file is accessed
++ * before the policy is loaded. When we load policy we
++ * may find inodes that have no dentry on the
++ * sbsec->isec_head list. No reason to complain as
++ * these will get fixed up the next time we go through
++ * inode_doinit() with a dentry, before these inodes
++ * could be used again by userspace.
++ */
++ if (!dentry)
++ goto out_unlock;
++ isec->sclass = inode_mode_to_security_class(inode->i_mode);
++ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
++ dput(dentry);
++ if (rc)
++ goto out_unlock;
++ isec->sid = sid;
+ }
+ break;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 36bce68..d307adb 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3855,6 +3855,7 @@ static void alc_auto_init_std(struct hda_codec *codec)
+
+ static const struct snd_pci_quirk beep_white_list[] = {
+ SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
++ SND_PCI_QUIRK(0x1043, 0x115d, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
+ SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
+diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
+index 44446f2..7a4a196 100644
+--- a/sound/pci/ice1712/ice1712.c
++++ b/sound/pci/ice1712/ice1712.c
+@@ -686,9 +686,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pointer(struct snd_pcm_substream *
+ if (!(snd_ice1712_read(ice, ICE1712_IREG_PBK_CTRL) & 1))
+ return 0;
+ ptr = runtime->buffer_size - inw(ice->ddma_port + 4);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substream *substream)
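+
This and the other pointer callbacks patched below all get the same treatment: the hardware position is read back in bytes, so it must go through bytes_to_frames() before it is compared against runtime->buffer_size, which is counted in frames; the old ordering only detected the end-of-buffer wrap when a frame happened to be one byte. A standalone sketch of the unit mix-up, approximating bytes_to_frames() with a plain division for a simple interleaved format:

#include <stdio.h>

/*
 * The buffer size is counted in frames, but this hardware reports its
 * position in bytes.  Convert first, then check for the wrap at the end of
 * the buffer, as the patched callbacks now do.
 */
static unsigned long pointer_in_frames(unsigned long hw_bytes,
                                       unsigned long buffer_frames,
                                       unsigned int bytes_per_frame)
{
        unsigned long ptr = hw_bytes / bytes_per_frame; /* bytes_to_frames() */

        if (ptr == buffer_frames)
                ptr = 0;
        return ptr;
}

int main(void)
{
        /* 16-bit stereo: 4 bytes per frame, 1024-frame ring buffer. */
        printf("%lu\n", pointer_in_frames(2048, 1024, 4));      /* 512 */
        printf("%lu\n", pointer_in_frames(4096, 1024, 4));      /* 0: wrapped at the end */
        return 0;
}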
+@@ -705,9 +706,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substrea
+ addr = ICE1712_DSC_ADDR0;
+ ptr = snd_ice1712_ds_read(ice, substream->number * 2, addr) -
+ ice->playback_con_virt_addr[substream->number];
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *substream)
+@@ -718,9 +720,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *s
+ if (!(snd_ice1712_read(ice, ICE1712_IREG_CAP_CTRL) & 1))
+ return 0;
+ ptr = inl(ICEREG(ice, CONCAP_ADDR)) - ice->capture_con_virt_addr;
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static const struct snd_pcm_hardware snd_ice1712_playback = {
+@@ -1114,9 +1117,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pro_pointer(struct snd_pcm_substre
+ if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_PLAYBACK_START))
+ return 0;
+ ptr = ice->playback_pro_size - (inw(ICEMT(ice, PLAYBACK_SIZE)) << 2);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substream *substream)
+@@ -1127,9 +1131,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substrea
+ if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_CAPTURE_START_SHADOW))
+ return 0;
+ ptr = ice->capture_pro_size - (inw(ICEMT(ice, CAPTURE_SIZE)) << 2);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static const struct snd_pcm_hardware snd_ice1712_playback_pro = {