Diffstat (limited to '2.6.39/1000_linux-2.6.39.1.patch')
-rw-r--r--  2.6.39/1000_linux-2.6.39.1.patch  6469
1 files changed, 6469 insertions, 0 deletions
diff --git a/2.6.39/1000_linux-2.6.39.1.patch b/2.6.39/1000_linux-2.6.39.1.patch
new file mode 100644
index 0000000..776c2f6
--- /dev/null
+++ b/2.6.39/1000_linux-2.6.39.1.patch
@@ -0,0 +1,6469 @@
+diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
+index 5ebf5af..5aa5337 100644
+--- a/Documentation/i2c/writing-clients
++++ b/Documentation/i2c/writing-clients
+@@ -38,7 +38,7 @@ static struct i2c_driver foo_driver = {
+ .name = "foo",
+ },
+
+- .id_table = foo_ids,
++ .id_table = foo_idtable,
+ .probe = foo_probe,
+ .remove = foo_remove,
+ /* if device autodetection is needed: */
+diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf
+index 612e722..37a02ce 100644
+--- a/Documentation/usb/linux-cdc-acm.inf
++++ b/Documentation/usb/linux-cdc-acm.inf
+@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
+ [SourceDisksFiles]
+ [SourceDisksNames]
+ [DeviceList]
+-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_0525&PID_A4AB&MI_02
++%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+
+ [DeviceList.NTamd64]
+-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_0525&PID_A4AB&MI_02
++%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+
+
+ ;------------------------------------------------------------------------------
+diff --git a/Documentation/usb/linux.inf b/Documentation/usb/linux.inf
+index 4dee958..4ffa715b0 100644
+--- a/Documentation/usb/linux.inf
++++ b/Documentation/usb/linux.inf
+@@ -18,15 +18,15 @@ DriverVer = 06/21/2006,6.0.6000.16384
+
+ ; Decoration for x86 architecture
+ [LinuxDevices.NTx86]
+-%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00
++%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00
+
+ ; Decoration for x64 architecture
+ [LinuxDevices.NTamd64]
+-%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00
++%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00
+
+ ; Decoration for ia64 architecture
+ [LinuxDevices.NTia64]
+-%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_0525&PID_a4ab&MI_00
++%LinuxDevice% = RNDIS.NT.5.1, USB\VID_0525&PID_a4a2, USB\VID_1d6b&PID_0104&MI_00
+
+ ;@@@ This is the common setting for setup
+ [ControlFlags]
+diff --git a/Makefile b/Makefile
+index 123d858..045b186 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1378,7 +1378,7 @@ endif # KBUILD_EXTMOD
+ clean: $(clean-dirs)
+ $(call cmd,rmdirs)
+ $(call cmd,rmfiles)
+- @find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
++ @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
+ -o -name '*.symtypes' -o -name 'modules.order' \
+diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
+index c96fa1b..73b4a8b 100644
+--- a/arch/arm/mm/cache-v6.S
++++ b/arch/arm/mm/cache-v6.S
+@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
+ */
+ ENTRY(v6_flush_kern_dcache_area)
+ add r1, r0, r1
++ bic r0, r0, #D_CACHE_LINE_SIZE - 1
+ 1:
+ #ifdef HARVARD_CACHE
+ mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index dc18d81..d32f02b 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
+ ENTRY(v7_flush_kern_dcache_area)
+ dcache_line_size r2, r3
+ add r1, r0, r1
++ sub r3, r2, #1
++ bic r0, r0, r3
+ 1:
+ mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
+ add r0, r0, r2
+diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
+index 9b8393d..c54cca87 100644
+--- a/arch/m68k/kernel/syscalltable.S
++++ b/arch/m68k/kernel/syscalltable.S
+@@ -319,8 +319,8 @@ ENTRY(sys_call_table)
+ .long sys_readlinkat
+ .long sys_fchmodat
+ .long sys_faccessat /* 300 */
+- .long sys_ni_syscall /* Reserved for pselect6 */
+- .long sys_ni_syscall /* Reserved for ppoll */
++ .long sys_pselect6
++ .long sys_ppoll
+ .long sys_unshare
+ .long sys_set_robust_list
+ .long sys_get_robust_list /* 305 */
+diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
+index 3eb82c2..9cbc2c3 100644
+--- a/arch/parisc/include/asm/unistd.h
++++ b/arch/parisc/include/asm/unistd.h
+@@ -814,8 +814,14 @@
+ #define __NR_recvmmsg (__NR_Linux + 319)
+ #define __NR_accept4 (__NR_Linux + 320)
+ #define __NR_prlimit64 (__NR_Linux + 321)
+-
+-#define __NR_Linux_syscalls (__NR_prlimit64 + 1)
++#define __NR_fanotify_init (__NR_Linux + 322)
++#define __NR_fanotify_mark (__NR_Linux + 323)
++#define __NR_clock_adjtime (__NR_Linux + 324)
++#define __NR_name_to_handle_at (__NR_Linux + 325)
++#define __NR_open_by_handle_at (__NR_Linux + 326)
++#define __NR_syncfs (__NR_Linux + 327)
++
++#define __NR_Linux_syscalls (__NR_syncfs + 1)
+
+
+ #define __IGNORE_select /* newselect */
+diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
+index 88a0ad1..dc9a624 100644
+--- a/arch/parisc/kernel/sys_parisc32.c
++++ b/arch/parisc/kernel/sys_parisc32.c
+@@ -228,3 +228,11 @@ asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+ ((loff_t)lenhi << 32) | lenlo);
+ }
++
++asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
++ u32 mask_lo, int fd,
++ const char __user *pathname)
++{
++ return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
++ fd, pathname);
++}
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index 4be85ee..a5b02ce 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -420,6 +420,12 @@
+ ENTRY_COMP(recvmmsg)
+ ENTRY_SAME(accept4) /* 320 */
+ ENTRY_SAME(prlimit64)
++ ENTRY_SAME(fanotify_init)
++ ENTRY_COMP(fanotify_mark)
++ ENTRY_COMP(clock_adjtime)
++ ENTRY_SAME(name_to_handle_at) /* 325 */
++ ENTRY_COMP(open_by_handle_at)
++ ENTRY_SAME(syncfs)
+
+ /* Nothing yet */
+
+diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
+index 5b5e1f0..c37ff6b 100644
+--- a/arch/powerpc/kernel/crash.c
++++ b/arch/powerpc/kernel/crash.c
+@@ -170,7 +170,7 @@ static void crash_kexec_wait_realmode(int cpu)
+ int i;
+
+ msecs = 10000;
+- for (i=0; i < NR_CPUS && msecs > 0; i++) {
++ for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
+ if (i == cpu)
+ continue;
+
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 206a321..e89df59 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -462,7 +462,8 @@ _GLOBAL(disable_kernel_fp)
+ * wait for the flag to change, indicating this kernel is going away but
+ * the slave code for the next one is at addresses 0 to 100.
+ *
+- * This is used by all slaves.
++ * This is used by all slaves, even those that did not find a matching
++ * paca in the secondary startup code.
+ *
+ * Physical (hardware) cpu id should be in r3.
+ */
+@@ -471,10 +472,6 @@ _GLOBAL(kexec_wait)
+ 1: mflr r5
+ addi r5,r5,kexec_flag-1b
+
+- li r4,KEXEC_STATE_REAL_MODE
+- stb r4,PACAKEXECSTATE(r13)
+- SYNC
+-
+ 99: HMT_LOW
+ #ifdef CONFIG_KEXEC /* use no memory without kexec */
+ lwz r4,0(r5)
+@@ -499,11 +496,17 @@ kexec_flag:
+ *
+ * get phys id from paca
+ * switch to real mode
++ * mark the paca as no longer used
+ * join other cpus in kexec_wait(phys_id)
+ */
+ _GLOBAL(kexec_smp_wait)
+ lhz r3,PACAHWCPUID(r13)
+ bl real_mode
++
++ li r4,KEXEC_STATE_REAL_MODE
++ stb r4,PACAKEXECSTATE(r13)
++ SYNC
++
+ b .kexec_wait
+
+ /*
+diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
+index 8ee51a2..e6bec74 100644
+--- a/arch/powerpc/oprofile/op_model_power4.c
++++ b/arch/powerpc/oprofile/op_model_power4.c
+@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
+ return is_kernel;
+ }
+
++static bool pmc_overflow(unsigned long val)
++{
++ if ((int)val < 0)
++ return true;
++
++ /*
++ * Events on POWER7 can roll back if a speculative event doesn't
++ * eventually complete. Unfortunately in some rare cases they will
++ * raise a performance monitor exception. We need to catch this to
++ * ensure we reset the PMC. In all cases the PMC will be 256 or less
++ * cycles from overflow.
++ *
++ * We only do this if the first pass fails to find any overflowing
++ * PMCs because a user might set a period of less than 256 and we
++ * don't want to mistakenly reset them.
++ */
++ if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
++ return true;
++
++ return false;
++}
++
+ static void power4_handle_interrupt(struct pt_regs *regs,
+ struct op_counter_config *ctr)
+ {
+@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
+
+ for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
+ val = classic_ctr_read(i);
+- if (val < 0) {
++ if (pmc_overflow(val)) {
+ if (oprofile_running && ctr[i].enabled) {
+ oprofile_add_ext_sample(pc, regs, i, is_kernel);
+ classic_ctr_write(i, reset_value[i]);
+diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
+index d49c213..ae95935 100644
+--- a/arch/sh/kernel/cpu/Makefile
++++ b/arch/sh/kernel/cpu/Makefile
+@@ -17,7 +17,5 @@ obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
+
+ obj-$(CONFIG_SH_ADC) += adc.o
+ obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
+-obj-$(CONFIG_SH_FPU) += fpu.o
+-obj-$(CONFIG_SH_FPU_EMU) += fpu.o
+
+-obj-y += irq/ init.o clock.o hwblk.o proc.o
++obj-y += irq/ init.o clock.o fpu.o hwblk.o proc.o
+diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
+index a9da516..795ea8e 100644
+--- a/arch/um/Kconfig.x86
++++ b/arch/um/Kconfig.x86
+@@ -29,10 +29,10 @@ config X86_64
+ def_bool 64BIT
+
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool X86_XADD
++ def_bool X86_XADD && 64BIT
+
+ config RWSEM_GENERIC_SPINLOCK
+- def_bool !X86_XADD
++ def_bool !RWSEM_XCHGADD_ALGORITHM
+
+ config 3_LEVEL_PGTABLES
+ bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 91f3e087..cc5b052 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -125,7 +125,7 @@
+ #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
+ #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
+ #define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
+-#define X86_FEATURE_RDRND (4*32+30) /* The RDRAND instruction */
++#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
+ #define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
+
+ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index abd3e0e..99f0ad7 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -42,7 +42,7 @@
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * This is equivalent to the following test:
+- * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
++ * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
+ *
+ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
+ */
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 45fd33d..df63620 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -621,14 +621,14 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
+ struct IO_APIC_route_entry **ioapic_entries;
+
+ ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (!ioapic_entries)
+ return 0;
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ ioapic_entries[apic] =
+ kzalloc(sizeof(struct IO_APIC_route_entry) *
+- nr_ioapic_registers[apic], GFP_KERNEL);
++ nr_ioapic_registers[apic], GFP_ATOMIC);
+ if (!ioapic_entries[apic])
+ goto nomem;
+ }
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 6f9d1f6..b13ed39 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -612,8 +612,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ }
+ #endif
+
+- /* As a rule processors have APIC timer running in deep C states */
+- if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
++ /*
++ * Family 0x12 and above processors have APIC timer
++ * running in deep C states.
++ */
++ if (c->x86 > 0x11)
+ set_cpu_cap(c, X86_FEATURE_ARAT);
+
+ /*
+@@ -629,10 +632,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+ */
+ u64 mask;
++ int err;
+
+- rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+- mask |= (1 << 10);
+- wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
++ err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
++ if (err == 0) {
++ mask |= (1 << 10);
++ checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
++ }
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index e2ced00..173f3a3 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+
+ cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
+
+- if (eax > 0)
+- c->x86_capability[9] = ebx;
++ c->x86_capability[9] = ebx;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+index 755a31e..907c8e6 100644
+--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
++++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+@@ -39,7 +39,7 @@
+
+ #include <acpi/processor.h>
+
+-#define PCC_VERSION "1.00.00"
++#define PCC_VERSION "1.10.00"
+ #define POLL_LOOPS 300
+
+ #define CMD_COMPLETE 0x1
+@@ -102,7 +102,7 @@ static struct acpi_generic_address doorbell;
+ static u64 doorbell_preserve;
+ static u64 doorbell_write;
+
+-static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f,
++static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
+ 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
+
+ struct pcc_cpu {
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 4be9b39..c6724e4 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -912,6 +912,13 @@ void __init setup_arch(char **cmdline_p)
+ memblock.current_limit = get_max_mapped();
+ memblock_x86_fill();
+
++ /*
++ * The EFI specification says that boot service code won't be called
++ * after ExitBootServices(). This is, in fact, a lie.
++ */
++ if (efi_enabled)
++ efi_reserve_boot_services();
++
+ /* preallocate 4k for mptable mpc */
+ early_reserve_e820_mpc_new();
+
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index 99e4826..a73397f 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -72,7 +72,7 @@ ENTRY(_copy_to_user)
+ addq %rdx,%rcx
+ jc bad_to_user
+ cmpq TI_addr_limit(%rax),%rcx
+- jae bad_to_user
++ ja bad_to_user
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
+ ENDPROC(_copy_to_user)
+@@ -85,7 +85,7 @@ ENTRY(_copy_from_user)
+ addq %rdx,%rcx
+ jc bad_from_user
+ cmpq TI_addr_limit(%rax),%rcx
+- jae bad_from_user
++ ja bad_from_user
+ ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ CFI_ENDPROC
+ ENDPROC(_copy_from_user)
+diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
+index c3b8e24..9fd8a56 100644
+--- a/arch/x86/oprofile/op_model_amd.c
++++ b/arch/x86/oprofile/op_model_amd.c
+@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
+ wrmsrl(MSR_AMD64_IBSOPCTL, 0);
+ }
+
+-static inline int eilvt_is_available(int offset)
++static inline int get_eilvt(int offset)
+ {
+- /* check if we may assign a vector */
+ return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
+ }
+
++static inline int put_eilvt(int offset)
++{
++ return !setup_APIC_eilvt(offset, 0, 0, 1);
++}
++
+ static inline int ibs_eilvt_valid(void)
+ {
+ int offset;
+ u64 val;
++ int valid = 0;
++
++ preempt_disable();
+
+ rdmsrl(MSR_AMD64_IBSCTL, val);
+ offset = val & IBSCTL_LVT_OFFSET_MASK;
+@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
+ if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
+ pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+- return 0;
++ goto out;
+ }
+
+- if (!eilvt_is_available(offset)) {
++ if (!get_eilvt(offset)) {
+ pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
+ smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
+- return 0;
++ goto out;
+ }
+
+- return 1;
++ valid = 1;
++out:
++ preempt_enable();
++
++ return valid;
+ }
+
+ static inline int get_ibs_offset(void)
+@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
+
+ static int force_ibs_eilvt_setup(void)
+ {
+- int i;
++ int offset;
+ int ret;
+
+- /* find the next free available EILVT entry */
+- for (i = 1; i < 4; i++) {
+- if (!eilvt_is_available(i))
+- continue;
+- ret = setup_ibs_ctl(i);
+- if (ret)
+- return ret;
+- pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
+- return 0;
++ /*
++ * find the next free available EILVT entry, skip offset 0,
++ * pin search to this cpu
++ */
++ preempt_disable();
++ for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
++ if (get_eilvt(offset))
++ break;
+ }
++ preempt_enable();
+
+- printk(KERN_DEBUG "No EILVT entry available\n");
+-
+- return -EBUSY;
+-}
+-
+-static int __init_ibs_nmi(void)
+-{
+- int ret;
+-
+- if (ibs_eilvt_valid())
+- return 0;
++ if (offset == APIC_EILVT_NR_MAX) {
++ printk(KERN_DEBUG "No EILVT entry available\n");
++ return -EBUSY;
++ }
+
+- ret = force_ibs_eilvt_setup();
++ ret = setup_ibs_ctl(offset);
+ if (ret)
+- return ret;
++ goto out;
+
+- if (!ibs_eilvt_valid())
+- return -EFAULT;
++ if (!ibs_eilvt_valid()) {
++ ret = -EFAULT;
++ goto out;
++ }
+
++ pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
+ pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+
+ return 0;
++out:
++ preempt_disable();
++ put_eilvt(offset);
++ preempt_enable();
++ return ret;
+ }
+
+ /*
+ * check and reserve APIC extended interrupt LVT offset for IBS if
+ * available
+- *
+- * init_ibs() preforms implicitly cpu-local operations, so pin this
+- * thread to its current CPU
+ */
+
+ static void init_ibs(void)
+ {
+- preempt_disable();
+-
+ ibs_caps = get_ibs_caps();
++
+ if (!ibs_caps)
++ return;
++
++ if (ibs_eilvt_valid())
+ goto out;
+
+- if (__init_ibs_nmi() < 0)
+- ibs_caps = 0;
+- else
+- printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
++ if (!force_ibs_eilvt_setup())
++ goto out;
++
++ /* Failed to setup ibs */
++ ibs_caps = 0;
++ return;
+
+ out:
+- preempt_enable();
++ printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
+ }
+
+ static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 0fe27d7..b00c4ea 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -315,6 +315,40 @@ static void __init print_efi_memmap(void)
+ }
+ #endif /* EFI_DEBUG */
+
++void __init efi_reserve_boot_services(void)
++{
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ efi_memory_desc_t *md = p;
++ unsigned long long start = md->phys_addr;
++ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
++
++ if (md->type != EFI_BOOT_SERVICES_CODE &&
++ md->type != EFI_BOOT_SERVICES_DATA)
++ continue;
++
++ memblock_x86_reserve_range(start, start + size, "EFI Boot");
++ }
++}
++
++static void __init efi_free_boot_services(void)
++{
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ efi_memory_desc_t *md = p;
++ unsigned long long start = md->phys_addr;
++ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
++
++ if (md->type != EFI_BOOT_SERVICES_CODE &&
++ md->type != EFI_BOOT_SERVICES_DATA)
++ continue;
++
++ free_bootmem_late(start, size);
++ }
++}
++
+ void __init efi_init(void)
+ {
+ efi_config_table_t *config_tables;
+@@ -507,7 +541,9 @@ void __init efi_enter_virtual_mode(void)
+ efi.systab = NULL;
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+- if (!(md->attribute & EFI_MEMORY_RUNTIME))
++ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
++ md->type != EFI_BOOT_SERVICES_CODE &&
++ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+@@ -558,6 +594,13 @@ void __init efi_enter_virtual_mode(void)
+ }
+
+ /*
++ * Thankfully, it does seem that no runtime services other than
++ * SetVirtualAddressMap() will touch boot services code, so we can
++ * get rid of it all at this point
++ */
++ efi_free_boot_services();
++
++ /*
+ * Now that EFI is in virtual mode, update the function
+ * pointers in the runtime service table to the new virtual addresses.
+ *
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index ac0621a..641264c 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -64,10 +64,11 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
+ if (!(__supported_pte_mask & _PAGE_NX))
+ return;
+
+- /* Make EFI runtime service code area executable */
++ /* Make EFI service code area executable */
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+- if (md->type == EFI_RUNTIME_SERVICES_CODE) {
++ if (md->type == EFI_RUNTIME_SERVICES_CODE ||
++ md->type == EFI_BOOT_SERVICES_CODE) {
+ unsigned long end;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+ early_mapping_set_exec(md->phys_addr, end, executable);
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 0684f3c..f298bd7 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info)
+
+ active_mm = percpu_read(cpu_tlbstate.active_mm);
+
+- if (active_mm == mm)
++ if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+ leave_mm(smp_processor_id());
+
+ /* If this cpu still has a stale cr3 reference, then make sure
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 141eb0d..c881ae4 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -522,11 +522,20 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
+ /* Boundary cross-over for the edges: */
+ if (idx) {
+ unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
++ unsigned long *mid_mfn_p;
+
+ p2m_init(p2m);
+
+ p2m_top[topidx][mididx] = p2m;
+
++ /* For save/restore we need to MFN of the P2M saved */
++
++ mid_mfn_p = p2m_top_mfn_p[topidx];
++ WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
++ "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
++ topidx, mididx);
++ mid_mfn_p[mididx] = virt_to_mfn(p2m);
++
+ }
+ return idx != 0;
+ }
+@@ -549,12 +558,29 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
+ pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
+ {
+ unsigned topidx = p2m_top_index(pfn);
+- if (p2m_top[topidx] == p2m_mid_missing) {
+- unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
++ unsigned long *mid_mfn_p;
++ unsigned long **mid;
++
++ mid = p2m_top[topidx];
++ mid_mfn_p = p2m_top_mfn_p[topidx];
++ if (mid == p2m_mid_missing) {
++ mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+ p2m_mid_init(mid);
+
+ p2m_top[topidx] = mid;
++
++ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
++ }
++ /* And the save/restore P2M tables.. */
++ if (mid_mfn_p == p2m_mid_missing_mfn) {
++ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
++ p2m_mid_mfn_init(mid_mfn_p);
++
++ p2m_top_mfn_p[topidx] = mid_mfn_p;
++ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
++ /* Note: we don't set mid_mfn_p[midix] here,
++ * look in __early_alloc_p2m */
+ }
+ }
+
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 90bac0a..ca6297b 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -166,7 +166,7 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
+ if (last > end)
+ continue;
+
+- if (entry->type == E820_RAM) {
++ if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
+ if (start > start_pci)
+ identity += set_phys_range_identity(
+ PFN_UP(start_pci), PFN_DOWN(start));
+@@ -227,7 +227,11 @@ char * __init xen_memory_setup(void)
+
+ memcpy(map_raw, map, sizeof(map));
+ e820.nr_map = 0;
++#ifdef CONFIG_X86_32
++ xen_extra_mem_start = mem_end;
++#else
+ xen_extra_mem_start = max((1ULL << 32), mem_end);
++#endif
+ for (i = 0; i < memmap.nr_entries; i++) {
+ unsigned long long end;
+
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index 6c9b5e1..bb21e4c 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
+ }
+
+ /*
+- * Moving a request silently to empty queue_head may stall the
+- * queue. Kick the queue in those cases. This function is called
+- * from request completion path and calling directly into
+- * request_fn may confuse the driver. Always use kblockd.
++ * Kick the queue to avoid stall for two cases:
++ * 1. Moving a request silently to empty queue_head may stall the
++ * queue.
++ * 2. When flush request is running in non-queueable queue, the
++ * queue is hold. Restart the queue after flush request is finished
++ * to avoid stall.
++ * This function is called from request completion path and calling
++ * directly into request_fn may confuse the driver. Always use
++ * kblockd.
+ */
+- if (queued)
++ if (queued || q->flush_queue_delayed)
+ blk_run_queue_async(q);
++ q->flush_queue_delayed = 0;
+ }
+
+ /**
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 1fa7692..fa1eb04 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue_limits *lim)
+ lim->discard_granularity = 0;
+ lim->discard_alignment = 0;
+ lim->discard_misaligned = 0;
+- lim->discard_zeroes_data = -1;
++ lim->discard_zeroes_data = 1;
+ lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+ lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+ lim->alignment_offset = 0;
+@@ -166,6 +166,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+
+ blk_set_default_limits(&q->limits);
+ blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
++ q->limits.discard_zeroes_data = 0;
+
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+@@ -790,6 +791,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
+ }
+ EXPORT_SYMBOL_GPL(blk_queue_flush);
+
++void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
++{
++ q->flush_not_queueable = !queueable;
++}
++EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
++
+ static int __init blk_settings_init(void)
+ {
+ blk_max_low_pfn = max_low_pfn - 1;
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index bd23631..d935bd8 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
+
+ static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
+ {
+- return queue_var_show(q->limits.max_discard_sectors << 9, page);
++ return sprintf(page, "%llu\n",
++ (unsigned long long)q->limits.max_discard_sectors << 9);
+ }
+
+ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
+diff --git a/block/blk.h b/block/blk.h
+index 6126346..1566e8d 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -61,8 +61,28 @@ static inline struct request *__elv_next_request(struct request_queue *q)
+ rq = list_entry_rq(q->queue_head.next);
+ return rq;
+ }
+-
+- if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
++ /*
++ * Flush request is running and flush request isn't queueable
++ * in the drive, we can hold the queue till flush request is
++ * finished. Even we don't do this, driver can't dispatch next
++ * requests and will requeue them. And this can improve
++ * throughput too. For example, we have request flush1, write1,
++ * flush 2. flush1 is dispatched, then queue is hold, write1
++ * isn't inserted to queue. After flush1 is finished, flush2
++ * will be dispatched. Since disk cache is already clean,
++ * flush2 will be finished very soon, so looks like flush2 is
++ * folded to flush1.
++ * Since the queue is hold, a flag is set to indicate the queue
++ * should be restarted later. Please see flush_end_io() for
++ * details.
++ */
++ if (q->flush_pending_idx != q->flush_running_idx &&
++ !queue_flush_queueable(q)) {
++ q->flush_queue_delayed = 1;
++ return NULL;
++ }
++ if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
++ !q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+ }
+diff --git a/block/genhd.c b/block/genhd.c
+index 2dd9887..95822ae 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendisk *disk)
+ {
+ struct disk_events *ev;
+
+- if (!disk->fops->check_events || !(disk->events | disk->async_events))
++ if (!disk->fops->check_events)
+ return;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e2f57e9e..d51f979 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1089,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
+ static int ata_scsi_dev_config(struct scsi_device *sdev,
+ struct ata_device *dev)
+ {
++ struct request_queue *q = sdev->request_queue;
++
+ if (!ata_id_has_unload(dev->id))
+ dev->flags |= ATA_DFLAG_NO_UNLOAD;
+
+ /* configure max sectors */
+- blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
++ blk_queue_max_hw_sectors(q, dev->max_sectors);
+
+ if (dev->class == ATA_DEV_ATAPI) {
+- struct request_queue *q = sdev->request_queue;
+ void *buf;
+
+ sdev->sector_size = ATA_SECT_SIZE;
+
+ /* set DMA padding */
+- blk_queue_update_dma_pad(sdev->request_queue,
+- ATA_DMA_PAD_SZ - 1);
++ blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
+
+ /* configure draining */
+ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
+@@ -1131,8 +1131,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
+ "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+ sdev->sector_size);
+
+- blk_queue_update_dma_alignment(sdev->request_queue,
+- sdev->sector_size - 1);
++ blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+
+ if (dev->flags & ATA_DFLAG_AN)
+ set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
+@@ -1145,6 +1144,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
+ }
+
++ blk_queue_flush_queueable(q, false);
++
+ dev->sdev = sdev;
+ return 0;
+ }
+@@ -2138,7 +2139,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+ * with the unmap bit set.
+ */
+ if (ata_id_has_trim(args->id)) {
+- put_unaligned_be32(65535 * 512 / 8, &rbuf[20]);
++ put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
+ put_unaligned_be32(1, &rbuf[28]);
+ }
+
+diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
+index 905ff76..635a759 100644
+--- a/drivers/ata/pata_cmd64x.c
++++ b/drivers/ata/pata_cmd64x.c
+@@ -41,6 +41,9 @@
+ enum {
+ CFR = 0x50,
+ CFR_INTR_CH0 = 0x04,
++ CNTRL = 0x51,
++ CNTRL_CH0 = 0x04,
++ CNTRL_CH1 = 0x08,
+ CMDTIM = 0x52,
+ ARTTIM0 = 0x53,
+ DRWTIM0 = 0x54,
+@@ -328,9 +331,19 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ .port_ops = &cmd648_port_ops
+ }
+ };
+- const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
+- u8 mrdmode;
++ const struct ata_port_info *ppi[] = {
++ &cmd_info[id->driver_data],
++ &cmd_info[id->driver_data],
++ NULL
++ };
++ u8 mrdmode, reg;
+ int rc;
++ struct pci_dev *bridge = pdev->bus->self;
++ /* mobility split bridges don't report enabled ports correctly */
++ int port_ok = !(bridge && bridge->vendor ==
++ PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
++ /* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
++ int cntrl_ch0_ok = (id->driver_data != 0);
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+@@ -341,11 +354,18 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ if (pdev->device == PCI_DEVICE_ID_CMD_646) {
+ /* Does UDMA work ? */
+- if (pdev->revision > 4)
++ if (pdev->revision > 4) {
+ ppi[0] = &cmd_info[2];
++ ppi[1] = &cmd_info[2];
++ }
+ /* Early rev with other problems ? */
+- else if (pdev->revision == 1)
++ else if (pdev->revision == 1) {
+ ppi[0] = &cmd_info[3];
++ ppi[1] = &cmd_info[3];
++ }
++ /* revs 1,2 have no CNTRL_CH0 */
++ if (pdev->revision < 3)
++ cntrl_ch0_ok = 0;
+ }
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+@@ -354,6 +374,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ mrdmode |= 0x02; /* Memory read line enable */
+ pci_write_config_byte(pdev, MRDMODE, mrdmode);
+
++ /* check for enabled ports */
++ pci_read_config_byte(pdev, CNTRL, &reg);
++ if (!port_ok)
++ dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
++ if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
++ dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
++ ppi[0] = &ata_dummy_port_info;
++
++ }
++ if (port_ok && !(reg & CNTRL_CH1)) {
++ dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
++ ppi[1] = &ata_dummy_port_info;
++ }
++
+ /* Force PIO 0 here.. */
+
+ /* PPC specific fixup copied from old driver */
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index b7f51e4..c94bc48 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -552,7 +552,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
+ struct kobject *kobj;
+
+ mutex_lock(&brd_devices_mutex);
+- brd = brd_init_one(dev & MINORMASK);
++ brd = brd_init_one(MINOR(dev) >> part_shift);
+ kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
+ mutex_unlock(&brd_devices_mutex);
+
+@@ -585,15 +585,18 @@ static int __init brd_init(void)
+ if (max_part > 0)
+ part_shift = fls(max_part);
+
++ if ((1UL << part_shift) > DISK_MAX_PARTS)
++ return -EINVAL;
++
+ if (rd_nr > 1UL << (MINORBITS - part_shift))
+ return -EINVAL;
+
+ if (rd_nr) {
+ nr = rd_nr;
+- range = rd_nr;
++ range = rd_nr << part_shift;
+ } else {
+ nr = CONFIG_BLK_DEV_RAM_COUNT;
+- range = 1UL << (MINORBITS - part_shift);
++ range = 1UL << MINORBITS;
+ }
+
+ if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
+@@ -632,7 +635,7 @@ static void __exit brd_exit(void)
+ unsigned long range;
+ struct brd_device *brd, *next;
+
+- range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
++ range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
+
+ list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
+ brd_del_one(brd);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index a076a14..c59a672 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1658,7 +1658,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ struct kobject *kobj;
+
+ mutex_lock(&loop_devices_mutex);
+- lo = loop_init_one(dev & MINORMASK);
++ lo = loop_init_one(MINOR(dev) >> part_shift);
+ kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
+ mutex_unlock(&loop_devices_mutex);
+
+@@ -1691,15 +1691,18 @@ static int __init loop_init(void)
+ if (max_part > 0)
+ part_shift = fls(max_part);
+
++ if ((1UL << part_shift) > DISK_MAX_PARTS)
++ return -EINVAL;
++
+ if (max_loop > 1UL << (MINORBITS - part_shift))
+ return -EINVAL;
+
+ if (max_loop) {
+ nr = max_loop;
+- range = max_loop;
++ range = max_loop << part_shift;
+ } else {
+ nr = 8;
+- range = 1UL << (MINORBITS - part_shift);
++ range = 1UL << MINORBITS;
+ }
+
+ if (register_blkdev(LOOP_MAJOR, "loop"))
+@@ -1738,7 +1741,7 @@ static void __exit loop_exit(void)
+ unsigned long range;
+ struct loop_device *lo, *next;
+
+- range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift);
++ range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+
+ list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
+ loop_del_one(lo);
+diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
+index 8690e31..46b8136 100644
+--- a/drivers/block/paride/pcd.c
++++ b/drivers/block/paride/pcd.c
+@@ -320,6 +320,7 @@ static void pcd_init_units(void)
+ disk->first_minor = unit;
+ strcpy(disk->disk_name, cd->name); /* umm... */
+ disk->fops = &pcd_bdops;
++ disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ }
+ }
+
+diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
+index e427fbe..7878da8 100644
+--- a/drivers/cdrom/viocd.c
++++ b/drivers/cdrom/viocd.c
+@@ -625,7 +625,8 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ blk_queue_max_hw_sectors(q, 4096 / 512);
+ gendisk->queue = q;
+ gendisk->fops = &viocd_fops;
+- gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
++ gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
++ GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ set_capacity(gendisk, 0);
+ gendisk->private_data = d;
+ d->viocd_disk = gendisk;
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index d72433f..ee01716 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -139,8 +139,8 @@ static int i8k_smm(struct smm_regs *regs)
+ "movl %%edi,20(%%rax)\n\t"
+ "popq %%rdx\n\t"
+ "movl %%edx,0(%%rax)\n\t"
+- "lahf\n\t"
+- "shrl $8,%%eax\n\t"
++ "pushfq\n\t"
++ "popq %%rax\n\t"
+ "andl $1,%%eax\n"
+ :"=a"(rc)
+ : "a"(regs)
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 2dafc5c..7c10f96 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1208,12 +1208,28 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+ cpufreq_driver->exit(data);
+ unlock_policy_rwsem_write(cpu);
+
++ cpufreq_debug_enable_ratelimit();
++
++#ifdef CONFIG_HOTPLUG_CPU
++ /* when the CPU which is the parent of the kobj is hotplugged
++ * offline, check for siblings, and create cpufreq sysfs interface
++ * and symlinks
++ */
++ if (unlikely(cpumask_weight(data->cpus) > 1)) {
++ /* first sibling now owns the new sysfs dir */
++ cpumask_clear_cpu(cpu, data->cpus);
++ cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
++
++ /* finally remove our own symlink */
++ lock_policy_rwsem_write(cpu);
++ __cpufreq_remove_dev(sys_dev);
++ }
++#endif
++
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
+ kfree(data);
+- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+- cpufreq_debug_enable_ratelimit();
+ return 0;
+ }
+
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index 00d73fc..4f1b8de 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
+ return -1;
+ }
+
++/* should be called late in the CPU removal sequence so that the stats
++ * memory is still available in case someone tries to use it.
++ */
+ static void cpufreq_stats_free_table(unsigned int cpu)
+ {
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+- if (policy && policy->cpu == cpu)
+- sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ if (stat) {
+ kfree(stat->time_in_state);
+ kfree(stat);
+ }
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
++}
++
++/* must be called early in the CPU removal sequence (before
++ * cpufreq_remove_dev) so that policy is still valid.
++ */
++static void cpufreq_stats_free_sysfs(unsigned int cpu)
++{
++ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++ if (policy && policy->cpu == cpu)
++ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ if (policy)
+ cpufreq_cpu_put(policy);
+ }
+@@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ case CPU_ONLINE_FROZEN:
+ cpufreq_update_policy(cpu);
+ break;
++ case CPU_DOWN_PREPARE:
++ cpufreq_stats_free_sysfs(cpu);
++ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpufreq_stats_free_table(cpu);
+@@ -324,9 +337,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
++/* priority=1 so this will get called before cpufreq_remove_dev */
+ static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
+ {
+ .notifier_call = cpufreq_stat_cpu_callback,
++ .priority = 1,
+ };
+
+ static struct notifier_block notifier_policy_block = {
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index f508690..c47f3d0 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
+ unsigned int power_usage = -1;
+ int i;
+ int multiplier;
++ struct timespec t;
+
+ if (data->needs_update) {
+ menu_update(dev);
+@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
+ return 0;
+
+ /* determine the expected residency time, round up */
++ t = ktime_to_timespec(tick_nohz_get_sleep_length());
+ data->expected_us =
+- DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
++ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
+
+
+ data->bucket = which_bucket(data->expected_us);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index e9e6f71..c4504a2 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -666,12 +666,37 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+ static bool
+ bsd_ring_get_irq(struct intel_ring_buffer *ring)
+ {
+- return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
++ struct drm_device *dev = ring->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ if (!dev->irq_enabled)
++ return false;
++
++ spin_lock(&ring->irq_lock);
++ if (ring->irq_refcount++ == 0) {
++ if (IS_G4X(dev))
++ i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
++ else
++ ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
++ }
++ spin_unlock(&ring->irq_lock);
++
++ return true;
+ }
+ static void
+ bsd_ring_put_irq(struct intel_ring_buffer *ring)
+ {
+- ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
++ struct drm_device *dev = ring->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
++
++ spin_lock(&ring->irq_lock);
++ if (--ring->irq_refcount == 0) {
++ if (IS_G4X(dev))
++ i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
++ else
++ ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
++ }
++ spin_unlock(&ring->irq_lock);
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 9073e3b..296e6ec 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1578,7 +1578,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+- u32 hdp_host_path_cntl;
++ u32 hdp_host_path_cntl, tmp;
+ int i, j, num_shader_engines, ps_thread_count;
+
+ switch (rdev->family) {
+@@ -2141,6 +2141,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+ WREG32(i, 0);
+
++ tmp = RREG32(HDP_MISC_CNTL);
++ tmp |= HDP_FLUSH_INVALIDATE_CACHE;
++ WREG32(HDP_MISC_CNTL, tmp);
++
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index fc40e0c..f37e91e 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -64,6 +64,8 @@
+ #define GB_BACKEND_MAP 0x98FC
+ #define DMIF_ADDR_CONFIG 0xBD4
+ #define HDP_ADDR_CONFIG 0x2F48
++#define HDP_MISC_CNTL 0x2F4C
++#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+
+ #define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+ #define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 3d8a763..b205ba1 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -417,7 +417,7 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ num_shader_engines = 1;
+ if (num_shader_engines > rdev->config.cayman.max_shader_engines)
+ num_shader_engines = rdev->config.cayman.max_shader_engines;
+- if (num_backends_per_asic > num_shader_engines)
++ if (num_backends_per_asic < num_shader_engines)
+ num_backends_per_asic = num_shader_engines;
+ if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
+ num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
+@@ -829,7 +829,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ rdev->config.cayman.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.cayman.tile_config |=
+- (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
++ ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+
+@@ -931,6 +931,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
++ tmp = RREG32(HDP_MISC_CNTL);
++ tmp |= HDP_FLUSH_INVALIDATE_CACHE;
++ WREG32(HDP_MISC_CNTL, tmp);
++
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
+index 0f9a08b..b2088c1 100644
+--- a/drivers/gpu/drm/radeon/nid.h
++++ b/drivers/gpu/drm/radeon/nid.h
+@@ -136,6 +136,8 @@
+ #define HDP_NONSURFACE_INFO 0x2C08
+ #define HDP_NONSURFACE_SIZE 0x2C0C
+ #define HDP_ADDR_CONFIG 0x2F48
++#define HDP_MISC_CNTL 0x2F4C
++#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+
+ #define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+ #define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index ca57619..d948265 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -782,6 +782,7 @@ static struct radeon_asic evergreen_asic = {
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
++ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+@@ -828,6 +829,7 @@ static struct radeon_asic sumo_asic = {
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
++ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+@@ -874,6 +876,7 @@ static struct radeon_asic btc_asic = {
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
++ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+@@ -920,6 +923,7 @@ static struct radeon_asic cayman_asic = {
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
++ .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index 0ec91c1..a5eda4c 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -501,9 +501,17 @@ static int magicmouse_probe(struct hid_device *hdev,
+ }
+ report->size = 6;
+
++ /*
++ * The device reponds with 'invalid report id' when feature
++ * report switching it into multitouch mode is sent to it.
++ *
++ * This results in -EIO from the _raw low-level transport callback,
++ * but there seems to be no other way of switching the mode.
++ * Thus the super-ugly hacky success check below.
++ */
+ ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
+ HID_FEATURE_REPORT);
+- if (ret != sizeof(feature)) {
++ if (ret != -EIO) {
+ hid_err(hdev, "unable to request touch data (%d)\n", ret);
+ goto err_stop_hw;
+ }
+diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
+index 196ffaf..7df490e 100644
+--- a/drivers/hwmon/pmbus_core.c
++++ b/drivers/hwmon/pmbus_core.c
+@@ -700,6 +700,7 @@ do { \
+ struct sensor_device_attribute *a \
+ = &data->_type##s[data->num_##_type##s].attribute; \
+ BUG_ON(data->num_attributes >= data->max_attributes); \
++ sysfs_attr_init(&a->dev_attr.attr); \
+ a->dev_attr.attr.name = _name; \
+ a->dev_attr.attr.mode = _mode; \
+ a->dev_attr.show = _show; \
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index b4ab39b..5f1b92c 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -330,6 +330,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ i2c_writel(i2c_dev, 0, I2C_INT_MASK);
+ clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
+
++ if (!i2c_dev->is_dvc) {
++ u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
++ i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
++ }
++
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index a5ec5a7..9560822 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -1773,7 +1773,8 @@ static int ide_cd_probe(ide_drive_t *drive)
+
+ g->minors = 1;
+ g->driverfs_dev = &drive->gendev;
+- g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
++ g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
++ GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ if (ide_cdrom_setup(drive)) {
+ put_device(&info->dev);
+ goto failed;
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 5c93627..70bd738 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -493,11 +493,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
+ spin_unlock_irqrestore(&bitmap->lock, flags);
+ sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+ sb->events = cpu_to_le64(bitmap->mddev->events);
+- if (bitmap->mddev->events < bitmap->events_cleared) {
++ if (bitmap->mddev->events < bitmap->events_cleared)
+ /* rocking back to read-only */
+ bitmap->events_cleared = bitmap->mddev->events;
+- sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+- }
++ sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
++ sb->state = cpu_to_le32(bitmap->flags);
+ /* Just in case these have been changed via sysfs: */
+ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
+ sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
+@@ -618,7 +618,7 @@ success:
+ if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
+ bitmap->flags |= BITMAP_HOSTENDIAN;
+ bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
+- if (sb->state & cpu_to_le32(BITMAP_STALE))
++ if (bitmap->flags & BITMAP_STALE)
+ bitmap->events_cleared = bitmap->mddev->events;
+ err = 0;
+ out:
+@@ -652,9 +652,11 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
+ switch (op) {
+ case MASK_SET:
+ sb->state |= cpu_to_le32(bits);
++ bitmap->flags |= bits;
+ break;
+ case MASK_UNSET:
+ sb->state &= cpu_to_le32(~bits);
++ bitmap->flags &= ~bits;
+ break;
+ default:
+ BUG();
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index a550a05..aa4e570 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
+ if (!error && !clone->errors)
+ return 0; /* I/O complete */
+
+- if (error == -EOPNOTSUPP || error == -EREMOTEIO)
++ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
+ return error;
+
+ if (mpio->pgpath)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index cb8380c..53e603b 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
+ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+ {
++ struct request_queue *q;
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
+ sector_t dev_size =
+@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ limits->logical_block_size >> SECTOR_SHIFT;
+ char b[BDEVNAME_SIZE];
+
++ /*
++ * Some devices exist without request functions,
++ * such as loop devices not yet bound to backing files.
++ * Forbid the use of such devices.
++ */
++ q = bdev_get_queue(bdev);
++ if (!q || !q->make_request_fn) {
++ DMWARN("%s: %s is not yet initialised: "
++ "start=%llu, len=%llu, dev_size=%llu",
++ dm_device_name(ti->table->md), bdevname(bdev, b),
++ (unsigned long long)start,
++ (unsigned long long)len,
++ (unsigned long long)dev_size);
++ return 1;
++ }
++
+ if (!dev_size)
+ return 0;
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7d6f7f1..4a4c0f8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4347,13 +4347,19 @@ static int md_alloc(dev_t dev, char *name)
+ disk->fops = &md_fops;
+ disk->private_data = mddev;
+ disk->queue = mddev->queue;
++ blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
+ /* Allow extended partitions. This makes the
+ * 'mdp' device redundant, but we can't really
+ * remove it now.
+ */
+ disk->flags |= GENHD_FL_EXT_DEVT;
+- add_disk(disk);
+ mddev->gendisk = disk;
++ /* As soon as we call add_disk(), another thread could get
++ * through to md_open, so make sure it doesn't get too far
++ */
++ mutex_lock(&mddev->open_mutex);
++ add_disk(disk);
++
+ error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+ &disk_to_dev(disk)->kobj, "%s", "md");
+ if (error) {
+@@ -4367,8 +4373,7 @@ static int md_alloc(dev_t dev, char *name)
+ if (mddev->kobj.sd &&
+ sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+ printk(KERN_DEBUG "pointless warning\n");
+-
+- blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
++ mutex_unlock(&mddev->open_mutex);
+ abort:
+ mutex_unlock(&disks_mutex);
+ if (!error && mddev->kobj.sd) {
+diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
+index d4e466a..1d47d4d 100644
+--- a/drivers/media/dvb/frontends/dib0070.c
++++ b/drivers/media/dvb/frontends/dib0070.c
+@@ -73,27 +73,47 @@ struct dib0070_state {
+
+ u8 wbd_gain_current;
+ u16 wbd_offset_3_3[2];
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[2];
++ u8 i2c_write_buffer[3];
++ u8 i2c_read_buffer[2];
+ };
+
+ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
+ {
+- u8 b[2];
+- struct i2c_msg msg[2] = {
+- { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 },
+- { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2 },
+- };
+- if (i2c_transfer(state->i2c, msg, 2) != 2) {
++ state->i2c_write_buffer[0] = reg;
++
++ memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
++ state->msg[0].addr = state->cfg->i2c_address;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 1;
++ state->msg[1].addr = state->cfg->i2c_address;
++ state->msg[1].flags = I2C_M_RD;
++ state->msg[1].buf = state->i2c_read_buffer;
++ state->msg[1].len = 2;
++
++ if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
+ printk(KERN_WARNING "DiB0070 I2C read failed\n");
+ return 0;
+ }
+- return (b[0] << 8) | b[1];
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
+ {
+- u8 b[3] = { reg, val >> 8, val & 0xff };
+- struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = b, .len = 3 };
+- if (i2c_transfer(state->i2c, &msg, 1) != 1) {
++ state->i2c_write_buffer[0] = reg;
++ state->i2c_write_buffer[1] = val >> 8;
++ state->i2c_write_buffer[2] = val & 0xff;
++
++ memset(state->msg, 0, sizeof(struct i2c_msg));
++ state->msg[0].addr = state->cfg->i2c_address;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 3;
++
++ if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0070 I2C write failed\n");
+ return -EREMOTEIO;
+ }
+diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
+index 52ff1a2..c9c935a 100644
+--- a/drivers/media/dvb/frontends/dib0090.c
++++ b/drivers/media/dvb/frontends/dib0090.c
+@@ -191,6 +191,11 @@ struct dib0090_state {
+ u8 wbd_calibration_gain;
+ const struct dib0090_wbd_slope *current_wbd_table;
+ u16 wbdmux;
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[2];
++ u8 i2c_write_buffer[3];
++ u8 i2c_read_buffer[2];
+ };
+
+ struct dib0090_fw_state {
+@@ -198,27 +203,48 @@ struct dib0090_fw_state {
+ struct dvb_frontend *fe;
+ struct dib0090_identity identity;
+ const struct dib0090_config *config;
++
++ /* for the I2C transfer */
++ struct i2c_msg msg;
++ u8 i2c_write_buffer[2];
++ u8 i2c_read_buffer[2];
+ };
+
+ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
+ {
+- u8 b[2];
+- struct i2c_msg msg[2] = {
+- {.addr = state->config->i2c_address, .flags = 0, .buf = &reg, .len = 1},
+- {.addr = state->config->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2},
+- };
+- if (i2c_transfer(state->i2c, msg, 2) != 2) {
++ state->i2c_write_buffer[0] = reg;
++
++ memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
++ state->msg[0].addr = state->config->i2c_address;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 1;
++ state->msg[1].addr = state->config->i2c_address;
++ state->msg[1].flags = I2C_M_RD;
++ state->msg[1].buf = state->i2c_read_buffer;
++ state->msg[1].len = 2;
++
++ if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
+ printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ return 0;
+ }
+- return (b[0] << 8) | b[1];
++
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
+ {
+- u8 b[3] = { reg & 0xff, val >> 8, val & 0xff };
+- struct i2c_msg msg = {.addr = state->config->i2c_address, .flags = 0, .buf = b, .len = 3 };
+- if (i2c_transfer(state->i2c, &msg, 1) != 1) {
++ state->i2c_write_buffer[0] = reg & 0xff;
++ state->i2c_write_buffer[1] = val >> 8;
++ state->i2c_write_buffer[2] = val & 0xff;
++
++ memset(state->msg, 0, sizeof(struct i2c_msg));
++ state->msg[0].addr = state->config->i2c_address;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 3;
++
++ if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ return -EREMOTEIO;
+ }
+@@ -227,20 +253,31 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
+
+ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
+ {
+- u8 b[2];
+- struct i2c_msg msg = {.addr = reg, .flags = I2C_M_RD, .buf = b, .len = 2 };
+- if (i2c_transfer(state->i2c, &msg, 1) != 1) {
++ state->i2c_write_buffer[0] = reg;
++
++ memset(&state->msg, 0, sizeof(struct i2c_msg));
++ state->msg.addr = reg;
++ state->msg.flags = I2C_M_RD;
++ state->msg.buf = state->i2c_read_buffer;
++ state->msg.len = 2;
++ if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ return 0;
+ }
+- return (b[0] << 8) | b[1];
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
+ {
+- u8 b[2] = { val >> 8, val & 0xff };
+- struct i2c_msg msg = {.addr = reg, .flags = 0, .buf = b, .len = 2 };
+- if (i2c_transfer(state->i2c, &msg, 1) != 1) {
++ state->i2c_write_buffer[0] = val >> 8;
++ state->i2c_write_buffer[1] = val & 0xff;
++
++ memset(&state->msg, 0, sizeof(struct i2c_msg));
++ state->msg.addr = reg;
++ state->msg.flags = 0;
++ state->msg.buf = state->i2c_write_buffer;
++ state->msg.len = 2;
++ if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ return -EREMOTEIO;
+ }
+diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
+index 289a798..79cb1c2 100644
+--- a/drivers/media/dvb/frontends/dib7000m.c
++++ b/drivers/media/dvb/frontends/dib7000m.c
+@@ -50,6 +50,11 @@ struct dib7000m_state {
+ u16 revision;
+
+ u8 agc_state;
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[2];
++ u8 i2c_write_buffer[4];
++ u8 i2c_read_buffer[2];
+ };
+
+ enum dib7000m_power_mode {
+@@ -64,29 +69,39 @@ enum dib7000m_power_mode {
+
+ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
+ {
+- u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
+- u8 rb[2];
+- struct i2c_msg msg[2] = {
+- { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
+- { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+- };
+-
+- if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
++ state->i2c_write_buffer[0] = (reg >> 8) | 0x80;
++ state->i2c_write_buffer[1] = reg & 0xff;
++
++ memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c_addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 2;
++ state->msg[1].addr = state->i2c_addr >> 1;
++ state->msg[1].flags = I2C_M_RD;
++ state->msg[1].buf = state->i2c_read_buffer;
++ state->msg[1].len = 2;
++
++ if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
+ dprintk("i2c read error on %d",reg);
+
+- return (rb[0] << 8) | rb[1];
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
+ {
+- u8 b[4] = {
+- (reg >> 8) & 0xff, reg & 0xff,
+- (val >> 8) & 0xff, val & 0xff,
+- };
+- struct i2c_msg msg = {
+- .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+- };
+- return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++ state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
++ state->i2c_write_buffer[1] = reg & 0xff;
++ state->i2c_write_buffer[2] = (val >> 8) & 0xff;
++ state->i2c_write_buffer[3] = val & 0xff;
++
++ memset(&state->msg[0], 0, sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c_addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 4;
++
++ return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+ }
+ static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
+ {
+diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
+index 900af60..0c9f40c 100644
+--- a/drivers/media/dvb/frontends/dib7000p.c
++++ b/drivers/media/dvb/frontends/dib7000p.c
+@@ -63,6 +63,11 @@ struct dib7000p_state {
+
+ u16 tuner_enable;
+ struct i2c_adapter dib7090_tuner_adap;
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[2];
++ u8 i2c_write_buffer[4];
++ u8 i2c_read_buffer[2];
+ };
+
+ enum dib7000p_power_mode {
+@@ -76,29 +81,39 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff);
+
+ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
+ {
+- u8 wb[2] = { reg >> 8, reg & 0xff };
+- u8 rb[2];
+- struct i2c_msg msg[2] = {
+- {.addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
+- {.addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2},
+- };
++ state->i2c_write_buffer[0] = reg >> 8;
++ state->i2c_write_buffer[1] = reg & 0xff;
++
++ memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c_addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 2;
++ state->msg[1].addr = state->i2c_addr >> 1;
++ state->msg[1].flags = I2C_M_RD;
++ state->msg[1].buf = state->i2c_read_buffer;
++ state->msg[1].len = 2;
+
+- if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
++ if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
+ dprintk("i2c read error on %d", reg);
+
+- return (rb[0] << 8) | rb[1];
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
+ {
+- u8 b[4] = {
+- (reg >> 8) & 0xff, reg & 0xff,
+- (val >> 8) & 0xff, val & 0xff,
+- };
+- struct i2c_msg msg = {
+- .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+- };
+- return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++ state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
++ state->i2c_write_buffer[1] = reg & 0xff;
++ state->i2c_write_buffer[2] = (val >> 8) & 0xff;
++ state->i2c_write_buffer[3] = val & 0xff;
++
++ memset(&state->msg[0], 0, sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c_addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 4;
++
++ return i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+ }
+
+ static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf)
+@@ -1550,11 +1565,24 @@ static void dib7000p_release(struct dvb_frontend *demod)
+
+ int dib7000pc_detection(struct i2c_adapter *i2c_adap)
+ {
+- u8 tx[2], rx[2];
++ u8 *tx, *rx;
+ struct i2c_msg msg[2] = {
+- {.addr = 18 >> 1, .flags = 0, .buf = tx, .len = 2},
+- {.addr = 18 >> 1, .flags = I2C_M_RD, .buf = rx, .len = 2},
++ {.addr = 18 >> 1, .flags = 0, .len = 2},
++ {.addr = 18 >> 1, .flags = I2C_M_RD, .len = 2},
+ };
++ int ret = 0;
++
++ tx = kzalloc(2*sizeof(u8), GFP_KERNEL);
++ if (!tx)
++ return -ENOMEM;
++ rx = kzalloc(2*sizeof(u8), GFP_KERNEL);
++ if (!rx) {
++		ret = -ENOMEM;
++		goto rx_memory_error;
++ }
++
++ msg[0].buf = tx;
++ msg[1].buf = rx;
+
+ tx[0] = 0x03;
+ tx[1] = 0x00;
+@@ -1574,7 +1602,11 @@ int dib7000pc_detection(struct i2c_adapter *i2c_adap)
+ }
+
+ dprintk("-D- DiB7000PC not detected");
+- return 0;
++
++ kfree(rx);
++rx_memory_error:
++ kfree(tx);
++ return ret;
+ }
+ EXPORT_SYMBOL(dib7000pc_detection);
+
+diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
+index c1c3e26..7d2ea11 100644
+--- a/drivers/media/dvb/frontends/dib8000.c
++++ b/drivers/media/dvb/frontends/dib8000.c
+@@ -35,6 +35,8 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
+ struct i2c_device {
+ struct i2c_adapter *adap;
+ u8 addr;
++ u8 *i2c_write_buffer;
++ u8 *i2c_read_buffer;
+ };
+
+ struct dib8000_state {
+@@ -70,6 +72,11 @@ struct dib8000_state {
+ u32 status;
+
+ struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[2];
++ u8 i2c_write_buffer[4];
++ u8 i2c_read_buffer[2];
+ };
+
+ enum dib8000_power_mode {
+@@ -79,22 +86,41 @@ enum dib8000_power_mode {
+
+ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
+ {
+- u8 wb[2] = { reg >> 8, reg & 0xff };
+- u8 rb[2];
+ struct i2c_msg msg[2] = {
+- {.addr = i2c->addr >> 1,.flags = 0,.buf = wb,.len = 2},
+- {.addr = i2c->addr >> 1,.flags = I2C_M_RD,.buf = rb,.len = 2},
++ {.addr = i2c->addr >> 1, .flags = 0,
++ .buf = i2c->i2c_write_buffer, .len = 2},
++ {.addr = i2c->addr >> 1, .flags = I2C_M_RD,
++ .buf = i2c->i2c_read_buffer, .len = 2},
+ };
+
++ msg[0].buf[0] = reg >> 8;
++ msg[0].buf[1] = reg & 0xff;
++
+ if (i2c_transfer(i2c->adap, msg, 2) != 2)
+ dprintk("i2c read error on %d", reg);
+
+- return (rb[0] << 8) | rb[1];
++ return (msg[1].buf[0] << 8) | msg[1].buf[1];
+ }
+
+ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+ {
+- return dib8000_i2c_read16(&state->i2c, reg);
++ state->i2c_write_buffer[0] = reg >> 8;
++ state->i2c_write_buffer[1] = reg & 0xff;
++
++ memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c.addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 2;
++ state->msg[1].addr = state->i2c.addr >> 1;
++ state->msg[1].flags = I2C_M_RD;
++ state->msg[1].buf = state->i2c_read_buffer;
++ state->msg[1].len = 2;
++
++ if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
++ dprintk("i2c read error on %d", reg);
++
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
+@@ -109,19 +135,34 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
+
+ static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
+ {
+- u8 b[4] = {
+- (reg >> 8) & 0xff, reg & 0xff,
+- (val >> 8) & 0xff, val & 0xff,
+- };
+- struct i2c_msg msg = {
+- .addr = i2c->addr >> 1,.flags = 0,.buf = b,.len = 4
+- };
+- return i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++ struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0,
++ .buf = i2c->i2c_write_buffer, .len = 4};
++ int ret = 0;
++
++ msg.buf[0] = (reg >> 8) & 0xff;
++ msg.buf[1] = reg & 0xff;
++ msg.buf[2] = (val >> 8) & 0xff;
++ msg.buf[3] = val & 0xff;
++
++ ret = i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++
++ return ret;
+ }
+
+ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
+ {
+- return dib8000_i2c_write16(&state->i2c, reg, val);
++ state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
++ state->i2c_write_buffer[1] = reg & 0xff;
++ state->i2c_write_buffer[2] = (val >> 8) & 0xff;
++ state->i2c_write_buffer[3] = val & 0xff;
++
++ memset(&state->msg[0], 0, sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c.addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 4;
++
++ return i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+ }
+
+ static const s16 coeff_2k_sb_1seg_dqpsk[8] = {
+@@ -980,30 +1021,31 @@ static void dib8000_update_timf(struct dib8000_state *state)
+ dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default);
+ }
+
++static const u16 adc_target_16dB[11] = {
++ (1 << 13) - 825 - 117,
++ (1 << 13) - 837 - 117,
++ (1 << 13) - 811 - 117,
++ (1 << 13) - 766 - 117,
++ (1 << 13) - 737 - 117,
++ (1 << 13) - 693 - 117,
++ (1 << 13) - 648 - 117,
++ (1 << 13) - 619 - 117,
++ (1 << 13) - 575 - 117,
++ (1 << 13) - 531 - 117,
++ (1 << 13) - 501 - 117
++};
++static const u8 permu_seg[] = { 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12 };
++
+ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosearching)
+ {
+ u16 mode, max_constellation, seg_diff_mask = 0, nbseg_diff = 0;
+ u8 guard, crate, constellation, timeI;
+- u8 permu_seg[] = { 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12 };
+ u16 i, coeff[4], P_cfr_left_edge = 0, P_cfr_right_edge = 0, seg_mask13 = 0x1fff; // All 13 segments enabled
+ const s16 *ncoeff = NULL, *ana_fe;
+ u16 tmcc_pow = 0;
+ u16 coff_pow = 0x2800;
+ u16 init_prbs = 0xfff;
+ u16 ana_gain = 0;
+- u16 adc_target_16dB[11] = {
+- (1 << 13) - 825 - 117,
+- (1 << 13) - 837 - 117,
+- (1 << 13) - 811 - 117,
+- (1 << 13) - 766 - 117,
+- (1 << 13) - 737 - 117,
+- (1 << 13) - 693 - 117,
+- (1 << 13) - 648 - 117,
+- (1 << 13) - 619 - 117,
+- (1 << 13) - 575 - 117,
+- (1 << 13) - 531 - 117,
+- (1 << 13) - 501 - 117
+- };
+
+ if (state->ber_monitored_layer != LAYER_ALL)
+ dib8000_write_word(state, 285, (dib8000_read_word(state, 285) & 0x60) | state->ber_monitored_layer);
+@@ -2379,10 +2421,22 @@ EXPORT_SYMBOL(dib8000_get_slave_frontend);
+
+ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
+ {
+- int k = 0;
++ int k = 0, ret = 0;
+ u8 new_addr = 0;
+ struct i2c_device client = {.adap = host };
+
++ client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
++ if (!client.i2c_write_buffer) {
++ dprintk("%s: not enough memory", __func__);
++ return -ENOMEM;
++ }
++ client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
++ if (!client.i2c_read_buffer) {
++ dprintk("%s: not enough memory", __func__);
++ ret = -ENOMEM;
++ goto error_memory;
++ }
++
+ for (k = no_of_demods - 1; k >= 0; k--) {
+ /* designated i2c address */
+ new_addr = first_addr + (k << 1);
+@@ -2394,7 +2448,8 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
+ client.addr = default_addr;
+ if (dib8000_identify(&client) == 0) {
+ dprintk("#%d: not identified", k);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto error;
+ }
+ }
+
+@@ -2420,7 +2475,12 @@ int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 defau
+ dib8000_i2c_write16(&client, 1286, 0);
+ }
+
+- return 0;
++error:
++ kfree(client.i2c_read_buffer);
++error_memory:
++ kfree(client.i2c_write_buffer);
++
++ return ret;
+ }
+
+ EXPORT_SYMBOL(dib8000_i2c_enumeration);
+@@ -2519,6 +2579,8 @@ struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, s
+ memcpy(&state->cfg, cfg, sizeof(struct dib8000_config));
+ state->i2c.adap = i2c_adap;
+ state->i2c.addr = i2c_addr;
++ state->i2c.i2c_write_buffer = state->i2c_write_buffer;
++ state->i2c.i2c_read_buffer = state->i2c_read_buffer;
+ state->gpio_val = cfg->gpio_val;
+ state->gpio_dir = cfg->gpio_dir;
+
+diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
+index 9151876..451ffa2 100644
+--- a/drivers/media/dvb/frontends/dib9000.c
++++ b/drivers/media/dvb/frontends/dib9000.c
+@@ -27,6 +27,8 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
+ struct i2c_device {
+ struct i2c_adapter *i2c_adap;
+ u8 i2c_addr;
++ u8 *i2c_read_buffer;
++ u8 *i2c_write_buffer;
+ };
+
+ /* lock */
+@@ -92,11 +94,16 @@ struct dib9000_state {
+
+ struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
+ u16 component_bus_speed;
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[2];
++ u8 i2c_write_buffer[255];
++ u8 i2c_read_buffer[255];
+ };
+
+-u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+- 0, 0, 0
++ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ enum dib9000_power_mode {
+@@ -217,25 +224,33 @@ static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32
+ u32 chunk_size = 126;
+ u32 l;
+ int ret;
+- u8 wb[2] = { reg >> 8, reg & 0xff };
+- struct i2c_msg msg[2] = {
+- {.addr = state->i2c.i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
+- {.addr = state->i2c.i2c_addr >> 1, .flags = I2C_M_RD, .buf = b, .len = len},
+- };
+
+ if (state->platform.risc.fw_is_running && (reg < 1024))
+ return dib9000_risc_apb_access_read(state, reg, attribute, NULL, 0, b, len);
+
++ memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c.i2c_addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = 2;
++ state->msg[1].addr = state->i2c.i2c_addr >> 1;
++ state->msg[1].flags = I2C_M_RD;
++ state->msg[1].buf = b;
++ state->msg[1].len = len;
++
++ state->i2c_write_buffer[0] = reg >> 8;
++ state->i2c_write_buffer[1] = reg & 0xff;
++
+ if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
+- wb[0] |= (1 << 5);
++ state->i2c_write_buffer[0] |= (1 << 5);
+ if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+- wb[0] |= (1 << 4);
++ state->i2c_write_buffer[0] |= (1 << 4);
+
+ do {
+ l = len < chunk_size ? len : chunk_size;
+- msg[1].len = l;
+- msg[1].buf = b;
+- ret = i2c_transfer(state->i2c.i2c_adap, msg, 2) != 2 ? -EREMOTEIO : 0;
++ state->msg[1].len = l;
++ state->msg[1].buf = b;
++ ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
+ if (ret != 0) {
+ dprintk("i2c read error on %d", reg);
+ return -EREMOTEIO;
+@@ -253,50 +268,47 @@ static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32
+
+ static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
+ {
+- u8 b[2];
+- u8 wb[2] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msg[2] = {
+- {.addr = i2c->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2},
+- {.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD, .buf = b, .len = 2},
++ {.addr = i2c->i2c_addr >> 1, .flags = 0,
++ .buf = i2c->i2c_write_buffer, .len = 2},
++ {.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD,
++ .buf = i2c->i2c_read_buffer, .len = 2},
+ };
+
++ i2c->i2c_write_buffer[0] = reg >> 8;
++ i2c->i2c_write_buffer[1] = reg & 0xff;
++
+ if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
+ dprintk("read register %x error", reg);
+ return 0;
+ }
+
+- return (b[0] << 8) | b[1];
++ return (i2c->i2c_read_buffer[0] << 8) | i2c->i2c_read_buffer[1];
+ }
+
+ static inline u16 dib9000_read_word(struct dib9000_state *state, u16 reg)
+ {
+- u8 b[2];
+- if (dib9000_read16_attr(state, reg, b, 2, 0) != 0)
++ if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2, 0) != 0)
+ return 0;
+- return (b[0] << 8 | b[1]);
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u16 attribute)
+ {
+- u8 b[2];
+- if (dib9000_read16_attr(state, reg, b, 2, attribute) != 0)
++ if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2,
++ attribute) != 0)
+ return 0;
+- return (b[0] << 8 | b[1]);
++ return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+ }
+
+ #define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+
+ static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
+ {
+- u8 b[255];
+ u32 chunk_size = 126;
+ u32 l;
+ int ret;
+
+- struct i2c_msg msg = {
+- .addr = state->i2c.i2c_addr >> 1, .flags = 0, .buf = b, .len = len + 2
+- };
+-
+ if (state->platform.risc.fw_is_running && (reg < 1024)) {
+ if (dib9000_risc_apb_access_write
+ (state, reg, DATA_BUS_ACCESS_MODE_16BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | attribute, buf, len) != 0)
+@@ -304,20 +316,26 @@ static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *
+ return 0;
+ }
+
+- b[0] = (reg >> 8) & 0xff;
+- b[1] = (reg) & 0xff;
++ memset(&state->msg[0], 0, sizeof(struct i2c_msg));
++ state->msg[0].addr = state->i2c.i2c_addr >> 1;
++ state->msg[0].flags = 0;
++ state->msg[0].buf = state->i2c_write_buffer;
++ state->msg[0].len = len + 2;
++
++ state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
++ state->i2c_write_buffer[1] = (reg) & 0xff;
+
+ if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
+- b[0] |= (1 << 5);
++ state->i2c_write_buffer[0] |= (1 << 5);
+ if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
+- b[0] |= (1 << 4);
++ state->i2c_write_buffer[0] |= (1 << 4);
+
+ do {
+ l = len < chunk_size ? len : chunk_size;
+- msg.len = l + 2;
+- memcpy(&b[2], buf, l);
++ state->msg[0].len = l + 2;
++ memcpy(&state->i2c_write_buffer[2], buf, l);
+
+- ret = i2c_transfer(state->i2c.i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++ ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;
+
+ buf += l;
+ len -= l;
+@@ -331,11 +349,16 @@ static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *
+
+ static int dib9000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
+ {
+- u8 b[4] = { (reg >> 8) & 0xff, reg & 0xff, (val >> 8) & 0xff, val & 0xff };
+ struct i2c_msg msg = {
+- .addr = i2c->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
++ .addr = i2c->i2c_addr >> 1, .flags = 0,
++ .buf = i2c->i2c_write_buffer, .len = 4
+ };
+
++ i2c->i2c_write_buffer[0] = (reg >> 8) & 0xff;
++ i2c->i2c_write_buffer[1] = reg & 0xff;
++ i2c->i2c_write_buffer[2] = (val >> 8) & 0xff;
++ i2c->i2c_write_buffer[3] = val & 0xff;
++
+ return i2c_transfer(i2c->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+ }
+
+@@ -1015,8 +1038,8 @@ static int dib9000_fw_memmbx_sync(struct dib9000_state *state, u8 i)
+ return 0;
+ dib9000_risc_mem_write(state, FE_MM_RW_SYNC, &i);
+ do {
+- dib9000_risc_mem_read(state, FE_MM_RW_SYNC, &i, 1);
+- } while (i && index_loop--);
++ dib9000_risc_mem_read(state, FE_MM_RW_SYNC, state->i2c_read_buffer, 1);
++ } while (state->i2c_read_buffer[0] && index_loop--);
+
+ if (index_loop > 0)
+ return 0;
+@@ -1139,7 +1162,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+
+ s8 intlv_native;
+ };
+- struct dibDVBTChannel ch;
++ struct dibDVBTChannel *ch;
+ int ret = 0;
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+@@ -1148,9 +1171,12 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ ret = -EIO;
+ }
+
+- dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION, (u8 *) &ch, sizeof(struct dibDVBTChannel));
++ dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION,
++ state->i2c_read_buffer, sizeof(struct dibDVBTChannel));
++ ch = (struct dibDVBTChannel *)state->i2c_read_buffer;
++
+
+- switch (ch.spectrum_inversion & 0x7) {
++ switch (ch->spectrum_inversion & 0x7) {
+ case 1:
+ state->fe[0]->dtv_property_cache.inversion = INVERSION_ON;
+ break;
+@@ -1162,7 +1188,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ state->fe[0]->dtv_property_cache.inversion = INVERSION_AUTO;
+ break;
+ }
+- switch (ch.nfft) {
++ switch (ch->nfft) {
+ case 0:
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+@@ -1177,7 +1203,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
+ break;
+ }
+- switch (ch.guard) {
++ switch (ch->guard) {
+ case 0:
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32;
+ break;
+@@ -1195,7 +1221,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
+ break;
+ }
+- switch (ch.constellation) {
++ switch (ch->constellation) {
+ case 2:
+ state->fe[0]->dtv_property_cache.modulation = QAM_64;
+ break;
+@@ -1210,7 +1236,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ state->fe[0]->dtv_property_cache.modulation = QAM_AUTO;
+ break;
+ }
+- switch (ch.hrch) {
++ switch (ch->hrch) {
+ case 0:
+ state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_NONE;
+ break;
+@@ -1222,7 +1248,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_AUTO;
+ break;
+ }
+- switch (ch.code_rate_hp) {
++ switch (ch->code_rate_hp) {
+ case 1:
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_1_2;
+ break;
+@@ -1243,7 +1269,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_p
+ state->fe[0]->dtv_property_cache.code_rate_HP = FEC_AUTO;
+ break;
+ }
+- switch (ch.code_rate_lp) {
++ switch (ch->code_rate_lp) {
+ case 1:
+ state->fe[0]->dtv_property_cache.code_rate_LP = FEC_1_2;
+ break;
+@@ -1439,9 +1465,10 @@ static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_paramete
+ break;
+ case CT_DEMOD_STEP_1:
+ if (search)
+- dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, (u8 *) &i, 1);
++ dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, state->i2c_read_buffer, 1);
+ else
+- dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, (u8 *) &i, 1);
++ dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, state->i2c_read_buffer, 1);
++ i = (s8)state->i2c_read_buffer[0];
+ switch (i) { /* something happened */
+ case 0:
+ break;
+@@ -2038,14 +2065,17 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
+ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
+ {
+ struct dib9000_state *state = fe->demodulator_priv;
+- u16 c[16];
++ u16 *c;
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+- dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
++ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
++ state->i2c_read_buffer, 16 * 2);
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
++ c = (u16 *)state->i2c_read_buffer;
++
+ *ber = c[10] << 16 | c[11];
+ return 0;
+ }
+@@ -2054,7 +2084,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
+ {
+ struct dib9000_state *state = fe->demodulator_priv;
+ u8 index_frontend;
+- u16 c[16];
++ u16 *c = (u16 *)state->i2c_read_buffer;
+ u16 val;
+
+ *strength = 0;
+@@ -2069,7 +2099,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+- dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
++ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ val = 65535 - c[4];
+@@ -2083,14 +2113,14 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
+ static u32 dib9000_get_snr(struct dvb_frontend *fe)
+ {
+ struct dib9000_state *state = fe->demodulator_priv;
+- u16 c[16];
++ u16 *c = (u16 *)state->i2c_read_buffer;
+ u32 n, s, exp;
+ u16 val;
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+- dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
++ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ val = c[7];
+@@ -2137,12 +2167,12 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
+ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
+ {
+ struct dib9000_state *state = fe->demodulator_priv;
+- u16 c[16];
++ u16 *c = (u16 *)state->i2c_read_buffer;
+
+ DibAcquireLock(&state->platform.risc.mem_mbx_lock);
+ if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0)
+ return -EIO;
+- dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, sizeof(c));
++ dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
+ DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+
+ *unc = c[12];
+@@ -2151,10 +2181,22 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
+
+ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
+ {
+- int k = 0;
++ int k = 0, ret = 0;
+ u8 new_addr = 0;
+ struct i2c_device client = {.i2c_adap = i2c };
+
++ client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
++ if (!client.i2c_write_buffer) {
++ dprintk("%s: not enough memory", __func__);
++ return -ENOMEM;
++ }
++ client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
++ if (!client.i2c_read_buffer) {
++ dprintk("%s: not enough memory", __func__);
++ ret = -ENOMEM;
++ goto error_memory;
++ }
++
+ client.i2c_addr = default_addr + 16;
+ dib9000_i2c_write16(&client, 1796, 0x0);
+
+@@ -2178,7 +2220,8 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
+ client.i2c_addr = default_addr;
+ if (dib9000_identify(&client) == 0) {
+ dprintk("DiB9000 #%d: not identified", k);
+- return -EIO;
++ ret = -EIO;
++ goto error;
+ }
+ }
+
+@@ -2196,7 +2239,12 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
+ dib9000_i2c_write16(&client, 1795, 0);
+ }
+
+- return 0;
++error:
++ kfree(client.i2c_read_buffer);
++error_memory:
++ kfree(client.i2c_write_buffer);
++
++ return ret;
+ }
+ EXPORT_SYMBOL(dib9000_i2c_enumeration);
+
+@@ -2261,6 +2309,8 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
+ memcpy(&st->chip.d9.cfg, cfg, sizeof(struct dib9000_config));
+ st->i2c.i2c_adap = i2c_adap;
+ st->i2c.i2c_addr = i2c_addr;
++ st->i2c.i2c_write_buffer = st->i2c_write_buffer;
++ st->i2c.i2c_read_buffer = st->i2c_read_buffer;
+
+ st->gpio_dir = DIB9000_GPIO_DEFAULT_DIRECTIONS;
+ st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
+diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
+index f6938f9..dc5d17a 100644
+--- a/drivers/media/dvb/frontends/dibx000_common.c
++++ b/drivers/media/dvb/frontends/dibx000_common.c
+@@ -10,30 +10,39 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
+
+ static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
+ {
+- u8 b[4] = {
+- (reg >> 8) & 0xff, reg & 0xff,
+- (val >> 8) & 0xff, val & 0xff,
+- };
+- struct i2c_msg msg = {
+- .addr = mst->i2c_addr,.flags = 0,.buf = b,.len = 4
+- };
+-
+- return i2c_transfer(mst->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++ mst->i2c_write_buffer[0] = (reg >> 8) & 0xff;
++ mst->i2c_write_buffer[1] = reg & 0xff;
++ mst->i2c_write_buffer[2] = (val >> 8) & 0xff;
++ mst->i2c_write_buffer[3] = val & 0xff;
++
++ memset(mst->msg, 0, sizeof(struct i2c_msg));
++ mst->msg[0].addr = mst->i2c_addr;
++ mst->msg[0].flags = 0;
++ mst->msg[0].buf = mst->i2c_write_buffer;
++ mst->msg[0].len = 4;
++
++ return i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0;
+ }
+
+ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
+ {
+- u8 wb[2] = { reg >> 8, reg & 0xff };
+- u8 rb[2];
+- struct i2c_msg msg[2] = {
+- {.addr = mst->i2c_addr, .flags = 0, .buf = wb, .len = 2},
+- {.addr = mst->i2c_addr, .flags = I2C_M_RD, .buf = rb, .len = 2},
+- };
+-
+- if (i2c_transfer(mst->i2c_adap, msg, 2) != 2)
++ mst->i2c_write_buffer[0] = reg >> 8;
++ mst->i2c_write_buffer[1] = reg & 0xff;
++
++ memset(mst->msg, 0, 2 * sizeof(struct i2c_msg));
++ mst->msg[0].addr = mst->i2c_addr;
++ mst->msg[0].flags = 0;
++ mst->msg[0].buf = mst->i2c_write_buffer;
++ mst->msg[0].len = 2;
++ mst->msg[1].addr = mst->i2c_addr;
++ mst->msg[1].flags = I2C_M_RD;
++ mst->msg[1].buf = mst->i2c_read_buffer;
++ mst->msg[1].len = 2;
++
++ if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
+ dprintk("i2c read error on %d", reg);
+
+- return (rb[0] << 8) | rb[1];
++ return (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
+ }
+
+ static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst)
+@@ -248,26 +257,32 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
+ {
+ struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+- struct i2c_msg m[2 + num];
+- u8 tx_open[4], tx_close[4];
+
+- memset(m, 0, sizeof(struct i2c_msg) * (2 + num));
++ if (num > 32) {
++		dprintk("%s: too many I2C messages to be transmitted (%i). "
++			"Maximum is 32", __func__, num);
++ return -ENOMEM;
++ }
++
++ memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
+
+ dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
+
+- dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
+- m[0].addr = mst->i2c_addr;
+- m[0].buf = tx_open;
+- m[0].len = 4;
++ /* open the gate */
++ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
++ mst->msg[0].addr = mst->i2c_addr;
++ mst->msg[0].buf = &mst->i2c_write_buffer[0];
++ mst->msg[0].len = 4;
+
+- memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
++ memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num);
+
+- dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
+- m[num + 1].addr = mst->i2c_addr;
+- m[num + 1].buf = tx_close;
+- m[num + 1].len = 4;
++ /* close the gate */
++ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0);
++ mst->msg[num + 1].addr = mst->i2c_addr;
++ mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
++ mst->msg[num + 1].len = 4;
+
+- return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
++ return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+ }
+
+ static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
+@@ -279,26 +294,32 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msg[], int num)
+ {
+ struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
+- struct i2c_msg m[2 + num];
+- u8 tx_open[4], tx_close[4];
+
+- memset(m, 0, sizeof(struct i2c_msg) * (2 + num));
++ if (num > 32) {
++		dprintk("%s: too many I2C messages to be transmitted (%i). "
++			"Maximum is 32", __func__, num);
++ return -ENOMEM;
++ }
++
++ memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
+
+ dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
+
+- dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
+- m[0].addr = mst->i2c_addr;
+- m[0].buf = tx_open;
+- m[0].len = 4;
++ /* open the gate */
++ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1);
++ mst->msg[0].addr = mst->i2c_addr;
++ mst->msg[0].buf = &mst->i2c_write_buffer[0];
++ mst->msg[0].len = 4;
+
+- memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);
++ memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num);
+
+- dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
+- m[num + 1].addr = mst->i2c_addr;
+- m[num + 1].buf = tx_close;
+- m[num + 1].len = 4;
++ /* close the gate */
++ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0);
++ mst->msg[num + 1].addr = mst->i2c_addr;
++ mst->msg[num + 1].buf = &mst->i2c_write_buffer[4];
++ mst->msg[num + 1].len = 4;
+
+- return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
++ return i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? num : -EIO;
+ }
+
+ static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
+diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
+index 977d343..f031165 100644
+--- a/drivers/media/dvb/frontends/dibx000_common.h
++++ b/drivers/media/dvb/frontends/dibx000_common.h
+@@ -28,6 +28,11 @@ struct dibx000_i2c_master {
+ u8 i2c_addr;
+
+ u16 base_reg;
++
++ /* for the I2C transfer */
++ struct i2c_msg msg[34];
++ u8 i2c_write_buffer[8];
++ u8 i2c_read_buffer[2];
+ };
+
+ extern int dibx000_init_i2c_master(struct dibx000_i2c_master *mst,
+diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
+index bca307e..f637d34 100644
+--- a/drivers/media/video/cx88/cx88-blackbird.c
++++ b/drivers/media/video/cx88/cx88-blackbird.c
+@@ -1122,7 +1122,6 @@ static int mpeg_release(struct file *file)
+ mutex_lock(&dev->core->lock);
+ file->private_data = NULL;
+ kfree(fh);
+- mutex_unlock(&dev->core->lock);
+
+ /* Make sure we release the hardware */
+ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
+@@ -1131,6 +1130,8 @@ static int mpeg_release(struct file *file)
+
+ atomic_dec(&dev->core->mpeg_users);
+
++ mutex_unlock(&dev->core->lock);
++
+ return 0;
+ }
+
+@@ -1334,11 +1335,9 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
+ blackbird_register_video(dev);
+
+ /* initial device configuration: needed ? */
+- mutex_lock(&dev->core->lock);
+ // init_controls(core);
+ cx88_set_tvnorm(core,core->tvnorm);
+ cx88_video_mux(core,0);
+- mutex_unlock(&dev->core->lock);
+
+ return 0;
+
+diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
+index 7b8c9d3..c69df7e 100644
+--- a/drivers/media/video/cx88/cx88-dvb.c
++++ b/drivers/media/video/cx88/cx88-dvb.c
+@@ -133,6 +133,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
+ return -EINVAL;
+ }
+
++ mutex_lock(&dev->core->lock);
+ drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
+ if (drv) {
+ if (acquire){
+@@ -143,6 +144,7 @@ static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
+ dev->frontends.active_fe_id = 0;
+ }
+ }
++ mutex_unlock(&dev->core->lock);
+
+ return ret;
+ }
+diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
+index addf954..497f26f 100644
+--- a/drivers/media/video/cx88/cx88-mpeg.c
++++ b/drivers/media/video/cx88/cx88-mpeg.c
+@@ -624,13 +624,11 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
+
+ if (drv->advise_acquire)
+ {
+- mutex_lock(&drv->core->lock);
+ core->active_ref++;
+ if (core->active_type_id == CX88_BOARD_NONE) {
+ core->active_type_id = drv->type_id;
+ drv->advise_acquire(drv);
+ }
+- mutex_unlock(&drv->core->lock);
+
+ mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+ }
+@@ -643,14 +641,12 @@ static int cx8802_request_release(struct cx8802_driver *drv)
+ {
+ struct cx88_core *core = drv->core;
+
+- mutex_lock(&drv->core->lock);
+ if (drv->advise_release && --core->active_ref == 0)
+ {
+ drv->advise_release(drv);
+ core->active_type_id = CX88_BOARD_NONE;
+ mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+ }
+- mutex_unlock(&drv->core->lock);
+
+ return 0;
+ }
+@@ -713,18 +709,17 @@ int cx8802_register_driver(struct cx8802_driver *drv)
+ drv->request_release = cx8802_request_release;
+ memcpy(driver, drv, sizeof(*driver));
+
++ mutex_lock(&drv->core->lock);
+ err = drv->probe(driver);
+ if (err == 0) {
+ i++;
+- mutex_lock(&drv->core->lock);
+ list_add_tail(&driver->drvlist, &dev->drvlist);
+- mutex_unlock(&drv->core->lock);
+ } else {
+ printk(KERN_ERR
+ "%s/2: cx8802 probe failed, err = %d\n",
+ dev->core->name, err);
+ }
+-
++ mutex_unlock(&drv->core->lock);
+ }
+
+ return i ? 0 : -ENODEV;
+@@ -748,6 +743,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
+ dev->pci->subsystem_device, dev->core->board.name,
+ dev->core->boardnr);
+
++ mutex_lock(&dev->core->lock);
++
+ list_for_each_entry_safe(d, dtmp, &dev->drvlist, drvlist) {
+ /* only unregister the correct driver type */
+ if (d->type_id != drv->type_id)
+@@ -755,15 +752,14 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
+
+ err = d->remove(d);
+ if (err == 0) {
+- mutex_lock(&drv->core->lock);
+ list_del(&d->drvlist);
+- mutex_unlock(&drv->core->lock);
+ kfree(d);
+ } else
+ printk(KERN_ERR "%s/2: cx8802 driver remove "
+ "failed (%d)\n", dev->core->name, err);
+ }
+
++ mutex_unlock(&dev->core->lock);
+ }
+
+ return err;
+@@ -827,6 +823,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
+
+ flush_request_modules(dev);
+
++ mutex_lock(&dev->core->lock);
++
+ if (!list_empty(&dev->drvlist)) {
+ struct cx8802_driver *drv, *tmp;
+ int err;
+@@ -838,9 +836,7 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
+ list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
+ err = drv->remove(drv);
+ if (err == 0) {
+- mutex_lock(&drv->core->lock);
+ list_del(&drv->drvlist);
+- mutex_unlock(&drv->core->lock);
+ } else
+ printk(KERN_ERR "%s/2: cx8802 driver remove "
+ "failed (%d)\n", dev->core->name, err);
+@@ -848,6 +844,8 @@ static void __devexit cx8802_remove(struct pci_dev *pci_dev)
+ }
+ }
+
++ mutex_unlock(&dev->core->lock);
++
+ /* Destroy any 8802 reference. */
+ dev->core->dvbdev = NULL;
+
+diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
+index 9b3742a..3d32f4a 100644
+--- a/drivers/media/video/cx88/cx88.h
++++ b/drivers/media/video/cx88/cx88.h
+@@ -505,6 +505,8 @@ struct cx8802_driver {
+ int (*suspend)(struct pci_dev *pci_dev, pm_message_t state);
+ int (*resume)(struct pci_dev *pci_dev);
+
++ /* Callers to the following functions must hold core->lock */
++
+ /* MPEG 8802 -> mini driver - Driver probe and configuration */
+ int (*probe)(struct cx8802_driver *drv);
+ int (*remove)(struct cx8802_driver *drv);
+@@ -561,8 +563,9 @@ struct cx8802_dev {
+ /* for switching modulation types */
+ unsigned char ts_gen_cntrl;
+
+- /* List of attached drivers */
++ /* List of attached drivers; must hold core->lock to access */
+ struct list_head drvlist;
++
+ struct work_struct request_module_wk;
+ };
+
+@@ -685,6 +688,8 @@ int cx88_audio_thread(void *data);
+
+ int cx8802_register_driver(struct cx8802_driver *drv);
+ int cx8802_unregister_driver(struct cx8802_driver *drv);
++
++/* Caller must hold core->lock */
+ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
+
+ /* ----------------------------------------------------------- */
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 3ab9ffa..55c5d47 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -994,22 +994,33 @@ static void usbhs_disable(struct device *dev)
+ dev_dbg(dev, "operation timed out\n");
+ }
+
+- if (pdata->ehci_data->phy_reset) {
+- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+-
+- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
++ if (is_omap_usbhs_rev2(omap)) {
++ if (is_ehci_tll_mode(pdata->port_mode[0]))
++ clk_enable(omap->usbtll_p1_fck);
++ if (is_ehci_tll_mode(pdata->port_mode[1]))
++ clk_enable(omap->usbtll_p2_fck);
++ clk_disable(omap->utmi_p2_fck);
++ clk_disable(omap->utmi_p1_fck);
+ }
+
+- clk_disable(omap->utmi_p2_fck);
+- clk_disable(omap->utmi_p1_fck);
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
+
++	/* The gpio_free might sleep, so unlock the spinlock */
++ spin_unlock_irqrestore(&omap->lock, flags);
++
++ if (pdata->ehci_data->phy_reset) {
++ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
++ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
++
++ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
++ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
++ }
++ return;
++
+ end_disble:
+ spin_unlock_irqrestore(&omap->lock, flags);
+ }
+diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
+index 5060e60..e601672 100644
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -319,7 +319,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+- ops->retlen = 0;
++ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+@@ -334,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+ devops.len = subdev->size - to;
+
+ err = subdev->write_oob(subdev, to, &devops);
+- ops->retlen += devops.retlen;
++ ops->retlen += devops.oobretlen;
+ if (err)
+ return err;
+
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index c54a4cb..d1345fc 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3112,6 +3112,8 @@ ident_done:
+ chip->chip_shift += 32 - 1;
+ }
+
++ chip->badblockbits = 8;
++
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index da9a351..2c8040f 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -263,11 +263,10 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+- omap_read_buf16(mtd, buf, len);
++ omap_read_buf16(mtd, (u_char *)p, len);
+ else
+- omap_read_buf8(mtd, buf, len);
++ omap_read_buf8(mtd, (u_char *)p, len);
+ } else {
+- p = (u32 *) buf;
+ do {
+ r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = r_count >> 2;
+@@ -293,7 +292,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
+ struct omap_nand_info, mtd);
+ uint32_t w_count = 0;
+ int i = 0, ret = 0;
+- u16 *p;
++ u16 *p = (u16 *)buf;
+ unsigned long tim, limit;
+
+ /* take care of subpage writes */
+@@ -309,11 +308,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
+ if (ret) {
+ /* PFPW engine is busy, use cpu copy method */
+ if (info->nand.options & NAND_BUSWIDTH_16)
+- omap_write_buf16(mtd, buf, len);
++ omap_write_buf16(mtd, (u_char *)p, len);
+ else
+- omap_write_buf8(mtd, buf, len);
++ omap_write_buf8(mtd, (u_char *)p, len);
+ } else {
+- p = (u16 *) buf;
+ while (len) {
+ w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = w_count >> 1;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index ba71582..a20bfef 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -163,8 +163,6 @@ static int tlb_initialize(struct bonding *bond)
+ struct tlb_client_info *new_hashtbl;
+ int i;
+
+- spin_lock_init(&(bond_info->tx_hashtbl_lock));
+-
+ new_hashtbl = kzalloc(size, GFP_KERNEL);
+ if (!new_hashtbl) {
+ pr_err("%s: Error: Failed to allocate TLB hash table\n",
+@@ -764,8 +762,6 @@ static int rlb_initialize(struct bonding *bond)
+ int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
+ int i;
+
+- spin_lock_init(&(bond_info->rx_hashtbl_lock));
+-
+ new_hashtbl = kmalloc(size, GFP_KERNEL);
+ if (!new_hashtbl) {
+ pr_err("%s: Error: Failed to allocate RLB hash table\n",
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 16d6fe9..ffb0fde 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1535,12 +1535,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ bond_dev->name, slave_dev->name);
+ }
+
+- /* bond must be initialized by bond_open() before enslaving */
+- if (!(bond_dev->flags & IFF_UP)) {
+- pr_warning("%s: master_dev is not up in bond_enslave\n",
+- bond_dev->name);
+- }
+-
+ /* already enslaved */
+ if (slave_dev->flags & IFF_SLAVE) {
+ pr_debug("Error, Device was already enslaved\n");
+@@ -4975,9 +4969,19 @@ static int bond_init(struct net_device *bond_dev)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
++ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+
+ pr_debug("Begin bond_init for %s\n", bond_dev->name);
+
++ /*
++ * Initialize locks that may be required during
++ * en/deslave operations. All of the bond_open work
++ * (of which this is part) should really be moved to
++ * a phase prior to dev_open
++ */
++ spin_lock_init(&(bond_info->tx_hashtbl_lock));
++ spin_lock_init(&(bond_info->rx_hashtbl_lock));
++
+ bond->wq = create_singlethread_workqueue(bond_dev->name);
+ if (!bond->wq)
+ return -ENOMEM;
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index de87aea..8a2717e 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -227,12 +227,6 @@ static ssize_t bonding_store_slaves(struct device *d,
+ struct net_device *dev;
+ struct bonding *bond = to_bond(d);
+
+- /* Quick sanity check -- is the bond interface up? */
+- if (!(bond->dev->flags & IFF_UP)) {
+- pr_warning("%s: doing slave updates when interface is down.\n",
+- bond->dev->name);
+- }
+-
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 78e34e9..6d357d6 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -598,8 +598,8 @@ static int macvlan_port_create(struct net_device *dev)
+ err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
+ if (err)
+ kfree(port);
+-
+- dev->priv_flags |= IFF_MACVLAN_PORT;
++ else
++ dev->priv_flags |= IFF_MACVLAN_PORT;
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+index 6eadf97..37af3f4 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+@@ -652,7 +652,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+- .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
++ .opFlags = AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+@@ -922,7 +922,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+- .xpaBiasLvl = 0,
++ .xpaBiasLvl = 0xf,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+@@ -3994,6 +3994,16 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
+ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
+ );
+
++ /* Write the power for duplicated frames - HT40 */
++
++ /* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
++ REG_WRITE(ah, 0xa3e0,
++ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
++ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
++ POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
++ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
++ );
++
+ /* Write the HT20 power per rate set */
+
+ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index 8649581..fe3c10e 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -69,15 +69,21 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
+ int16_t *nfarray)
+ {
+ struct ath_common *common = ath9k_hw_common(ah);
++ struct ieee80211_conf *conf = &common->hw->conf;
+ struct ath_nf_limits *limit;
+ struct ath9k_nfcal_hist *h;
+ bool high_nf_mid = false;
++ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ int i;
+
+ h = cal->nfCalHist;
+ limit = ath9k_hw_get_nf_limits(ah, ah->curchan);
+
+ for (i = 0; i < NUM_NF_READINGS; i++) {
++ if (!(chainmask & (1 << i)) ||
++ ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
++ continue;
++
+ h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
+
+ if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
+@@ -225,6 +231,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+ int32_t val;
+ u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
+ struct ath_common *common = ath9k_hw_common(ah);
++ struct ieee80211_conf *conf = &common->hw->conf;
+ s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
+
+ if (ah->caldata)
+@@ -234,6 +241,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+ if (chainmask & (1 << i)) {
+ s16 nfval;
+
++ if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
++ continue;
++
+ if (h)
+ nfval = h[i].privNF;
+ else
+@@ -293,6 +303,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
+ ENABLE_REGWRITE_BUFFER(ah);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+ if (chainmask & (1 << i)) {
++ if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
++ continue;
++
+ val = REG_READ(ah, ah->nf_regs[i]);
+ val &= 0xFFFFFE00;
+ val |= (((u32) (-50) << 1) & 0x1ff);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index bafbe57..1755729 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -1783,6 +1783,15 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+
+ mutex_lock(&priv->mutex);
+
++ if (!ctx->vif || !iwl_is_ready_rf(priv)) {
++ /*
++ * Huh? But wait ... this can maybe happen when
++ * we're in the middle of a firmware restart!
++ */
++ err = -EBUSY;
++ goto out;
++ }
++
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+@@ -1810,6 +1819,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ /* success */
+ iwl_teardown_interface(priv, vif, true);
+ vif->type = newtype;
++ vif->p2p = newp2p;
+ err = iwl_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
+index 68b953f..c0a4cfb 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
++++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
+@@ -1658,21 +1658,24 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+ ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
+ if (priv->valid_contexts & BIT(ctx->ctxid))
+
+-static inline int iwl_is_associated(struct iwl_priv *priv,
+- enum iwl_rxon_context_id ctxid)
++static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
+ {
+- return (priv->contexts[ctxid].active.filter_flags &
+- RXON_FILTER_ASSOC_MSK) ? 1 : 0;
++ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+ }
+
+-static inline int iwl_is_any_associated(struct iwl_priv *priv)
++static inline int iwl_is_associated(struct iwl_priv *priv,
++ enum iwl_rxon_context_id ctxid)
+ {
+- return iwl_is_associated(priv, IWL_RXON_CTX_BSS);
++ return iwl_is_associated_ctx(&priv->contexts[ctxid]);
+ }
+
+-static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
++static inline int iwl_is_any_associated(struct iwl_priv *priv)
+ {
+- return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
++ struct iwl_rxon_context *ctx;
++ for_each_context(priv, ctx)
++ if (iwl_is_associated_ctx(ctx))
++ return true;
++ return false;
+ }
+
+ static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index e183587..a8f3bc7 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -82,6 +82,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
++ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
+ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
+ {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 5129ed6..4b2bbe8 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2784,6 +2784,16 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+ #endif
+
++static void __devinit fixup_ti816x_class(struct pci_dev* dev)
++{
++ /* TI 816x devices do not have class code set when in PCIe boot mode */
++ if (dev->class == PCI_CLASS_NOT_DEFINED) {
++ dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
++ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
++ }
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
++
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+ {
+diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
+index f0b8951..a8a2b6b 100644
+--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
++++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
+@@ -1274,6 +1274,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+ iscsi_init.dummy_buffer_addr_hi =
+ (u32) ((u64) hba->dummy_buf_dma >> 32);
+
++ hba->num_ccell = hba->max_sqes >> 1;
+ hba->ctx_ccell_tasks =
+ ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+ iscsi_init.num_ccells_per_conn = hba->num_ccell;
+diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
+index 1d24a28..6adbdc3 100644
+--- a/drivers/scsi/bnx2i/bnx2i_init.c
++++ b/drivers/scsi/bnx2i/bnx2i_init.c
+@@ -244,7 +244,7 @@ void bnx2i_stop(void *handle)
+ wait_event_interruptible_timeout(hba->eh_wait,
+ (list_empty(&hba->ep_ofld_list) &&
+ list_empty(&hba->ep_destroy_list)),
+- 10 * HZ);
++ 2 * HZ);
+ /* Wait for all endpoints to be torn down, Chip will be reset once
+ * control returns to network driver. So it is required to cleanup and
+ * release all connection resources before returning from this routine.
+diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+index 1809f9c..51a970f 100644
+--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
++++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
+@@ -858,7 +858,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+ mutex_init(&hba->net_dev_lock);
+ init_waitqueue_head(&hba->eh_wait);
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+- hba->hba_shutdown_tmo = 20 * HZ;
++ hba->hba_shutdown_tmo = 30 * HZ;
+ hba->conn_teardown_tmo = 20 * HZ;
+ hba->conn_ctx_destroy_tmo = 6 * HZ;
+ } else { /* 5706/5708/5709 */
+@@ -1208,6 +1208,9 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
+ struct bnx2i_cmd *cmd = task->dd_data;
+ struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
++ if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
++ return -ENOMEM;
++
+ /*
+ * If there is no scsi_cmnd this must be a mgmt task
+ */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index d2064a0..9aab26a 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -113,6 +113,7 @@ struct sense_info {
+ };
+
+
++#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
+ #define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+
+ /**
+@@ -121,6 +122,7 @@ struct sense_info {
+ * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
+ * @ioc: per adapter object
++ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+@@ -134,6 +136,7 @@ struct fw_event_work {
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
+ struct MPT2SAS_ADAPTER *ioc;
++ u16 device_handle;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 ignore;
+@@ -3708,17 +3711,75 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ #endif
+
+ /**
+- * _scsih_smart_predicted_fault - illuminate Fault LED
++ * _scsih_turn_on_fault_led - illuminate Fault LED
+ * @ioc: per adapter object
+ * @handle: device handle
++ * Context: process
+ *
+ * Return nothing.
+ */
+ static void
+-_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
++_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ {
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
++
++ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
++ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
++ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
++ mpi_request.SlotStatus =
++ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
++ mpi_request.DevHandle = cpu_to_le16(handle);
++ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
++ if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
++ &mpi_request)) != 0) {
++ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
++ __FILE__, __LINE__, __func__);
++ return;
++ }
++
++ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
++ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
++ "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name,
++ le16_to_cpu(mpi_reply.IOCStatus),
++ le32_to_cpu(mpi_reply.IOCLogInfo)));
++ return;
++ }
++}
++
++/**
++ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
++ * @ioc: per adapter object
++ * @handle: device handle
++ * Context: interrupt.
++ *
++ * Return nothing.
++ */
++static void
++_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
++{
++ struct fw_event_work *fw_event;
++
++ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
++ if (!fw_event)
++ return;
++ fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
++ fw_event->device_handle = handle;
++ fw_event->ioc = ioc;
++ _scsih_fw_event_add(ioc, fw_event);
++}
++
++/**
++ * _scsih_smart_predicted_fault - process smart errors
++ * @ioc: per adapter object
++ * @handle: device handle
++ * Context: interrupt.
++ *
++ * Return nothing.
++ */
++static void
++_scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
++{
+ struct scsi_target *starget;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
+ Mpi2EventNotificationReply_t *event_reply;
+@@ -3745,30 +3806,8 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+ starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+- if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) {
+- memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+- mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+- mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+- mpi_request.SlotStatus =
+- cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+- mpi_request.DevHandle = cpu_to_le16(handle);
+- mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+- if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+- &mpi_request)) != 0) {
+- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+- ioc->name, __FILE__, __LINE__, __func__);
+- return;
+- }
+-
+- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+- "enclosure_processor: ioc_status (0x%04x), "
+- "loginfo(0x%08x)\n", ioc->name,
+- le16_to_cpu(mpi_reply.IOCStatus),
+- le32_to_cpu(mpi_reply.IOCLogInfo)));
+- return;
+- }
+- }
++ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
++ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+@@ -6330,6 +6369,9 @@ _firmware_event_work(struct work_struct *work)
+ }
+
+ switch (fw_event->event) {
++ case MPT2SAS_TURN_ON_FAULT_LED:
++ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
++ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_sas_topology_change_event(ioc, fw_event);
+ break;
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index d3e58d7..c52a0a2 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -1877,14 +1877,15 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
+
+ scsi_remove_host(vha->host);
+
++ /* Allow timer to run to drain queued items, when removing vp */
++ qla24xx_deallocate_vp_id(vha);
++
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
+ " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+ }
+
+- qla24xx_deallocate_vp_id(vha);
+-
+ /* No pending activities shall be there on the vha now */
+ DEBUG(msleep(random32()%10)); /* Just to see if something falls on
+ * the net we have placed below */
+diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
+index f5ba09c..5223c1d 100644
+--- a/drivers/scsi/qla2xxx/qla_fw.h
++++ b/drivers/scsi/qla2xxx/qla_fw.h
+@@ -416,8 +416,7 @@ struct cmd_type_6 {
+ uint8_t vp_index;
+
+ uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
+- uint16_t fcp_data_dseg_len; /* Data segment length. */
+- uint16_t reserved_1; /* MUST be set to 0. */
++ uint32_t fcp_data_dseg_len; /* Data segment length. */
+ };
+
+ #define COMMAND_TYPE_7 0x18 /* Command Type 7 entry */
+diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
+index 455fe13..eb31213 100644
+--- a/drivers/scsi/qla2xxx/qla_nx.c
++++ b/drivers/scsi/qla2xxx/qla_nx.c
+@@ -2548,11 +2548,11 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+- cmd_pkt->fcp_data_dseg_len = dsd_list_len;
++ *dsd_seg++ = cpu_to_le32(dsd_list_len);
+ } else {
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+- *cur_dsd++ = dsd_list_len;
++ *cur_dsd++ = cpu_to_le32(dsd_list_len);
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index aa77475..4c3f5e8 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2360,21 +2360,26 @@ qla2x00_remove_one(struct pci_dev *pdev)
+ base_vha = pci_get_drvdata(pdev);
+ ha = base_vha->hw;
+
+- spin_lock_irqsave(&ha->vport_slock, flags);
+- list_for_each_entry(vha, &ha->vp_list, list) {
+- atomic_inc(&vha->vref_count);
++ mutex_lock(&ha->vport_lock);
++ while (ha->cur_vport_count) {
++ struct Scsi_Host *scsi_host;
+
+- if (vha->fc_vport) {
+- spin_unlock_irqrestore(&ha->vport_slock, flags);
++ spin_lock_irqsave(&ha->vport_slock, flags);
+
+- fc_vport_terminate(vha->fc_vport);
++ BUG_ON(base_vha->list.next == &ha->vp_list);
++ /* This assumes first entry in ha->vp_list is always base vha */
++ vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
++ scsi_host = scsi_host_get(vha->host);
+
+- spin_lock_irqsave(&ha->vport_slock, flags);
+- }
++ spin_unlock_irqrestore(&ha->vport_slock, flags);
++ mutex_unlock(&ha->vport_lock);
++
++ fc_vport_terminate(vha->fc_vport);
++ scsi_host_put(vha->host);
+
+- atomic_dec(&vha->vref_count);
++ mutex_lock(&ha->vport_lock);
+ }
+- spin_unlock_irqrestore(&ha->vport_slock, flags);
++ mutex_unlock(&ha->vport_lock);
+
+ set_bit(UNLOADING, &base_vha->dpc_flags);
+
+@@ -3604,7 +3609,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
+ if (!pci_channel_offline(ha->pdev))
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+
+- if (IS_QLA82XX(ha)) {
++ /* Make sure qla82xx_watchdog is run only for physical port */
++ if (!vha->vp_idx && IS_QLA82XX(ha)) {
+ if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
+ start_dpc++;
+ qla82xx_watchdog(vha);
+@@ -3675,8 +3681,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
+ atomic_read(&vha->loop_down_timer)));
+ }
+
+- /* Check if beacon LED needs to be blinked */
+- if (ha->beacon_blink_led == 1) {
++ /* Check if beacon LED needs to be blinked for physical host only */
++ if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
+ set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 95019c7..4778e27 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -636,7 +636,7 @@ static int sr_probe(struct device *dev)
+ disk->first_minor = minor;
+ sprintf(disk->disk_name, "sr%d", minor);
+ disk->fops = &sr_bdops;
+- disk->flags = GENHD_FL_CD;
++ disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+
+ blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
+diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
+index 9f4b58b..7e22b73 100644
+--- a/drivers/scsi/ultrastor.c
++++ b/drivers/scsi/ultrastor.c
+@@ -307,7 +307,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
+ "0: bsfw %1,%w0\n\t"
+ "btr %0,%1\n\t"
+ "jnc 0b"
+- : "=&r" (rv), "=m" (*field) :);
++ : "=&r" (rv), "+m" (*field) :);
+
+ return rv;
+ }
+diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
+index 6172335..82dd6fb 100644
+--- a/drivers/sh/clk/cpg.c
++++ b/drivers/sh/clk/cpg.c
+@@ -105,7 +105,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
+
+ /* Rebuild the frequency table */
+ clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+- table, &clk->arch_flags);
++ table, NULL);
+
+ return 0;
+ }
+diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c b/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
+index f008659..f7bff4e 100644
+--- a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
++++ b/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
+@@ -1123,21 +1123,12 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ ini->txretry[index] = 0;
+
+ /* ampdu_ack_len: number of acked aggregated frames */
+- /* ampdu_ack_map: block ack bit map for the aggregation */
+ /* ampdu_len: number of aggregated frames */
+ rate_status(wlc, tx_info, txs, mcs);
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
+-
+- /* XXX TODO: Make these accurate. */
+ tx_info->status.ampdu_ack_len =
+- (txs->
+- status & TX_STATUS_FRM_RTX_MASK) >>
+- TX_STATUS_FRM_RTX_SHIFT;
+- tx_info->status.ampdu_len =
+- (txs->
+- status & TX_STATUS_FRM_RTX_MASK) >>
+- TX_STATUS_FRM_RTX_SHIFT;
++ tx_info->status.ampdu_len = 1;
+
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
+@@ -1163,6 +1154,8 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ /* Retry timeout */
+ ini->tx_in_transit--;
+ ieee80211_tx_info_clear_status(tx_info);
++ tx_info->status.ampdu_ack_len = 0;
++ tx_info->status.ampdu_len = 1;
+ tx_info->flags |=
+ IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ skb_pull(p, D11_PHY_HDR_LEN);
+diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+index 8b1451d..8486eb1 100644
+--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
++++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+@@ -68,7 +68,10 @@ static u8 do_join(struct _adapter *padapter)
+ pmlmepriv->fw_state |= _FW_UNDER_LINKING;
+ pmlmepriv->pscanned = plist;
+ pmlmepriv->to_join = true;
+- if (_queue_empty(queue) == true) {
++
++ /* adhoc mode will start with an empty queue, but skip checking */
++ if (!check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) &&
++ _queue_empty(queue)) {
+ if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
+ pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
+ /* when set_ssid/set_bssid for do_join(), but scanning queue
+diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
+index 7b1fe45..37b650b 100644
+--- a/drivers/staging/usbip/usbip_common.c
++++ b/drivers/staging/usbip/usbip_common.c
+@@ -604,7 +604,7 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
+ be32_to_cpus(&pdu->status);
+ be32_to_cpus(&pdu->actual_length);
+ be32_to_cpus(&pdu->start_frame);
+- cpu_to_be32s(&pdu->number_of_packets);
++ be32_to_cpus(&pdu->number_of_packets);
+ be32_to_cpus(&pdu->error_count);
+ }
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index d25e208..fc10ed4 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -150,13 +150,13 @@ out:
+
+ {
+ struct se_device *dev = se_lun->lun_se_dev;
+- spin_lock(&dev->stats_lock);
++ spin_lock_irq(&dev->stats_lock);
+ dev->num_cmds++;
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ dev->write_bytes += se_cmd->data_length;
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ dev->read_bytes += se_cmd->data_length;
+- spin_unlock(&dev->stats_lock);
++ spin_unlock_irq(&dev->stats_lock);
+ }
+
+ /*
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 4a10983..59b8b9c 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -55,7 +55,8 @@ struct se_tmr_req *core_tmr_alloc_req(
+ {
+ struct se_tmr_req *tmr;
+
+- tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
++ tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
++ GFP_ATOMIC : GFP_KERNEL);
+ if (!(tmr)) {
+ printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+ return ERR_PTR(-ENOMEM);
+@@ -398,9 +399,9 @@ int core_tmr_lun_reset(
+ printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+ }
+
+- spin_lock(&dev->stats_lock);
++ spin_lock_irq(&dev->stats_lock);
+ dev->num_resets++;
+- spin_unlock(&dev->stats_lock);
++ spin_unlock_irq(&dev->stats_lock);
+
+ DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+ (preempt_and_abort_list) ? "Preempt" : "TMR",
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9583b23..beaf8fa 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -762,7 +762,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+ transport_all_task_dev_remove_state(cmd);
+ spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+- transport_free_dev_tasks(cmd);
+
+ check_lun:
+ spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+@@ -1195,6 +1194,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
+ break;
+
+ list_del(&task->t_execute_list);
++ atomic_set(&task->task_execute_queue, 0);
+ atomic_dec(&dev->execute_tasks);
+
+ return task;
+@@ -1210,8 +1210,14 @@ void transport_remove_task_from_execute_queue(
+ {
+ unsigned long flags;
+
++ if (atomic_read(&task->task_execute_queue) == 0) {
++ dump_stack();
++ return;
++ }
++
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ list_del(&task->t_execute_list);
++ atomic_set(&task->task_execute_queue, 0);
+ atomic_dec(&dev->execute_tasks);
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ }
+@@ -2058,6 +2064,13 @@ int transport_generic_handle_tmr(
+ }
+ EXPORT_SYMBOL(transport_generic_handle_tmr);
+
++void transport_generic_free_cmd_intr(
++ struct se_cmd *cmd)
++{
++ transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
++}
++EXPORT_SYMBOL(transport_generic_free_cmd_intr);
++
+ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+ {
+ struct se_task *task, *task_tmp;
+@@ -4776,18 +4789,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
+ sg_end_cur->page_link &= ~0x02;
+
+ sg_chain(sg_head, task_sg_num, sg_head_cur);
+- sg_count += (task->task_sg_num + 1);
+- } else
+ sg_count += task->task_sg_num;
++ task_sg_num = (task->task_sg_num + 1);
++ } else {
++ sg_chain(sg_head, task_sg_num, sg_head_cur);
++ sg_count += task->task_sg_num;
++ task_sg_num = task->task_sg_num;
++ }
+
+ sg_head = sg_head_cur;
+ sg_link = sg_link_cur;
+- task_sg_num = task->task_sg_num;
+ continue;
+ }
+ sg_head = sg_first = &task->task_sg[0];
+ sg_link = &task->task_sg[task->task_sg_num];
+- task_sg_num = task->task_sg_num;
+ /*
+ * Check for single task..
+ */
+@@ -4798,9 +4813,12 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
+ */
+ sg_end = &task->task_sg[task->task_sg_num - 1];
+ sg_end->page_link &= ~0x02;
+- sg_count += (task->task_sg_num + 1);
+- } else
+ sg_count += task->task_sg_num;
++ task_sg_num = (task->task_sg_num + 1);
++ } else {
++ sg_count += task->task_sg_num;
++ task_sg_num = task->task_sg_num;
++ }
+ }
+ /*
+ * Setup the starting pointer and total t_tasks_sg_linked_no including
+@@ -4809,21 +4827,20 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
+ T_TASK(cmd)->t_tasks_sg_chained = sg_first;
+ T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+
+- DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
+- " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
++ DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
++ " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
+ T_TASK(cmd)->t_tasks_sg_chained_no);
+
+ for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
+ T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+
+- DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
+- sg, sg_page(sg), sg->length, sg->offset);
++ DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
++ i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
+ if (sg_is_chain(sg))
+ DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+ if (sg_is_last(sg))
+ DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+ }
+-
+ }
+ EXPORT_SYMBOL(transport_do_task_sg_chain);
+
+@@ -5297,6 +5314,8 @@ void transport_generic_free_cmd(
+ if (wait_for_tasks && cmd->transport_wait_for_tasks)
+ cmd->transport_wait_for_tasks(cmd, 0, 0);
+
++ transport_free_dev_tasks(cmd);
++
+ transport_generic_remove(cmd, release_to_pool,
+ session_reinstatement);
+ }
+@@ -6132,6 +6151,9 @@ get_cmd:
+ case TRANSPORT_REMOVE:
+ transport_generic_remove(cmd, 1, 0);
+ break;
++ case TRANSPORT_FREE_CMD_INTR:
++ transport_generic_free_cmd(cmd, 0, 1, 0);
++ break;
+ case TRANSPORT_PROCESS_TMR:
+ transport_generic_do_tmr(cmd);
+ break;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e057e53..caa2535 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -946,7 +946,7 @@ static int acm_probe(struct usb_interface *intf,
+ u8 ac_management_function = 0;
+ u8 call_management_function = 0;
+ int call_interface_num = -1;
+- int data_interface_num;
++ int data_interface_num = -1;
+ unsigned long quirks;
+ int num_rx_buf;
+ int i;
+@@ -1030,7 +1030,11 @@ next_desc:
+ if (!union_header) {
+ if (call_interface_num > 0) {
+ dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
+- data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
++ /* quirks for Droids MuIn LCD */
++ if (quirks & NO_DATA_INTERFACE)
++ data_interface = usb_ifnum_to_if(usb_dev, 0);
++ else
++ data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
+ control_interface = intf;
+ } else {
+ if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
+@@ -1622,6 +1626,11 @@ static const struct usb_device_id acm_ids[] = {
+ .driver_info = NOT_A_MODEM,
+ },
+
++ /* Support for Droids MuIn LCD */
++ { USB_DEVICE(0x04d8, 0x000b),
++ .driver_info = NO_DATA_INTERFACE,
++ },
++
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index b4ea54d..683104a 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -137,3 +137,4 @@ struct acm {
+ #define SINGLE_RX_URB 2
+ #define NO_CAP_LINE 4
+ #define NOT_A_MODEM 8
++#define NO_DATA_INTERFACE 16
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 77a7fae..cddc533 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -986,7 +986,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ spin_unlock_irq (&hcd_root_hub_lock);
+
+ /* Did the HC die before the root hub was registered? */
+- if (HCD_DEAD(hcd) || hcd->state == HC_STATE_HALT)
++ if (HCD_DEAD(hcd))
+ usb_hc_died (hcd); /* This time clean up */
+ }
+
+@@ -2128,9 +2128,6 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+ if (hcd->shared_hcd)
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
+-
+- if (unlikely(hcd->state == HC_STATE_HALT))
+- usb_hc_died(hcd);
+ rc = IRQ_HANDLED;
+ }
+
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 9b7cdb1..41dc093 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1767,7 +1767,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
+ }
+
+ /* newer chips have more FIFO memory than rm9200 */
+- if (cpu_is_at91sam9260()) {
++ if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
+ udc->ep[0].maxpacket = 64;
+ udc->ep[3].maxpacket = 64;
+ udc->ep[4].maxpacket = 512;
+diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
+index 882484a..fa12ec8 100644
+--- a/drivers/usb/gadget/f_rndis.c
++++ b/drivers/usb/gadget/f_rndis.c
+@@ -420,8 +420,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+ */
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+- if (w_length > req->length || w_value
+- || w_index != rndis->ctrl_id)
++ if (w_value || w_index != rndis->ctrl_id)
+ goto invalid;
+ /* read the request; process it later */
+ value = w_length;
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 78561d1..c606b02 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -777,8 +777,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ goto dead;
+ }
+
++ /* Shared IRQ? */
+ masked_status = status & INTR_MASK;
+- if (!masked_status) { /* irq sharing? */
++ if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) {
+ spin_unlock(&ehci->lock);
+ return IRQ_NONE;
+ }
+@@ -873,6 +874,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ dead:
+ ehci_reset(ehci);
+ ehci_writel(ehci, 0, &ehci->regs->configured_flag);
++ usb_hc_died(hcd);
+ /* generic layer kills/unlinks all urbs, then
+ * uses ehci_stop to clean up the rest
+ */
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 1543c83..d12426f 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -471,8 +471,10 @@ static int enable_periodic (struct ehci_hcd *ehci)
+ */
+ status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
+ STS_PSS, 0, 9 * 125);
+- if (status)
++ if (status) {
++ usb_hc_died(ehci_to_hcd(ehci));
+ return status;
++ }
+
+ cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
+ ehci_writel(ehci, cmd, &ehci->regs->command);
+@@ -510,8 +512,10 @@ static int disable_periodic (struct ehci_hcd *ehci)
+ */
+ status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
+ STS_PSS, STS_PSS, 9 * 125);
+- if (status)
++ if (status) {
++ usb_hc_died(ehci_to_hcd(ehci));
+ return status;
++ }
+
+ cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
+ ehci_writel(ehci, cmd, &ehci->regs->command);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index c0e22f2..baae4cc 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -612,6 +612,7 @@ static irqreturn_t isp116x_irq(struct usb_hcd *hcd)
+ /* IRQ's are off, we do no DMA,
+ perfectly ready to die ... */
+ hcd->state = HC_STATE_HALT;
++ usb_hc_died(hcd);
+ ret = IRQ_HANDLED;
+ goto done;
+ }
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index d557235..c001fff 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -764,6 +764,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ if (ints == ~(u32)0) {
+ disable (ohci);
+ ohci_dbg (ohci, "device removed!\n");
++ usb_hc_died(hcd);
+ return IRQ_HANDLED;
+ }
+
+@@ -771,7 +772,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ ints &= ohci_readl(ohci, &regs->intrenable);
+
+ /* interrupt for some other device? */
+- if (ints == 0)
++ if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT))
+ return IRQ_NOTMINE;
+
+ if (ints & OHCI_INTR_UE) {
+@@ -788,6 +789,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
+ } else {
+ disable (ohci);
+ ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
++ usb_hc_died(hcd);
+ }
+
+ ohci_dump (ohci, 1);
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index d84d6f0..ad8166c 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -181,10 +181,18 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+ */
+ static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
+ {
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+- ohci->flags |= OHCI_QUIRK_SHUTDOWN;
+- ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
++ /* Evidently nVidia fixed their later hardware; this is a guess at
++ * the changeover point.
++ */
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
++
++ if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
++ ohci->flags |= OHCI_QUIRK_SHUTDOWN;
++ ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
++ }
+
+ return 0;
+ }
+diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
+index 4a771f6..5fbe997 100644
+--- a/drivers/usb/host/oxu210hp-hcd.c
++++ b/drivers/usb/host/oxu210hp-hcd.c
+@@ -1884,6 +1884,7 @@ static int enable_periodic(struct oxu_hcd *oxu)
+ status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
+ if (status != 0) {
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
++ usb_hc_died(oxu_to_hcd(oxu));
+ return status;
+ }
+
+@@ -1909,6 +1910,7 @@ static int disable_periodic(struct oxu_hcd *oxu)
+ status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
+ if (status != 0) {
+ oxu_to_hcd(oxu)->state = HC_STATE_HALT;
++ usb_hc_died(oxu_to_hcd(oxu));
+ return status;
+ }
+
+@@ -2449,8 +2451,9 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+ goto dead;
+ }
+
++ /* Shared IRQ? */
+ status &= INTR_MASK;
+- if (!status) { /* irq sharing? */
++ if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
+ spin_unlock(&oxu->lock);
+ return IRQ_NONE;
+ }
+@@ -2516,6 +2519,7 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+ dead:
+ ehci_reset(oxu);
+ writel(0, &oxu->regs->configured_flag);
++ usb_hc_died(hcd);
+ /* generic layer kills/unlinks all urbs, then
+ * uses oxu_stop to clean up the rest
+ */
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 627f343..783e5e0 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -207,14 +207,13 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+
+ rings_cached = virt_dev->num_rings_cached;
+ if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+- virt_dev->num_rings_cached++;
+- rings_cached = virt_dev->num_rings_cached;
+ virt_dev->ring_cache[rings_cached] =
+ virt_dev->eps[ep_index].ring;
++ virt_dev->num_rings_cached++;
+ xhci_dbg(xhci, "Cached old ring, "
+ "%d ring%s cached\n",
+- rings_cached,
+- (rings_cached > 1) ? "s" : "");
++ virt_dev->num_rings_cached,
++ (virt_dev->num_rings_cached > 1) ? "s" : "");
+ } else {
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ xhci_dbg(xhci, "Ring cache full (%d rings), "
+@@ -1046,12 +1045,12 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ break;
+
+ case USB_SPEED_FULL:
+- if (usb_endpoint_xfer_int(&ep->desc)) {
++ if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = xhci_parse_exponent_interval(udev, ep);
+ break;
+ }
+ /*
+- * Fall through for isochronous endpoint interval decoding
++ * Fall through for interrupt endpoint interval decoding
+ * since it uses the same rules as low speed interrupt
+ * endpoints.
+ */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7437386..078b566 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1632,6 +1632,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ else
+ *status = 0;
+ break;
++ case COMP_STOP_INVAL:
++ case COMP_STOP:
++ return finish_td(xhci, td, event_trb, event, ep, status, false);
+ default:
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
+@@ -1676,15 +1679,12 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ }
+ } else {
+ /* Maybe the event was for the data stage? */
+- if (trb_comp_code != COMP_STOP_INVAL) {
+- /* We didn't stop on a link TRB in the middle */
+- td->urb->actual_length =
+- td->urb->transfer_buffer_length -
+- TRB_LEN(event->transfer_len);
+- xhci_dbg(xhci, "Waiting for status "
+- "stage event\n");
+- return 0;
+- }
++ td->urb->actual_length =
++ td->urb->transfer_buffer_length -
++ TRB_LEN(le32_to_cpu(event->transfer_len));
++ xhci_dbg(xhci, "Waiting for status "
++ "stage event\n");
++ return 0;
+ }
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 81b976e..d2cd3ce 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1692,8 +1692,17 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx,
+ LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+
++ /* Free any rings that were dropped, but not changed. */
++ for (i = 1; i < 31; ++i) {
++ if ((ctrl_ctx->drop_flags & (1 << (i + 1))) &&
++ !(ctrl_ctx->add_flags & (1 << (i + 1))))
++ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
++ }
+ xhci_zero_in_ctx(xhci, virt_dev);
+- /* Install new rings and free or cache any old rings */
++ /*
++ * Install any rings for completely new endpoints or changed endpoints,
++ * and free or cache any old rings from changed endpoints.
++ */
+ for (i = 1; i < 31; ++i) {
+ if (!virt_dev->eps[i].new_ring)
+ continue;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 0f11afd..ebeccb7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -112,6 +112,10 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
++ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
++ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
++ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 4de6ef0..e8dbde5 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -566,6 +566,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
+ /*
+ * ELV devices:
+ */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index efffc23..1d946cd 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -491,6 +491,11 @@
+ /* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
+ #define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
+
++/*
++ * TavIR AVR product ids (FTDI_VID)
++ */
++#define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
++
+
+
+ /********************************/
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 26710b1..456447e033 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -1,7 +1,7 @@
+ /*
+ * Garmin GPS driver
+ *
+- * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
++ * Copyright (C) 2006-2011 Hermann Kneissel herkne@gmx.de
+ *
+ * The latest version of the driver can be found at
+ * http://sourceforge.net/projects/garmin-gps/
+@@ -51,7 +51,7 @@ static int debug;
+ */
+
+ #define VERSION_MAJOR 0
+-#define VERSION_MINOR 33
++#define VERSION_MINOR 36
+
+ #define _STR(s) #s
+ #define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
+@@ -410,6 +410,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
+ */
+ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
+ {
++ unsigned long flags;
+ const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
+ __le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
+
+@@ -458,7 +459,9 @@ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
+ /* if this was an abort-transfer command, flush all
+ queued data. */
+ if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
++ spin_lock_irqsave(&garmin_data_p->lock, flags);
+ garmin_data_p->flags |= FLAGS_DROP_DATA;
++ spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ pkt_clear(garmin_data_p);
+ }
+
+@@ -943,7 +946,7 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
+ spin_lock_irqsave(&garmin_data_p->lock, flags);
+ garmin_data_p->mode = initial_mode;
+ garmin_data_p->count = 0;
+- garmin_data_p->flags = 0;
++ garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
+ spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
+ /* shutdown any bulk reads that might be going on */
+@@ -1178,7 +1181,8 @@ static int garmin_write_room(struct tty_struct *tty)
+
+
+ static void garmin_read_process(struct garmin_data *garmin_data_p,
+- unsigned char *data, unsigned data_length)
++ unsigned char *data, unsigned data_length,
++ int bulk_data)
+ {
+ unsigned long flags;
+
+@@ -1193,7 +1197,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
+ send it directly to the tty port */
+ if (garmin_data_p->flags & FLAGS_QUEUING) {
+ pkt_add(garmin_data_p, data, data_length);
+- } else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
++ } else if (bulk_data ||
++ getLayerId(data) == GARMIN_LAYERID_APPL) {
+
+ spin_lock_irqsave(&garmin_data_p->lock, flags);
+ garmin_data_p->flags |= APP_RESP_SEEN;
+@@ -1237,7 +1242,7 @@ static void garmin_read_bulk_callback(struct urb *urb)
+ usb_serial_debug_data(debug, &port->dev,
+ __func__, urb->actual_length, data);
+
+- garmin_read_process(garmin_data_p, data, urb->actual_length);
++ garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
+
+ if (urb->actual_length == 0 &&
+ 0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
+@@ -1346,7 +1351,7 @@ static void garmin_read_int_callback(struct urb *urb)
+ __func__, garmin_data_p->serial_num);
+ }
+
+- garmin_read_process(garmin_data_p, data, urb->actual_length);
++ garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
+
+ port->interrupt_in_urb->dev = port->serial->dev;
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -1461,6 +1466,7 @@ static int garmin_attach(struct usb_serial *serial)
+ garmin_data_p->timer.function = timeout_handler;
+ garmin_data_p->port = port;
+ garmin_data_p->state = 0;
++ garmin_data_p->flags = 0;
+ garmin_data_p->count = 0;
+ usb_set_serial_port_data(port, garmin_data_p);
+
+diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
+index 653465f..e2bfecc 100644
+--- a/drivers/usb/serial/moto_modem.c
++++ b/drivers/usb/serial/moto_modem.c
+@@ -25,6 +25,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
+ { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */
+ { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
++ { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */
+ { USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */
+ { },
+ };
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d77ff04..318dd00 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -149,6 +149,7 @@ static void option_instat_callback(struct urb *urb);
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_E14AC 0x14AC
+ #define HUAWEI_PRODUCT_ETS1220 0x1803
++#define HUAWEI_PRODUCT_E353 0x1506
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -532,6 +533,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+@@ -972,7 +974,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+- { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
++ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+@@ -1109,6 +1111,12 @@ static int option_probe(struct usb_serial *serial,
+ serial->interface->cur_altsetting->desc.bInterfaceNumber == 1)
+ return -ENODEV;
+
++ /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
++ if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID &&
++ serial->dev->descriptor.idProduct == SAMSUNG_PRODUCT_GT_B3730 &&
++ serial->interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA)
++ return -ENODEV;
++
+ data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+
+ if (!data)
+diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
+index 3236e03..e41f50c 100644
+--- a/drivers/usb/storage/unusual_realtek.h
++++ b/drivers/usb/storage/unusual_realtek.h
+@@ -23,19 +23,19 @@
+ #if defined(CONFIG_USB_STORAGE_REALTEK) || \
+ defined(CONFIG_USB_STORAGE_REALTEK_MODULE)
+
+-UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
++UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+- USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+ UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+- USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+-UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
++UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+- USB_SC_SCSI, USB_PR_BULK, init_realtek_cr, 0),
++ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+ #endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 257b00e..1f2b199 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1120,6 +1120,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ goto restart;
+ }
+ }
++
++ if (!ret && !bdev->bd_openers) {
++ bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
++ bdi = blk_get_backing_dev_info(bdev);
++ if (bdi == NULL)
++ bdi = &default_backing_dev_info;
++ bdev_inode_switch_bdi(bdev->bd_inode, bdi);
++ }
++
+ /*
+ * If the device is invalidated, rescan partition
+ * if open succeeded or failed with -ENOMEDIUM.
+@@ -1130,14 +1139,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ rescan_partitions(disk, bdev);
+ if (ret)
+ goto out_clear;
+-
+- if (!bdev->bd_openers) {
+- bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+- bdi = blk_get_backing_dev_info(bdev);
+- if (bdi == NULL)
+- bdi = &default_backing_dev_info;
+- bdev_inode_switch_bdi(bdev->bd_inode, bdi);
+- }
+ } else {
+ struct block_device *whole;
+ whole = bdget_disk(disk, 0);
+@@ -1237,6 +1238,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
+ res = __blkdev_get(bdev, mode, 0);
+
+ if (whole) {
++ struct gendisk *disk = whole->bd_disk;
++
+ /* finish claiming */
+ mutex_lock(&bdev->bd_mutex);
+ spin_lock(&bdev_lock);
+@@ -1263,15 +1266,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
+ spin_unlock(&bdev_lock);
+
+ /*
+- * Block event polling for write claims. Any write
+- * holder makes the write_holder state stick until all
+- * are released. This is good enough and tracking
+- * individual writeable reference is too fragile given
+- * the way @mode is used in blkdev_get/put().
++ * Block event polling for write claims if requested. Any
++ * write holder makes the write_holder state stick until
++ * all are released. This is good enough and tracking
++ * individual writeable reference is too fragile given the
++ * way @mode is used in blkdev_get/put().
+ */
+- if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
++ if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
++ !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+ bdev->bd_write_holder = true;
+- disk_block_events(bdev->bd_disk);
++ disk_block_events(disk);
+ }
+
+ mutex_unlock(&bdev->bd_mutex);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 277262a..29fac128 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2447,7 +2447,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+
+ if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
+ __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+-
++ cFYI(1, "unix caps which server supports %lld", cap);
+ /* check for reconnect case in which we do not
+ want to change the mount behavior if we can avoid it */
+ if (vol_info == NULL) {
+@@ -2465,6 +2465,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+ }
+ }
+
++ if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
++ cERROR(1, "per-share encryption not supported yet");
++
+ cap &= CIFS_UNIX_CAP_MASK;
+ if (vol_info && vol_info->no_psx_acl)
+ cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
+@@ -2513,6 +2516,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+ cFYI(1, "very large read cap");
+ if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
+ cFYI(1, "very large write cap");
++ if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
++ cFYI(1, "transport encryption cap");
++ if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
++ cFYI(1, "mandatory transport encryption cap");
+ #endif /* CIFS_DEBUG2 */
+ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
+ if (vol_info == NULL) {
+@@ -2831,20 +2838,26 @@ try_mount_again:
+ goto remote_path_check;
+ }
+
+- /* do not care if following two calls succeed - informational */
+- if (!tcon->ipc) {
+- CIFSSMBQFSDeviceInfo(xid, tcon);
+- CIFSSMBQFSAttributeInfo(xid, tcon);
+- }
+-
+ /* tell server which Unix caps we support */
+- if (tcon->ses->capabilities & CAP_UNIX)
++ if (tcon->ses->capabilities & CAP_UNIX) {
+ /* reset of caps checks mount to see if unix extensions
+ disabled for just this mount */
+ reset_cifs_unix_caps(xid, tcon, sb, volume_info);
+- else
++ if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
++ (le64_to_cpu(tcon->fsUnixInfo.Capability) &
++ CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
++ rc = -EACCES;
++ goto mount_fail_check;
++ }
++ } else
+ tcon->unix_ext = 0; /* server does not support them */
+
++ /* do not care if following two calls succeed - informational */
++ if (!tcon->ipc) {
++ CIFSSMBQFSDeviceInfo(xid, tcon);
++ CIFSSMBQFSAttributeInfo(xid, tcon);
++ }
++
+ /* convert forward to back slashes in prepath here if needed */
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
+ convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 4d4cc6a..94ab3c0 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -527,6 +527,8 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
+ dget(lower_dentry);
+ rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
+ dput(lower_dentry);
++ if (!rc && dentry->d_inode)
++ clear_nlink(dentry->d_inode);
+ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
+ dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
+ unlock_dir(lower_dir_dentry);
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 03e609c..27a7fef 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -599,8 +599,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
+ struct mutex *tfm_mutex;
+ char *block_aligned_filename;
+ struct ecryptfs_auth_tok *auth_tok;
+- struct scatterlist src_sg;
+- struct scatterlist dst_sg;
++ struct scatterlist src_sg[2];
++ struct scatterlist dst_sg[2];
+ struct blkcipher_desc desc;
+ char iv[ECRYPTFS_MAX_IV_BYTES];
+ char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
+@@ -816,23 +816,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
+ memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
+ filename_size);
+ rc = virt_to_scatterlist(s->block_aligned_filename,
+- s->block_aligned_filename_size, &s->src_sg, 1);
+- if (rc != 1) {
++ s->block_aligned_filename_size, s->src_sg, 2);
++ if (rc < 1) {
+ printk(KERN_ERR "%s: Internal error whilst attempting to "
+- "convert filename memory to scatterlist; "
+- "expected rc = 1; got rc = [%d]. "
++ "convert filename memory to scatterlist; rc = [%d]. "
+ "block_aligned_filename_size = [%zd]\n", __func__, rc,
+ s->block_aligned_filename_size);
+ goto out_release_free_unlock;
+ }
+ rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
+- &s->dst_sg, 1);
+- if (rc != 1) {
++ s->dst_sg, 2);
++ if (rc < 1) {
+ printk(KERN_ERR "%s: Internal error whilst attempting to "
+ "convert encrypted filename memory to scatterlist; "
+- "expected rc = 1; got rc = [%d]. "
+- "block_aligned_filename_size = [%zd]\n", __func__, rc,
+- s->block_aligned_filename_size);
++ "rc = [%d]. block_aligned_filename_size = [%zd]\n",
++ __func__, rc, s->block_aligned_filename_size);
+ goto out_release_free_unlock;
+ }
+ /* The characters in the first block effectively do the job
+@@ -855,7 +853,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
+ mount_crypt_stat->global_default_fn_cipher_key_bytes);
+ goto out_release_free_unlock;
+ }
+- rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
++ rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
+ s->block_aligned_filename_size);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to encrypt filename; "
+@@ -891,8 +889,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
+ struct mutex *tfm_mutex;
+ char *decrypted_filename;
+ struct ecryptfs_auth_tok *auth_tok;
+- struct scatterlist src_sg;
+- struct scatterlist dst_sg;
++ struct scatterlist src_sg[2];
++ struct scatterlist dst_sg[2];
+ struct blkcipher_desc desc;
+ char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
+ char iv[ECRYPTFS_MAX_IV_BYTES];
+@@ -1008,13 +1006,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
+ }
+ mutex_lock(s->tfm_mutex);
+ rc = virt_to_scatterlist(&data[(*packet_size)],
+- s->block_aligned_filename_size, &s->src_sg, 1);
+- if (rc != 1) {
++ s->block_aligned_filename_size, s->src_sg, 2);
++ if (rc < 1) {
+ printk(KERN_ERR "%s: Internal error whilst attempting to "
+ "convert encrypted filename memory to scatterlist; "
+- "expected rc = 1; got rc = [%d]. "
+- "block_aligned_filename_size = [%zd]\n", __func__, rc,
+- s->block_aligned_filename_size);
++ "rc = [%d]. block_aligned_filename_size = [%zd]\n",
++ __func__, rc, s->block_aligned_filename_size);
+ goto out_unlock;
+ }
+ (*packet_size) += s->block_aligned_filename_size;
+@@ -1028,13 +1025,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
+ goto out_unlock;
+ }
+ rc = virt_to_scatterlist(s->decrypted_filename,
+- s->block_aligned_filename_size, &s->dst_sg, 1);
+- if (rc != 1) {
++ s->block_aligned_filename_size, s->dst_sg, 2);
++ if (rc < 1) {
+ printk(KERN_ERR "%s: Internal error whilst attempting to "
+ "convert decrypted filename memory to scatterlist; "
+- "expected rc = 1; got rc = [%d]. "
+- "block_aligned_filename_size = [%zd]\n", __func__, rc,
+- s->block_aligned_filename_size);
++ "rc = [%d]. block_aligned_filename_size = [%zd]\n",
++ __func__, rc, s->block_aligned_filename_size);
+ goto out_free_unlock;
+ }
+ /* The characters in the first block effectively do the job of
+@@ -1065,7 +1061,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
+ mount_crypt_stat->global_default_fn_cipher_key_bytes);
+ goto out_free_unlock;
+ }
+- rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
++ rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
+ s->block_aligned_filename_size);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to decrypt filename; "
+diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
+index 32f3b86..93f9fd0 100644
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -1416,10 +1416,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+ frame->at = entries;
+ frame->bh = bh;
+ bh = bh2;
++ /*
++ * Mark buffers dirty here so that if do_split() fails we write a
++ * consistent set of buffers to disk.
++ */
++ ext3_journal_dirty_metadata(handle, frame->bh);
++ ext3_journal_dirty_metadata(handle, bh);
+ de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+- dx_release (frames);
+- if (!(de))
++ if (!de) {
++ ext3_mark_inode_dirty(handle, dir);
++ dx_release(frames);
+ return retval;
++ }
++ dx_release(frames);
+
+ return add_dirent_to_buf(handle, dentry, inode, de, bh);
+ }
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 4daaf2b..1e37c09 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1590,12 +1590,8 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
+ */
+ struct ext4_lazy_init {
+ unsigned long li_state;
+-
+- wait_queue_head_t li_wait_daemon;
+ wait_queue_head_t li_wait_task;
+- struct timer_list li_timer;
+ struct task_struct *li_task;
+-
+ struct list_head li_request_list;
+ struct mutex li_list_mtx;
+ };
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index d8a16ee..15bfa44 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1273,6 +1273,8 @@ repeat_load_buddy:
+ return 0;
+
+ err:
++ if (page)
++ page_cache_release(page);
+ if (e4b->bd_bitmap_page)
+ page_cache_release(e4b->bd_bitmap_page);
+ if (e4b->bd_buddy_page)
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index b6dbd05..7bb8f76 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio, int error)
+ for (i = 0; i < io_end->num_io_pages; i++) {
+ struct page *page = io_end->pages[i]->p_page;
+ struct buffer_head *bh, *head;
+- int partial_write = 0;
++ loff_t offset;
++ loff_t io_end_offset;
+
+- head = page_buffers(page);
+- if (error)
++ if (error) {
+ SetPageError(page);
+- BUG_ON(!head);
+- if (head->b_size != PAGE_CACHE_SIZE) {
+- loff_t offset;
+- loff_t io_end_offset = io_end->offset + io_end->size;
++ set_bit(AS_EIO, &page->mapping->flags);
++ head = page_buffers(page);
++ BUG_ON(!head);
++
++ io_end_offset = io_end->offset + io_end->size;
+
+ offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
+ bh = head;
+ do {
+ if ((offset >= io_end->offset) &&
+- (offset+bh->b_size <= io_end_offset)) {
+- if (error)
+- buffer_io_error(bh);
+-
+- }
+- if (buffer_delay(bh))
+- partial_write = 1;
+- else if (!buffer_mapped(bh))
+- clear_buffer_dirty(bh);
+- else if (buffer_dirty(bh))
+- partial_write = 1;
++ (offset+bh->b_size <= io_end_offset))
++ buffer_io_error(bh);
++
+ offset += bh->b_size;
+ bh = bh->b_this_page;
+ } while (bh != head);
+ }
+
+- /*
+- * If this is a partial write which happened to make
+- * all buffers uptodate then we can optimize away a
+- * bogus readpage() for the next read(). Here we
+- * 'discover' whether the page went uptodate as a
+- * result of this (potentially partial) write.
+- */
+- if (!partial_write)
+- SetPageUptodate(page);
+-
+ put_io_page(io_end->pages[i]);
+ }
+ io_end->num_io_pages = 0;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 8553dfb..e28c0f2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2659,12 +2659,6 @@ static void print_daily_error_info(unsigned long arg)
+ mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
+ }
+
+-static void ext4_lazyinode_timeout(unsigned long data)
+-{
+- struct task_struct *p = (struct task_struct *)data;
+- wake_up_process(p);
+-}
+-
+ /* Find next suitable group and run ext4_init_inode_table */
+ static int ext4_run_li_request(struct ext4_li_request *elr)
+ {
+@@ -2712,7 +2706,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
+
+ /*
+ * Remove lr_request from the list_request and free the
+- * request tructure. Should be called with li_list_mtx held
++ * request structure. Should be called with li_list_mtx held
+ */
+ static void ext4_remove_li_request(struct ext4_li_request *elr)
+ {
+@@ -2730,14 +2724,16 @@ static void ext4_remove_li_request(struct ext4_li_request *elr)
+
+ static void ext4_unregister_li_request(struct super_block *sb)
+ {
+- struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request;
+-
+- if (!ext4_li_info)
++ mutex_lock(&ext4_li_mtx);
++ if (!ext4_li_info) {
++ mutex_unlock(&ext4_li_mtx);
+ return;
++ }
+
+ mutex_lock(&ext4_li_info->li_list_mtx);
+- ext4_remove_li_request(elr);
++ ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
+ mutex_unlock(&ext4_li_info->li_list_mtx);
++ mutex_unlock(&ext4_li_mtx);
+ }
+
+ static struct task_struct *ext4_lazyinit_task;
+@@ -2756,14 +2752,10 @@ static int ext4_lazyinit_thread(void *arg)
+ struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
+ struct list_head *pos, *n;
+ struct ext4_li_request *elr;
+- unsigned long next_wakeup;
+- DEFINE_WAIT(wait);
++ unsigned long next_wakeup, cur;
+
+ BUG_ON(NULL == eli);
+
+- eli->li_timer.data = (unsigned long)current;
+- eli->li_timer.function = ext4_lazyinode_timeout;
+-
+ eli->li_task = current;
+ wake_up(&eli->li_wait_task);
+
+@@ -2797,19 +2789,15 @@ cont_thread:
+ if (freezing(current))
+ refrigerator();
+
+- if ((time_after_eq(jiffies, next_wakeup)) ||
++ cur = jiffies;
++ if ((time_after_eq(cur, next_wakeup)) ||
+ (MAX_JIFFY_OFFSET == next_wakeup)) {
+ cond_resched();
+ continue;
+ }
+
+- eli->li_timer.expires = next_wakeup;
+- add_timer(&eli->li_timer);
+- prepare_to_wait(&eli->li_wait_daemon, &wait,
+- TASK_INTERRUPTIBLE);
+- if (time_before(jiffies, next_wakeup))
+- schedule();
+- finish_wait(&eli->li_wait_daemon, &wait);
++ schedule_timeout_interruptible(next_wakeup - cur);
++
+ if (kthread_should_stop()) {
+ ext4_clear_request_list();
+ goto exit_thread;
+@@ -2833,12 +2821,10 @@ exit_thread:
+ goto cont_thread;
+ }
+ mutex_unlock(&eli->li_list_mtx);
+- del_timer_sync(&ext4_li_info->li_timer);
+ eli->li_task = NULL;
+ wake_up(&eli->li_wait_task);
+
+ kfree(ext4_li_info);
+- ext4_lazyinit_task = NULL;
+ ext4_li_info = NULL;
+ mutex_unlock(&ext4_li_mtx);
+
+@@ -2866,7 +2852,6 @@ static int ext4_run_lazyinit_thread(void)
+ if (IS_ERR(ext4_lazyinit_task)) {
+ int err = PTR_ERR(ext4_lazyinit_task);
+ ext4_clear_request_list();
+- del_timer_sync(&ext4_li_info->li_timer);
+ kfree(ext4_li_info);
+ ext4_li_info = NULL;
+ printk(KERN_CRIT "EXT4: error %d creating inode table "
+@@ -2915,9 +2900,7 @@ static int ext4_li_info_new(void)
+ INIT_LIST_HEAD(&eli->li_request_list);
+ mutex_init(&eli->li_list_mtx);
+
+- init_waitqueue_head(&eli->li_wait_daemon);
+ init_waitqueue_head(&eli->li_wait_task);
+- init_timer(&eli->li_timer);
+ eli->li_state |= EXT4_LAZYINIT_QUIT;
+
+ ext4_li_info = eli;
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 69b1804..f486ff6 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -722,8 +722,13 @@ wait_for_iobuf:
+ required. */
+ JBUFFER_TRACE(jh, "file as BJ_Forget");
+ journal_file_buffer(jh, commit_transaction, BJ_Forget);
+- /* Wake up any transactions which were waiting for this
+- IO to complete */
++ /*
++ * Wake up any transactions which were waiting for this
++ * IO to complete. The barrier must be here so that changes
++ * by journal_file_buffer() take effect before wake_up_bit()
++ * does the waitqueue check.
++ */
++ smp_mb();
+ wake_up_bit(&bh->b_state, BH_Unshadow);
+ JBUFFER_TRACE(jh, "brelse shadowed buffer");
+ __brelse(bh);
+diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
+index b3713af..e2d4285 100644
+--- a/fs/jbd/journal.c
++++ b/fs/jbd/journal.c
+@@ -437,9 +437,12 @@ int __log_space_left(journal_t *journal)
+ int __log_start_commit(journal_t *journal, tid_t target)
+ {
+ /*
+- * Are we already doing a recent enough commit?
++ * The only transaction we can possibly wait upon is the
++ * currently running transaction (if it exists). Otherwise,
++ * the target tid must be an old one.
+ */
+- if (!tid_geq(journal->j_commit_request, target)) {
++ if (journal->j_running_transaction &&
++ journal->j_running_transaction->t_tid == target) {
+ /*
+ * We want a new commit: OK, mark the request and wakeup the
+ * commit thread. We do _not_ do the commit ourselves.
+@@ -451,7 +454,14 @@ int __log_start_commit(journal_t *journal, tid_t target)
+ journal->j_commit_sequence);
+ wake_up(&journal->j_wait_commit);
+ return 1;
+- }
++ } else if (!tid_geq(journal->j_commit_request, target))
++ /* This should never happen, but if it does, preserve
++ the evidence before kjournald goes into a loop and
++ increments j_commit_sequence beyond all recognition. */
++ WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
++ journal->j_commit_request, journal->j_commit_sequence,
++ target, journal->j_running_transaction ?
++ journal->j_running_transaction->t_tid : 0);
+ return 0;
+ }
+
+diff --git a/fs/namei.c b/fs/namei.c
+index e3c4f11..6ff858c 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1378,12 +1378,12 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+ {
+ int res;
+
+- BUG_ON(nd->depth >= MAX_NESTED_LINKS);
+ if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
+ path_put_conditional(path, nd);
+ path_put(&nd->path);
+ return -ELOOP;
+ }
++ BUG_ON(nd->depth >= MAX_NESTED_LINKS);
+
+ nd->depth++;
+ current->link_count++;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index cf1b339..d0e15db 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -267,9 +267,11 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
+ break;
+ nfs4_schedule_stateid_recovery(server, state);
+ goto wait_on_recovery;
++ case -NFS4ERR_EXPIRED:
++ if (state != NULL)
++ nfs4_schedule_stateid_recovery(server, state);
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_STALE_CLIENTID:
+- case -NFS4ERR_EXPIRED:
+ nfs4_schedule_lease_recovery(clp);
+ goto wait_on_recovery;
+ #if defined(CONFIG_NFS_V4_1)
+@@ -3670,9 +3672,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
+ break;
+ nfs4_schedule_stateid_recovery(server, state);
+ goto wait_on_recovery;
++ case -NFS4ERR_EXPIRED:
++ if (state != NULL)
++ nfs4_schedule_stateid_recovery(server, state);
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_STALE_CLIENTID:
+- case -NFS4ERR_EXPIRED:
+ nfs4_schedule_lease_recovery(clp);
+ goto wait_on_recovery;
+ #if defined(CONFIG_NFS_V4_1)
+@@ -4543,6 +4547,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
+ case -ESTALE:
+ goto out;
+ case -NFS4ERR_EXPIRED:
++ nfs4_schedule_stateid_recovery(server, state);
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ nfs4_schedule_lease_recovery(server->nfs_client);
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 036f5ad..e97dd21 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1466,7 +1466,10 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
+ #ifdef CONFIG_NFS_V4_1
+ void nfs4_schedule_session_recovery(struct nfs4_session *session)
+ {
+- nfs4_schedule_lease_recovery(session->clp);
++ struct nfs_client *clp = session->clp;
++
++ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
++ nfs4_schedule_lease_recovery(clp);
+ }
+ EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
+
+@@ -1549,6 +1552,7 @@ static int nfs4_reset_session(struct nfs_client *clp)
+ status = nfs4_recovery_handle_error(clp, status);
+ goto out;
+ }
++ clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* create_session negotiated new slot table */
+ clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index f57f528..101c85a 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1009,7 +1009,7 @@ void
+ pnfs_set_layoutcommit(struct nfs_write_data *wdata)
+ {
+ struct nfs_inode *nfsi = NFS_I(wdata->inode);
+- loff_t end_pos = wdata->args.offset + wdata->res.count;
++ loff_t end_pos = wdata->mds_offset + wdata->res.count;
+ bool mark_as_dirty = false;
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
+index ce4f624..a29d5cc 100644
+--- a/fs/partitions/ldm.c
++++ b/fs/partitions/ldm.c
+@@ -1335,6 +1335,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
+
+ list_add_tail (&f->list, frags);
+ found:
++ if (rec >= f->num) {
++ ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
++ return false;
++ }
++
+ if (f->map & (1 << rec)) {
+ ldm_error ("Duplicate VBLK, part %d.", rec);
+ f->map &= 0x7F; /* Mark the group as broken */
+diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
+index bf31b47..cad60b5 100644
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -475,7 +475,8 @@ failed:
+ * @c: UBIFS file-system description object
+ *
+ * This function returns a pointer to the superblock node or a negative error
+- * code.
++ * code. Note, the user of this function is responsible for kfree()'ing the
++ * returned superblock buffer.
+ */
+ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
+ {
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 04ad07f..328e6fc 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1584,6 +1584,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
+ }
+ sup->leb_cnt = cpu_to_le32(c->leb_cnt);
+ err = ubifs_write_sb_node(c, sup);
++ kfree(sup);
+ if (err)
+ goto out;
+ }
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 2ad95fa..ae9091a 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -257,7 +257,7 @@ struct queue_limits {
+ unsigned char misaligned;
+ unsigned char discard_misaligned;
+ unsigned char cluster;
+- signed char discard_zeroes_data;
++ unsigned char discard_zeroes_data;
+ };
+
+ struct request_queue
+@@ -364,6 +364,8 @@ struct request_queue
+ * for flush operations
+ */
+ unsigned int flush_flags;
++ unsigned int flush_not_queueable:1;
++ unsigned int flush_queue_delayed:1;
+ unsigned int flush_pending_idx:1;
+ unsigned int flush_running_idx:1;
+ unsigned long flush_pending_since;
+@@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+ extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+ extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
++extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+
+ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+@@ -1066,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
+ {
+ unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
++ if (!lim->max_discard_sectors)
++ return 0;
++
+ return (lim->discard_granularity + lim->discard_alignment - alignment)
+ & (lim->discard_granularity - 1);
+ }
+
+ static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+ {
+- if (q->limits.discard_zeroes_data == 1)
++ if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
+ return 1;
+
+ return 0;
+@@ -1111,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
+ return bdev->bd_block_size;
+ }
+
++static inline bool queue_flush_queueable(struct request_queue *q)
++{
++ return !q->flush_not_queueable;
++}
++
+ typedef struct {struct page *v;} Sector;
+
+ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 33fa120..e376270 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -299,6 +299,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
+ struct resource *data_resource, struct resource *bss_resource);
+ extern unsigned long efi_get_time(void);
+ extern int efi_set_rtc_mmss(unsigned long nowtime);
++extern void efi_reserve_boot_services(void);
+ extern struct efi_memory_map memmap;
+
+ /**
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index d764a42..300d758 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -127,6 +127,7 @@ struct hd_struct {
+ #define GENHD_FL_SUPPRESS_PARTITION_INFO 32
+ #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
+ #define GENHD_FL_NATIVE_CAPACITY 128
++#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
+
+ enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 8abe8d7..8652a4f 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -608,6 +608,8 @@
+ #define PCI_DEVICE_ID_MATROX_G550 0x2527
+ #define PCI_DEVICE_ID_MATROX_VIA 0x4536
+
++#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
++
+ #define PCI_VENDOR_ID_CT 0x102c
+ #define PCI_DEVICE_ID_CT_69000 0x00c0
+ #define PCI_DEVICE_ID_CT_65545 0x00d8
+diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
+index 77cbddb..a7d87f9 100644
+--- a/include/linux/pm_qos_params.h
++++ b/include/linux/pm_qos_params.h
+@@ -16,6 +16,10 @@
+ #define PM_QOS_NUM_CLASSES 4
+ #define PM_QOS_DEFAULT_VALUE -1
+
++#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
++#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
++#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
++
+ struct pm_qos_request_list {
+ struct plist_node list;
+ int pm_qos_class;
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index e98cd2e..06d6964 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+ unsigned ret;
+
+ repeat:
+- ret = sl->sequence;
+- smp_rmb();
++ ret = ACCESS_ONCE(sl->sequence);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
++ smp_rmb();
+
+ return ret;
+ }
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 75b95df..b3ad020 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -120,6 +120,8 @@ static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+ {
+ unsigned long p = dst->_metrics;
+
++ BUG_ON(!p);
++
+ if (p & DST_METRICS_READ_ONLY)
+ return dst->ops->cow_metrics(dst, p);
+ return __DST_METRICS_PTR(p);
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 1d3b5b2..561ac99 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -98,6 +98,7 @@ enum transport_state_table {
+ TRANSPORT_REMOVE = 14,
+ TRANSPORT_FREE = 15,
+ TRANSPORT_NEW_CMD_MAP = 16,
++ TRANSPORT_FREE_CMD_INTR = 17,
+ };
+
+ /* Used for struct se_cmd->se_cmd_flags */
+diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
+index 59aa464..24a1c6c 100644
+--- a/include/target/target_core_transport.h
++++ b/include/target/target_core_transport.h
+@@ -172,6 +172,7 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
+ extern int transport_generic_handle_data(struct se_cmd *);
+ extern void transport_new_cmd_failure(struct se_cmd *);
+ extern int transport_generic_handle_tmr(struct se_cmd *);
++extern void transport_generic_free_cmd_intr(struct se_cmd *);
+ extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+ extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
+ extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
+diff --git a/init/main.c b/init/main.c
+index 4a9479e..48df882 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -580,8 +580,8 @@ asmlinkage void __init start_kernel(void)
+ #endif
+ page_cgroup_init();
+ enable_debug_pagealloc();
+- kmemleak_init();
+ debug_objects_mem_init();
++ kmemleak_init();
+ setup_per_cpu_pageset();
+ numa_policy_init();
+ if (late_time_init)
+diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
+index 0da058b..a9582ef 100644
+--- a/kernel/pm_qos_params.c
++++ b/kernel/pm_qos_params.c
+@@ -53,11 +53,17 @@ enum pm_qos_type {
+ PM_QOS_MIN /* return the smallest value */
+ };
+
++/*
++ * Note: The lockless read path depends on the CPU accessing
++ * target_value atomically. Atomic access is only guaranteed on all CPU
++ * types linux supports for 32 bit quantities
++ */
+ struct pm_qos_object {
+ struct plist_head requests;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice pm_qos_power_miscdev;
+ char *name;
++ s32 target_value; /* Do not change to 64 bit */
+ s32 default_value;
+ enum pm_qos_type type;
+ };
+@@ -70,7 +76,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
+ .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
+ .notifiers = &cpu_dma_lat_notifier,
+ .name = "cpu_dma_latency",
+- .default_value = 2000 * USEC_PER_SEC,
++ .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
++ .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+ .type = PM_QOS_MIN,
+ };
+
+@@ -79,7 +86,8 @@ static struct pm_qos_object network_lat_pm_qos = {
+ .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
+ .notifiers = &network_lat_notifier,
+ .name = "network_latency",
+- .default_value = 2000 * USEC_PER_SEC,
++ .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
++ .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+ .type = PM_QOS_MIN
+ };
+
+@@ -89,7 +97,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
+ .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
+ .notifiers = &network_throughput_notifier,
+ .name = "network_throughput",
+- .default_value = 0,
++ .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
++ .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+ .type = PM_QOS_MAX,
+ };
+
+@@ -135,6 +144,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
+ }
+ }
+
++static inline s32 pm_qos_read_value(struct pm_qos_object *o)
++{
++ return o->target_value;
++}
++
++static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
++{
++ o->target_value = value;
++}
++
+ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+ int del, int value)
+ {
+@@ -159,6 +178,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
+ plist_add(node, &o->requests);
+ }
+ curr_value = pm_qos_get_value(o);
++ pm_qos_set_value(o, curr_value);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ if (prev_value != curr_value)
+@@ -193,18 +213,11 @@ static int find_pm_qos_object_by_minor(int minor)
+ * pm_qos_request - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+- * This function returns the current target value in an atomic manner.
++ * This function returns the current target value.
+ */
+ int pm_qos_request(int pm_qos_class)
+ {
+- unsigned long flags;
+- int value;
+-
+- spin_lock_irqsave(&pm_qos_lock, flags);
+- value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
+- spin_unlock_irqrestore(&pm_qos_lock, flags);
+-
+- return value;
++ return pm_qos_read_value(pm_qos_array[pm_qos_class]);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_request);
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index ee24fa1..666880d 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2413,14 +2413,16 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
+ ftrace_match_records(parser->buffer, parser->idx, enable);
+ }
+
+- mutex_lock(&ftrace_lock);
+- if (ftrace_start_up && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+- mutex_unlock(&ftrace_lock);
+-
+ trace_parser_put(parser);
+ kfree(iter);
+
++ if (file->f_mode & FMODE_WRITE) {
++ mutex_lock(&ftrace_lock);
++ if (ftrace_start_up && ftrace_enabled)
++ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ mutex_unlock(&ftrace_lock);
++ }
++
+ mutex_unlock(&ftrace_regex_lock);
+ return 0;
+ }
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 619313e..507a22f 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -144,7 +144,7 @@ static void init_shared_classes(void)
+
+ #define HARDIRQ_ENTER() \
+ local_irq_disable(); \
+- irq_enter(); \
++ __irq_enter(); \
+ WARN_ON(!in_irq());
+
+ #define HARDIRQ_EXIT() \
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index c1d5867..aacee45 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1414,9 +1414,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ ++(*pos);
+
+ list_for_each_continue_rcu(n, &object_list) {
+- next_obj = list_entry(n, struct kmemleak_object, object_list);
+- if (get_object(next_obj))
++ struct kmemleak_object *obj =
++ list_entry(n, struct kmemleak_object, object_list);
++ if (get_object(obj)) {
++ next_obj = obj;
+ break;
++ }
+ }
+
+ put_object(prev_obj);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 3f8bce2..e78b324 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2064,6 +2064,7 @@ restart:
+ first_zones_zonelist(zonelist, high_zoneidx, NULL,
+ &preferred_zone);
+
++rebalance:
+ /* This is the last chance, in general, before the goto nopage. */
+ page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
+ high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+@@ -2071,7 +2072,6 @@ restart:
+ if (page)
+ goto got_pg;
+
+-rebalance:
+ /* Allocate without watermarks if the context allows */
+ if (alloc_flags & ALLOC_NO_WATERMARKS) {
+ page = __alloc_pages_high_priority(gfp_mask, order,
+diff --git a/mm/shmem.c b/mm/shmem.c
+index dfc7069..ba12be4 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -916,11 +916,12 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
+ if (size > ENTRIES_PER_PAGE)
+ size = ENTRIES_PER_PAGE;
+ offset = shmem_find_swp(entry, ptr, ptr+size);
++ shmem_swp_unmap(ptr);
+ if (offset >= 0) {
+ shmem_dir_unmap(dir);
++ ptr = shmem_swp_map(subdir);
+ goto found;
+ }
+- shmem_swp_unmap(ptr);
+ }
+ }
+ lost1:
+@@ -1100,8 +1101,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+ delete_from_page_cache(page);
+ shmem_swp_set(info, entry, swap.val);
+ shmem_swp_unmap(entry);
+- spin_unlock(&info->lock);
+ swap_shmem_alloc(swap);
++ spin_unlock(&info->lock);
+ BUG_ON(page_mapped(page));
+ swap_writepage(page, wbc);
+ return 0;
+diff --git a/mm/slub.c b/mm/slub.c
+index 9d2e5e4..f6cb6cd 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -261,6 +261,18 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
+ return *(void **)(object + s->offset);
+ }
+
++static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
++{
++ void *p;
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
++#else
++ p = get_freepointer(s, object);
++#endif
++ return p;
++}
++
+ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
+ {
+ *(void **)(object + s->offset) = fp;
+@@ -1943,7 +1955,7 @@ redo:
+ if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ s->cpu_slab->freelist, s->cpu_slab->tid,
+ object, tid,
+- get_freepointer(s, object), next_tid(tid)))) {
++ get_freepointer_safe(s, object), next_tid(tid)))) {
+
+ note_cmpxchg_failure("slab_alloc", s, tid);
+ goto redo;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 8bfd450..cc1470b 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -230,8 +230,11 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+ if (scanned == 0)
+ scanned = SWAP_CLUSTER_MAX;
+
+- if (!down_read_trylock(&shrinker_rwsem))
+- return 1; /* Assume we'll be able to shrink next time */
++ if (!down_read_trylock(&shrinker_rwsem)) {
++ /* Assume we'll be able to shrink next time */
++ ret = 1;
++ goto out;
++ }
+
+ list_for_each_entry(shrinker, &shrinker_list, list) {
+ unsigned long long delta;
+@@ -282,6 +285,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+ shrinker->nr += total_scan;
+ }
+ up_read(&shrinker_rwsem);
++out:
++ cond_resched();
+ return ret;
+ }
+
+@@ -2286,7 +2291,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+ * must be balanced
+ */
+ if (order)
+- return pgdat_balanced(pgdat, balanced, classzone_idx);
++ return !pgdat_balanced(pgdat, balanced, classzone_idx);
+ else
+ return !all_zones_ok;
+ }
+diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
+index f7fa67c..f49da58 100644
+--- a/net/atm/atm_sysfs.c
++++ b/net/atm/atm_sysfs.c
+@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
+ return pos - buf;
+ }
+
++static ssize_t show_atmindex(struct device *cdev,
++ struct device_attribute *attr, char *buf)
++{
++ struct atm_dev *adev = to_atm_dev(cdev);
++
++ return sprintf(buf, "%d\n", adev->number);
++}
++
+ static ssize_t show_carrier(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
+
+ static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
+ static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
++static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
+ static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
+ static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
+@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
+ static struct device_attribute *atm_attrs[] = {
+ &dev_attr_atmaddress,
+ &dev_attr_address,
++ &dev_attr_atmindex,
+ &dev_attr_carrier,
+ &dev_attr_type,
+ &dev_attr_link_rate,
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 74ef4d4..5f9c091 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = {
+ * ipt_REJECT needs it. Future netfilter modules might
+ * require us to fill additional fields.
+ */
++static const u32 br_dst_default_metrics[RTAX_MAX] = {
++ [RTAX_MTU - 1] = 1500,
++};
++
+ void br_netfilter_rtable_init(struct net_bridge *br)
+ {
+ struct rtable *rt = &br->fake_rtable;
+@@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
+ atomic_set(&rt->dst.__refcnt, 1);
+ rt->dst.dev = br->dev;
+ rt->dst.path = &rt->dst;
+- dst_metric_set(&rt->dst, RTAX_MTU, 1500);
++ dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
+ rt->dst.flags = DST_NOXFRM;
+ rt->dst.ops = &fake_dst_ops;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index b624fe4..acd7423 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1007,7 +1007,7 @@ rollback:
+ }
+
+ write_lock_bh(&dev_base_lock);
+- hlist_del(&dev->name_hlist);
++ hlist_del_rcu(&dev->name_hlist);
+ write_unlock_bh(&dev_base_lock);
+
+ synchronize_rcu();
+@@ -5258,7 +5258,7 @@ void netdev_update_features(struct net_device *dev)
+ if (dev->features == features)
+ return;
+
+- netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
++ netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
+ dev->features, features);
+
+ if (dev->netdev_ops->ndo_set_features)
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 91104d3..b71b7a3 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -314,7 +314,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+ {
+ unsigned long prev, new;
+
+- new = (unsigned long) dst_default_metrics;
++ new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+ prev = cmpxchg(&dst->_metrics, old, new);
+ if (prev == old)
+ kfree(__DST_METRICS_PTR(old));
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 74ead9e..f337525 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -330,7 +330,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
+ /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
+ /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
+ /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
+- /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6",
++ /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
+ /* NETIF_F_HIGHDMA */ "highdma",
+ /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
+ /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 7ebeed0..3e934fe 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2993,6 +2993,9 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+ skb->destructor = sock_rmem_free;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
++ /* before exiting rcu section, make sure dst is refcounted */
++ skb_dst_force(skb);
++
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb->len);
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 1fd3d9c..57ca93a 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1169,20 +1169,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
+
+ if (!in_dev->dead) {
+ if (IGMP_V1_SEEN(in_dev))
+- goto done;
++ return;
+ if (IGMP_V2_SEEN(in_dev)) {
+ if (reporter)
+ igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
+- goto done;
++ return;
+ }
+ /* IGMPv3 */
+ igmpv3_add_delrec(in_dev, im);
+
+ igmp_ifc_event(in_dev);
+ }
+-done:
+ #endif
+- ip_mc_clear_src(im);
+ }
+
+ static void igmp_group_added(struct ip_mc_list *im)
+@@ -1319,6 +1317,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
+ *ip = i->next_rcu;
+ in_dev->mc_count--;
+ igmp_group_dropped(i);
++ ip_mc_clear_src(i);
+
+ if (!in_dev->dead)
+ ip_rt_multicast_event(in_dev);
+@@ -1428,7 +1427,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
+ in_dev->mc_list = i->next_rcu;
+ in_dev->mc_count--;
+
+- igmp_group_dropped(i);
++ /* We've dropped the groups in ip_mc_down already */
++ ip_mc_clear_src(i);
+ ip_ma_put(i);
+ }
+ }
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 4404973..3740403 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -228,11 +228,11 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+ goto out;
+
+ if (pairwise)
+- key = sta->ptk;
++ key = rcu_dereference(sta->ptk);
+ else if (key_idx < NUM_DEFAULT_KEYS)
+- key = sta->gtk[key_idx];
++ key = rcu_dereference(sta->gtk[key_idx]);
+ } else
+- key = sdata->keys[key_idx];
++ key = rcu_dereference(sdata->keys[key_idx]);
+
+ if (!key)
+ goto out;
+@@ -921,8 +921,10 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
+ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
+ struct mpath_info *pinfo)
+ {
+- if (mpath->next_hop)
+- memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
++ struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop);
++
++ if (next_hop_sta)
++ memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
+ else
+ memset(next_hop, 0, ETH_ALEN);
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 64d92d5..7ffcb55 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -789,7 +789,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+
+- netif_tx_start_all_queues(sdata->dev);
++ netif_tx_wake_all_queues(sdata->dev);
+ }
+
+ void ieee80211_dynamic_ps_timer(unsigned long data)
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 237cc19..cb5a285 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1419,6 +1419,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
+ const char *dptr, *end;
+ s16 diff, tdiff = 0;
+ int ret = NF_ACCEPT;
++ bool term;
+ typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
+
+ if (ctinfo != IP_CT_ESTABLISHED &&
+@@ -1453,14 +1454,21 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
+ if (dptr + matchoff == end)
+ break;
+
+- if (end + strlen("\r\n\r\n") > dptr + datalen)
+- break;
+- if (end[0] != '\r' || end[1] != '\n' ||
+- end[2] != '\r' || end[3] != '\n')
++ term = false;
++ for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) {
++ if (end[0] == '\r' && end[1] == '\n' &&
++ end[2] == '\r' && end[3] == '\n') {
++ term = true;
++ break;
++ }
++ }
++ if (!term)
+ break;
+ end += strlen("\r\n\r\n") + clen;
+
+ msglen = origlen = end - dptr;
++ if (msglen > datalen)
++ return NF_DROP;
+
+ ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+ if (ret != NF_ACCEPT)
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index c2e628d..6d96275 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ struct sfq_sched_data *q = qdisc_priv(sch);
+ unsigned int hash;
+- sfq_index x;
++ sfq_index x, qlen;
+ struct sfq_slot *slot;
+ int uninitialized_var(ret);
+
+@@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ if (++sch->q.qlen <= q->limit)
+ return NET_XMIT_SUCCESS;
+
++ qlen = slot->qlen;
+ sfq_drop(sch);
+- return NET_XMIT_CN;
+-}
+-
+-static struct sk_buff *
+-sfq_peek(struct Qdisc *sch)
+-{
+- struct sfq_sched_data *q = qdisc_priv(sch);
+-
+- /* No active slots */
+- if (q->tail == NULL)
+- return NULL;
+-
+- return q->slots[q->tail->next].skblist_next;
++ /* Return Congestion Notification only if we dropped a packet
++ * from this flow.
++ */
++ return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ }
+
+ static struct sk_buff *
+@@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
+ .priv_size = sizeof(struct sfq_sched_data),
+ .enqueue = sfq_enqueue,
+ .dequeue = sfq_dequeue,
+- .peek = sfq_peek,
++ .peek = qdisc_peek_dequeued,
+ .drop = sfq_drop,
+ .init = sfq_init,
+ .reset = sfq_reset,
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 1a21c57..525f97c 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -64,6 +64,7 @@
+ /* Forward declarations for internal functions. */
+ static void sctp_assoc_bh_rcv(struct work_struct *work);
+ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
++static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
+
+ /* Keep track of the new idr low so that we don't re-use association id
+ * numbers too fast. It is protected by they idr spin lock is in the
+@@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc)
+ /* Free any cached ASCONF_ACK chunk. */
+ sctp_assoc_free_asconf_acks(asoc);
+
++ /* Free the ASCONF queue. */
++ sctp_assoc_free_asconf_queue(asoc);
++
+ /* Free any cached ASCONF chunk. */
+ if (asoc->addip_last_asconf)
+ sctp_chunk_free(asoc->addip_last_asconf);
+@@ -1578,6 +1582,18 @@ retry:
+ return error;
+ }
+
++/* Free the ASCONF queue */
++static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
++{
++ struct sctp_chunk *asconf;
++ struct sctp_chunk *tmp;
++
++ list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
++ list_del_init(&asconf->list);
++ sctp_chunk_free(asconf);
++ }
++}
++
+ /* Free asconf_ack cache */
+ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
+ {
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index faf71d1..6150ac5 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -140,14 +140,12 @@ void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port)
+ /* Dispose of the address list. */
+ static void sctp_bind_addr_clean(struct sctp_bind_addr *bp)
+ {
+- struct sctp_sockaddr_entry *addr;
+- struct list_head *pos, *temp;
++ struct sctp_sockaddr_entry *addr, *temp;
+
+ /* Empty the bind address list. */
+- list_for_each_safe(pos, temp, &bp->address_list) {
+- addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+- list_del(pos);
+- kfree(addr);
++ list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
++ list_del_rcu(&addr->list);
++ call_rcu(&addr->rcu, sctp_local_addr_free);
+ SCTP_DBG_OBJCNT_DEC(addr);
+ }
+ }
+diff --git a/net/socket.c b/net/socket.c
+index 310d16b..65b2310 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2122,14 +2122,16 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ */
+ if (MSG_CMSG_COMPAT & flags) {
+ err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags, datagrams);
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+ err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags, datagrams);
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index bf005d3..f34f5ab 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1344,7 +1344,6 @@ static void xs_tcp_state_change(struct sock *sk)
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+ xprt_force_disconnect(xprt);
+- case TCP_SYN_SENT:
+ xprt->connect_cookie++;
+ case TCP_CLOSING:
+ /*
+@@ -1758,6 +1757,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
+ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
++ int ret = -ENOTCONN;
+
+ if (!transport->inet) {
+ struct sock *sk = sock->sk;
+@@ -1789,12 +1789,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ }
+
+ if (!xprt_bound(xprt))
+- return -ENOTCONN;
++ goto out;
+
+ /* Tell the socket layer to start connecting... */
+ xprt->stat.connect_count++;
+ xprt->stat.connect_start = jiffies;
+- return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
++ ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
++ switch (ret) {
++ case 0:
++ case -EINPROGRESS:
++ /* SYN_SENT! */
++ xprt->connect_cookie++;
++ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
++ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
++ }
++out:
++ return ret;
+ }
+
+ /**
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 4ebce42..2c70a1e 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -1679,14 +1679,6 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
+ if (err)
+ goto out;
+
+- if (!(rdev->wiphy.flags &
+- WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS)) {
+- if (!key.def_uni || !key.def_multi) {
+- err = -EOPNOTSUPP;
+- goto out;
+- }
+- }
+-
+ err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx,
+ key.def_uni, key.def_multi);
+
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index ae3a698..ec1bcec 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -593,7 +593,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
+ sa.aad.op = OP_SETPROCATTR;
+ sa.aad.info = name;
+ sa.aad.error = -EINVAL;
+- return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
++ return aa_audit(AUDIT_APPARMOR_DENIED,
++ __aa_current_profile(), GFP_KERNEL,
+ &sa, NULL);
+ }
+ } else if (strcmp(name, "exec") == 0) {
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 930634e..7a0c586 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -845,6 +845,7 @@ void key_replace_session_keyring(void)
+ new-> sgid = old-> sgid;
+ new->fsgid = old->fsgid;
+ new->user = get_uid(old->user);
++ new->user_ns = new->user->user_ns;
+ new->group_info = get_group_info(old->group_info);
+
+ new->securebits = old->securebits;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 70a9d32..f5cad7c 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2349,9 +2349,16 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
+ /* Check VIA/ATI HD Audio Controller exist */
+ switch (chip->driver_type) {
+ case AZX_DRIVER_VIA:
+- case AZX_DRIVER_ATI:
+ /* Use link position directly, avoid any transfer problem. */
+ return POS_FIX_VIACOMBO;
++ case AZX_DRIVER_ATI:
++ /* ATI chipsets don't work well with position-buffer */
++ return POS_FIX_LPIB;
++ case AZX_DRIVER_GENERIC:
++ /* AMD chipsets also don't work with position-buffer */
++ if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
++ return POS_FIX_LPIB;
++ break;
+ }
+
+ return POS_FIX_AUTO;
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index 2942d2a..9f886bf 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -3070,6 +3070,7 @@ static void ad1988_auto_init_analog_input(struct hda_codec *codec)
+
+ for (i = 0; i < cfg->num_inputs; i++) {
+ hda_nid_t nid = cfg->inputs[i].pin;
++ int type = cfg->inputs[i].type;
+ switch (nid) {
+ case 0x15: /* port-C */
+ snd_hda_codec_write(codec, 0x33, 0, AC_VERB_SET_CONNECT_SEL, 0x0);
+@@ -3079,7 +3080,7 @@ static void ad1988_auto_init_analog_input(struct hda_codec *codec)
+ break;
+ }
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+- i == AUTO_PIN_MIC ? PIN_VREF80 : PIN_IN);
++ type == AUTO_PIN_MIC ? PIN_VREF80 : PIN_IN);
+ if (nid != AD1988_PIN_CD_NID)
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_OUT_MUTE);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index ad97d93..15b9d16 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3036,6 +3036,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
++ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
+ {}
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 94d19c0..1e32235 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1600,7 +1600,7 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+ "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+- "Dell Studio 1558", STAC_DELL_M6_BOTH),
++ "Dell Studio 1558", STAC_DELL_M6_DMIC),
+ {} /* terminator */
+ };
+
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index 4005e9a..e55b298 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -787,17 +787,17 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
+ static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
+ { "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
+ { "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
+- { "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
++ { "LINEOUT1 Mixer", "Output Switch", "Left Output PGA" },
+
+ { "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
+ { "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
+ };
+
+ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+- { "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
+- { "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
++ { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
++ { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
+
+- { "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
++ { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
+
+ { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
+ { "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
+@@ -806,17 +806,17 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
+ { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
+ { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
+- { "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
++ { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
+
+ { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
+ { "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
+ };
+
+ static const struct snd_soc_dapm_route lineout2_se_routes[] = {
+- { "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
+- { "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" },
++ { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
++ { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
+
+- { "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" },
++ { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
+
+ { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
+ { "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" },
+@@ -836,17 +836,21 @@ int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
+ snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME,
+ WM8993_IN2_VU, WM8993_IN2_VU);
+
++ snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_LEFT,
++ WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
+ snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT,
+ WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
+
+ snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME,
+- WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC);
++ WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC,
++ WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC);
+ snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME,
+ WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC,
+ WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC);
+
+ snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME,
+- WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC);
++ WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU,
++ WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU);
+ snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME,
+ WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU,
+ WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU);
+diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
+index 2afabaf..1a591f1 100644
+--- a/sound/soc/pxa/raumfeld.c
++++ b/sound/soc/pxa/raumfeld.c
+@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
+ .hw_params = raumfeld_cs4270_hw_params,
+ };
+
+-static int raumfeld_line_suspend(struct snd_soc_card *card)
++static int raumfeld_analog_suspend(struct snd_soc_card *card)
+ {
+ raumfeld_enable_audio(false);
+ return 0;
+ }
+
+-static int raumfeld_line_resume(struct snd_soc_card *card)
++static int raumfeld_analog_resume(struct snd_soc_card *card)
+ {
+ raumfeld_enable_audio(true);
+ return 0;
+@@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
+ .hw_params = raumfeld_ak4104_hw_params,
+ };
+
+-static struct snd_soc_dai_link raumfeld_dai[] = {
++#define DAI_LINK_CS4270 \
++{ \
++ .name = "CS4270", \
++ .stream_name = "CS4270", \
++ .cpu_dai_name = "pxa-ssp-dai.0", \
++ .platform_name = "pxa-pcm-audio", \
++ .codec_dai_name = "cs4270-hifi", \
++ .codec_name = "cs4270-codec.0-0048", \
++ .ops = &raumfeld_cs4270_ops, \
++}
++
++#define DAI_LINK_AK4104 \
++{ \
++ .name = "ak4104", \
++ .stream_name = "Playback", \
++ .cpu_dai_name = "pxa-ssp-dai.1", \
++ .codec_dai_name = "ak4104-hifi", \
++ .platform_name = "pxa-pcm-audio", \
++ .ops = &raumfeld_ak4104_ops, \
++ .codec_name = "spi0.0", \
++}
++
++static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] =
+ {
+- .name = "ak4104",
+- .stream_name = "Playback",
+- .cpu_dai_name = "pxa-ssp-dai.1",
+- .codec_dai_name = "ak4104-hifi",
+- .platform_name = "pxa-pcm-audio",
+- .ops = &raumfeld_ak4104_ops,
+- .codec_name = "ak4104-codec.0",
+-},
++ DAI_LINK_CS4270,
++ DAI_LINK_AK4104,
++};
++
++static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] =
+ {
+- .name = "CS4270",
+- .stream_name = "CS4270",
+- .cpu_dai_name = "pxa-ssp-dai.0",
+- .platform_name = "pxa-pcm-audio",
+- .codec_dai_name = "cs4270-hifi",
+- .codec_name = "cs4270-codec.0-0048",
+- .ops = &raumfeld_cs4270_ops,
+-},};
+-
+-static struct snd_soc_card snd_soc_raumfeld = {
+- .name = "Raumfeld",
+- .dai_link = raumfeld_dai,
+- .suspend_post = raumfeld_line_suspend,
+- .resume_pre = raumfeld_line_resume,
+- .num_links = ARRAY_SIZE(raumfeld_dai),
++ DAI_LINK_CS4270,
++};
++
++static struct snd_soc_card snd_soc_raumfeld_connector = {
++ .name = "Raumfeld Connector",
++ .dai_link = snd_soc_raumfeld_connector_dai,
++ .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
++ .suspend_post = raumfeld_analog_suspend,
++ .resume_pre = raumfeld_analog_resume,
++};
++
++static struct snd_soc_card snd_soc_raumfeld_speaker = {
++ .name = "Raumfeld Speaker",
++ .dai_link = snd_soc_raumfeld_speaker_dai,
++ .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
++ .suspend_post = raumfeld_analog_suspend,
++ .resume_pre = raumfeld_analog_resume,
+ };
+
+ static struct platform_device *raumfeld_audio_device;
+@@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void)
+
+ set_max9485_clk(MAX9485_MCLK_FREQ_122880);
+
+- /* Register LINE and SPDIF */
++ /* Register analog device */
+ raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
+ if (!raumfeld_audio_device)
+ return -ENOMEM;
+
+- platform_set_drvdata(raumfeld_audio_device,
+- &snd_soc_raumfeld);
+- ret = platform_device_add(raumfeld_audio_device);
+-
+- /* no S/PDIF on Speakers */
+ if (machine_is_raumfeld_speaker())
++ platform_set_drvdata(raumfeld_audio_device,
++ &snd_soc_raumfeld_speaker);
++
++ if (machine_is_raumfeld_connector())
++ platform_set_drvdata(raumfeld_audio_device,
++ &snd_soc_raumfeld_connector);
++
++ ret = platform_device_add(raumfeld_audio_device);
++ if (ret < 0)
+ return ret;
+
+ raumfeld_enable_audio(true);
+-
+- return ret;
++ return 0;
+ }
+
+ static void __exit raumfeld_audio_exit(void)