Diffstat (limited to '3.11.6/1005_linux-3.11.6.patch')
-rw-r--r-- 3.11.6/1005_linux-3.11.6.patch | 2260
1 files changed, 2260 insertions, 0 deletions
diff --git a/3.11.6/1005_linux-3.11.6.patch b/3.11.6/1005_linux-3.11.6.patch
new file mode 100644
index 0000000..ad3cb53
--- /dev/null
+++ b/3.11.6/1005_linux-3.11.6.patch
@@ -0,0 +1,2260 @@
+diff --git a/Makefile b/Makefile
+index 83121b7..e87ba83 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 11
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Linux for Workgroups
+
+diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
+index 442ce5d..43de302 100644
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
+ {
+ unsigned long loops;
+
+- /* (long long) cast ensures 64 bit MPY - real or emulated
++ /* (u64) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+- loops = ((long long)(usecs * 4295 * HZ) *
+- (long long)(loops_per_jiffy)) >> 32;
++ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
+
+ __delay(loops);
+ }
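The fix above matters because the old cast widened the product only after it had already been computed (and wrapped) in 32-bit arithmetic; casting the first operand to (u64) forces the whole multiply chain into 64 bits. A standalone sketch of the difference, with illustrative numbers rather than any real ARC configuration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t usecs = 20000, hz = 100, lpj = 4000000;

        /* old: usecs * 4295 * hz wraps in 32 bits before the cast */
        uint64_t bad  = (uint64_t)(usecs * 4295u * hz) * lpj >> 32;

        /* new: widening the first operand keeps every step in 64 bits */
        uint64_t good = ((uint64_t)usecs * 4295u * hz * lpj) >> 32;

        printf("old=%llu new=%llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }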
+diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
+index f158197..b6a8c2d 100644
+--- a/arch/arc/include/asm/spinlock.h
++++ b/arch/arc/include/asm/spinlock.h
+@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+
+ static inline void arch_spin_unlock(arch_spinlock_t *lock)
+ {
+- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
++ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
++
++ __asm__ __volatile__(
++ " ex %0, [%1] \n"
++ : "+r" (tmp)
++ : "r"(&(lock->slock))
++ : "memory");
++
+ smp_mb();
+ }
+
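The unlock path now goes through the same atomic EX (exchange) instruction that the lock and trylock paths use, instead of a plain word store. A hedged C11 analogue of that shape (the LOCKED/UNLOCKED encoding here is illustrative, not ARC's actual __ARCH_SPIN_LOCK_UNLOCKED__ value):

    #include <stdatomic.h>

    #define UNLOCKED 0u
    #define LOCKED   1u

    typedef struct { atomic_uint slock; } sketch_spinlock_t;

    static void sketch_lock(sketch_spinlock_t *l)
    {
        /* spin until we swap LOCKED in and observe UNLOCKED coming out */
        while (atomic_exchange_explicit(&l->slock, LOCKED,
                                        memory_order_acquire) != UNLOCKED)
            ;
    }

    static void sketch_unlock(sketch_spinlock_t *l)
    {
        /* release by exchange, the moral twin of "ex %0, [%1]" above,
         * rather than a plain assignment to l->slock */
        (void)atomic_exchange_explicit(&l->slock, UNLOCKED,
                                       memory_order_release);
    }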
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index 3242082..30c9baf 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -43,7 +43,7 @@
+ * Because it essentially checks if buffer end is within limit and @len is
+ * non-ngeative, which implies that buffer start will be within limit too.
+ *
+- * The reason for rewriting being, for majorit yof cases, @len is generally
++ * The reason for rewriting being, for majority of cases, @len is generally
+ * compile time constant, causing first sub-expression to be compile time
+ * subsumed.
+ *
+@@ -53,7 +53,7 @@
+ *
+ */
+ #define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
+- (((addr)+(sz)) <= get_fs()))
++ ((addr) <= (get_fs() - (sz))))
+ #define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
+ likely(__user_ok((addr), (sz))))
+
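The rewritten range check closes a wrap-around: on a 32-bit target, (addr)+(sz) can overflow and pass the old comparison even though the buffer runs past the limit, while addr <= limit - sz stays in range because the first sub-expression already guarantees sz <= limit. The effect is demonstrable in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t limit = 0x80000000u;            /* stand-in for get_fs() */
        uint32_t addr  = 0xfffff000u, sz = 0x2000u;

        int old_ok = (addr + sz) <= limit;  /* wraps to 0x1000: passes */
        int new_ok = addr <= (limit - sz);  /* correctly rejects       */

        printf("old=%d new=%d\n", old_ok, new_ok);
        return 0;
    }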
+diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
+index 3332385..5d76706 100644
+--- a/arch/arc/kernel/ptrace.c
++++ b/arch/arc/kernel/ptrace.c
+@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
+ REG_IGNORE_ONE(pad2);
+ REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
+ REG_IGNORE_ONE(efa); /* efa update invalid */
+- REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
++ REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
+
+ return ret;
+ }
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index ee6ef2f..7e95e1a 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ {
+ struct rt_sigframe __user *sf;
+ unsigned int magic;
+- int err;
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Always make any pending restarted system calls return -EINTR */
+@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ goto badframe;
+
+- err = restore_usr_regs(regs, sf);
+- err |= __get_user(magic, &sf->sigret_magic);
+- if (err)
++ if (__get_user(magic, &sf->sigret_magic))
+ goto badframe;
+
+ if (unlikely(is_do_ss_needed(magic)))
+ if (restore_altstack(&sf->uc.uc_stack))
+ goto badframe;
+
++ if (restore_usr_regs(regs, sf))
++ goto badframe;
++
+ /* Don't restart from sigreturn */
+ syscall_wont_restart(regs);
+
+@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ return 1;
+
+ /*
++ * w/o SA_SIGINFO, struct ucontext is partially populated (only
++ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
++ * during signal handler execution. This works for SA_SIGINFO as well
++ * although the semantics are now overloaded (the same reg state can be
++ * inspected by userland: but are they allowed to fiddle with it ?
++ */
++ err |= stash_usr_regs(sf, regs, set);
++
++ /*
+ * SA_SIGINFO requires 3 args to signal handler:
+ * #1: sig-no (common to any handler)
+ * #2: struct siginfo
+@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ magic = MAGIC_SIGALTSTK;
+ }
+
+- /*
+- * w/o SA_SIGINFO, struct ucontext is partially populated (only
+- * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+- * during signal handler execution. This works for SA_SIGINFO as well
+- * although the semantics are now overloaded (the same reg state can be
+- * inspected by userland: but are they allowed to fiddle with it ?
+- */
+- err |= stash_usr_regs(sf, regs, set);
+ err |= __put_user(magic, &sf->sigret_magic);
+ if (err)
+ return err;
+diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
+index c0f832f..00ad070 100644
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -233,6 +233,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
++
++ /* handle zero-overhead-loop */
++ if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
++ regs->ret = regs->lp_start;
++ regs->lp_count--;
++ }
+ }
+
+ return 0;
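The added block mirrors in software what ARC's zero-overhead-loop hardware does on every fetch; since the fixup advances the PC by hand, it must apply the same loop-back itself. A minimal restatement of those semantics, with fields named after the pt_regs members used above:

    struct zol_regs {
        unsigned long ret, lp_start, lp_end, lp_count;
    };

    static void advance_pc(struct zol_regs *r, unsigned long instr_len)
    {
        r->ret += instr_len;
        /* hardware branches back when the new PC hits lp_end
         * with iterations remaining */
        if (r->ret == r->lp_end && r->lp_count) {
            r->ret = r->lp_start;
            r->lp_count--;
        }
    }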
+diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
+index bfc198c..863c892 100644
+--- a/arch/arm/include/asm/jump_label.h
++++ b/arch/arm/include/asm/jump_label.h
+@@ -16,7 +16,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ JUMP_LABEL_NOP "\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
+index 4d6d77e..e194f95 100644
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -22,7 +22,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\tnop\n\t"
++ asm_volatile_goto("1:\tnop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
+index 4204d76..029e002 100644
+--- a/arch/mips/kernel/octeon_switch.S
++++ b/arch/mips/kernel/octeon_switch.S
+@@ -73,7 +73,7 @@
+ 3:
+
+ #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+- PTR_L t8, __stack_chk_guard
++ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+ #endif
+diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
+index 38af83f..20b7b04 100644
+--- a/arch/mips/kernel/r2300_switch.S
++++ b/arch/mips/kernel/r2300_switch.S
+@@ -67,7 +67,7 @@ LEAF(resume)
+ 1:
+
+ #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+- PTR_L t8, __stack_chk_guard
++ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+ #endif
+diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
+index 921238a..078de5e 100644
+--- a/arch/mips/kernel/r4k_switch.S
++++ b/arch/mips/kernel/r4k_switch.S
+@@ -69,7 +69,7 @@
+ 1:
+
+ #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+- PTR_L t8, __stack_chk_guard
++ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+ #endif
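All three MIPS context-switch paths store the incoming task's canary through t8, so t8 must hold the address of __stack_chk_guard (PTR_LA), not the word currently stored there (PTR_L). In C terms:

    extern unsigned long __stack_chk_guard;

    void switch_canary(unsigned long task_canary)
    {
        unsigned long *t8 = &__stack_chk_guard;  /* PTR_LA: address  */
        *t8 = task_canary;                       /* LONG_S t9, 0(t8) */
        /* the old PTR_L was equivalent to
         *   unsigned long t8 = __stack_chk_guard;
         * a value, so using it as the store target scribbled on
         * whatever address the previous canary happened to equal */
    }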
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 04e47c6..b3f87a3 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -805,14 +805,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ else {
+
+ /*
+- * The kernel should never fault on its own address space.
++ * The kernel should never fault on its own address space,
++ * unless pagefault_disable() was called before.
+ */
+
+- if (fault_space == 0)
++ if (fault_space == 0 && !in_atomic())
+ {
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ parisc_terminate("Kernel Fault", regs, code, fault_address);
+-
+ }
+ }
+
+diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
+index ae098c4..f016bb6 100644
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -19,7 +19,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index b02f91e..7bcd4d6 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1054,7 +1054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+- std r8, VCPU_DSCR(r7)
++ std r8, VCPU_DSCR(r9)
+ mtspr SPRN_DSCR, r7
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
+index 6c32190..346b1c8 100644
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -15,7 +15,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("0: brcl 0,0\n"
++ asm_volatile_goto("0: brcl 0,0\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ASM_ALIGN "\n"
+ ASM_PTR " 0b, %l[label], %0\n"
+diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
+index 5080d16..ec2e2e2 100644
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -9,7 +9,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 47538a6..7290585 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -373,7 +373,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ * Catch too early usage of this before alternatives
+ * have run.
+ */
+- asm goto("1: jmp %l[t_warn]\n"
++ asm_volatile_goto("1: jmp %l[t_warn]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+@@ -386,7 +386,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+ #endif
+
+- asm goto("1: jmp %l[t_no]\n"
++ asm_volatile_goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+@@ -448,7 +448,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+ * have. Thus, we force the jump to the widest, 4-byte, signed relative
+ * offset even though the last would often fit in less bytes.
+ */
+- asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
++ asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
+index cccd07f..779c2ef 100644
+--- a/arch/x86/include/asm/e820.h
++++ b/arch/x86/include/asm/e820.h
+@@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
+ extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
+ unsigned long start_addr, unsigned long long end_addr);
+ struct setup_data;
+-extern void parse_e820_ext(struct setup_data *data);
++extern void parse_e820_ext(u64 phys_addr, u32 data_len);
+
+ #if defined(CONFIG_X86_64) || \
+ (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
+diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
+index 3a16c14..0297669 100644
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key)
+ {
+- asm goto("1:"
++ asm_volatile_goto("1:"
+ STATIC_KEY_INITIAL_NOP
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index d32abea..174da5f 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
+ * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
+ * linked list of struct setup_data, which is parsed here.
+ */
+-void __init parse_e820_ext(struct setup_data *sdata)
++void __init parse_e820_ext(u64 phys_addr, u32 data_len)
+ {
+ int entries;
+ struct e820entry *extmap;
++ struct setup_data *sdata;
+
++ sdata = early_memremap(phys_addr, data_len);
+ entries = sdata->len / sizeof(struct e820entry);
+ extmap = (struct e820entry *)(sdata->data);
+ __append_e820_map(extmap, entries);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
++ early_iounmap(sdata, data_len);
+ printk(KERN_INFO "e820: extended physical RAM map:\n");
+ e820_print_map("extended");
+ }
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index f8ec578..234e1e3 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
+ static void __init parse_setup_data(void)
+ {
+ struct setup_data *data;
+- u64 pa_data;
++ u64 pa_data, pa_next;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+- u32 data_len, map_len;
++ u32 data_len, map_len, data_type;
+
+ map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
+ (u64)sizeof(struct setup_data));
+ data = early_memremap(pa_data, map_len);
+ data_len = data->len + sizeof(struct setup_data);
+- if (data_len > map_len) {
+- early_iounmap(data, map_len);
+- data = early_memremap(pa_data, data_len);
+- map_len = data_len;
+- }
++ data_type = data->type;
++ pa_next = data->next;
++ early_iounmap(data, map_len);
+
+- switch (data->type) {
++ switch (data_type) {
+ case SETUP_E820_EXT:
+- parse_e820_ext(data);
++ parse_e820_ext(pa_data, data_len);
+ break;
+ case SETUP_DTB:
+ add_dtb(pa_data);
+@@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
+ default:
+ break;
+ }
+- pa_data = data->next;
+- early_iounmap(data, map_len);
++ pa_data = pa_next;
+ }
+ }
+
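Together with the parse_e820_ext() change above, this rework follows a map-copy-unmap discipline: the walker maps only the small header, copies out the fields it needs, unmaps before dispatching, and each consumer remaps exactly the payload it wants. A sketch of that shape (the early_memremap/early_iounmap prototypes and handle_node() here are stand-ins, not the real fixmap-based helpers):

    #include <stdint.h>
    #include <stddef.h>

    struct setup_data_hdr { uint64_t next; uint32_t type; uint32_t len; };

    void *early_memremap(uint64_t phys, size_t len);
    void  early_iounmap(void *addr, size_t len);
    void  handle_node(uint32_t type, uint64_t phys, uint32_t len);

    void walk_setup_data(uint64_t pa_data)
    {
        while (pa_data) {
            struct setup_data_hdr *h = early_memremap(pa_data, sizeof(*h));
            uint32_t type    = h->type;
            uint32_t len     = h->len + sizeof(*h);
            uint64_t pa_next = h->next;

            early_iounmap(h, sizeof(*h));    /* nothing dangles past here */
            handle_node(type, pa_data, len); /* remaps the payload itself */
            pa_data = pa_next;
        }
    }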
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 0d91fe5..92e6c67 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1462,12 +1462,11 @@ struct ctl_table random_table[] = {
+
+ static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+-static int __init random_int_secret_init(void)
++int random_int_secret_init(void)
+ {
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
+ return 0;
+ }
+-late_initcall(random_int_secret_init);
+
+ /*
+ * Get a random word for internal kernel use only. Similar to urandom but
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 342f1f3..c42d31c 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3791,6 +3791,9 @@
+ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
+ #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+
++#define HSW_SCRATCH1 0xb038
++#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
++
+ #define HSW_FUSE_STRAP 0x42014
+ #define HSW_CDCLK_LIMIT (1 << 24)
+
+@@ -4624,6 +4627,9 @@
+ #define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+ #define DOP_CLOCK_GATING_DISABLE (1<<0)
+
++#define HSW_ROW_CHICKEN3 0xe49c
++#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
++
+ #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
+ #define INTEL_AUDIO_DEVCL 0x808629FB
+ #define INTEL_AUDIO_DEVBLC 0x80862801
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7fc8a76..90a7c17 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -3890,8 +3890,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
+ * consider. */
+ void intel_connector_dpms(struct drm_connector *connector, int mode)
+ {
+- struct intel_encoder *encoder = intel_attached_encoder(connector);
+-
+ /* All the simple cases only support two dpms states. */
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+@@ -3902,10 +3900,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
+ connector->dpms = mode;
+
+ /* Only need to change hw state when actually enabled */
+- if (encoder->base.crtc)
+- intel_encoder_dpms(encoder, mode);
+- else
+- WARN_ON(encoder->connectors_active != false);
++ if (connector->encoder)
++ intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
+
+ intel_modeset_check_state(connector->dev);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index b0e4a0b..cad0482 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3603,8 +3603,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
+ dev_priv->rps.rpe_delay),
+ dev_priv->rps.rpe_delay);
+
+- INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
+-
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+
+ /* requires MSI enabled */
+@@ -4699,6 +4697,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
++ /* L3 caching of data atomics doesn't work -- disable it. */
++ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
++ I915_WRITE(HSW_ROW_CHICKEN3,
++ _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
++
+ /* This is required by WaCatErrorRejectionIssue:hsw */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+@@ -5562,6 +5565,8 @@ void intel_pm_init(struct drm_device *dev)
+
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
++
++ INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
+ }
+
+ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
+index 084e694..639b9aa 100644
+--- a/drivers/gpu/drm/radeon/btc_dpm.c
++++ b/drivers/gpu/drm/radeon/btc_dpm.c
+@@ -1913,7 +1913,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
+ }
+ j++;
+
+- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ tmp = RREG32(MC_PMG_CMD_MRS);
+@@ -1928,7 +1928,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
+ }
+ j++;
+
+- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ case MC_SEQ_RESERVE_M >> 2:
+@@ -1942,7 +1942,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
+ }
+ j++;
+
+- if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
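All three hunks fix the same off-by-one: j indexes an array of SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE elements, so after j++ the next write lands at data[j], and j equal to the array size is already out of bounds; the old > test let exactly one write land past the end. The bug in miniature:

    #define N 16
    static int data[N];

    static int append(int *j, int val)
    {
        if (*j >= N)          /* was: *j > N, one slot too forgiving */
            return -1;        /* the driver returns -EINVAL here     */
        data[(*j)++] = val;
        return 0;
    }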
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 94dab1e..8307883 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3126,7 +3126,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+- rdev->config.evergreen.max_hw_contexts = 8;
++ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 20fd17c..6be00c9 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -1494,7 +1494,7 @@
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+ # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+- /* 0 - SRC_ADDR
++ /* 0 - DST_ADDR
+ * 1 - GDS
+ */
+ # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+@@ -1509,7 +1509,7 @@
+ # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+ /* COMMAND */
+ # define PACKET3_CP_DMA_DIS_WC (1 << 21)
+-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
++# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index 7c78083..d079cb1 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -1487,7 +1487,7 @@
+ */
+ # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+ /* COMMAND */
+-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
++# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
+index f4d6bce..12e8099 100644
+--- a/drivers/gpu/drm/radeon/radeon_test.c
++++ b/drivers/gpu/drm/radeon/radeon_test.c
+@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
+ struct radeon_bo *vram_obj = NULL;
+ struct radeon_bo **gtt_obj = NULL;
+ uint64_t gtt_addr, vram_addr;
+- unsigned i, n, size;
+- int r, ring;
++ unsigned n, size;
++ int i, r, ring;
+
+ switch (flag) {
+ case RADEON_TEST_COPY_DMA:
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 1cfba39..1c23b61 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -5174,7 +5174,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (!pi->mem_gddr5) {
+@@ -5184,7 +5184,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+@@ -5197,7 +5197,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ j++;
+- if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 2010d6b..a75d25a 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -1490,7 +1490,7 @@
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+ # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+- /* 0 - SRC_ADDR
++ /* 0 - DST_ADDR
+ * 1 - GDS
+ */
+ # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+@@ -1505,7 +1505,7 @@
+ # define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+ /* COMMAND */
+ # define PACKET3_CP_DMA_DIS_WC (1 << 21)
+-# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
++# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 98814d1..3288f13 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -230,6 +230,7 @@ static int send_argument(const char *key)
+
+ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ {
++ u8 status, data = 0;
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ return -EIO;
+ }
+
++ /* This has no effect on newer (2012) SMCs */
+ if (send_byte(len, APPLESMC_DATA_PORT)) {
+ pr_warn("%.4s: read len fail\n", key);
+ return -EIO;
+@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ buffer[i] = inb(APPLESMC_DATA_PORT);
+ }
+
++ /* Read the data port until bit0 is cleared */
++ for (i = 0; i < 16; i++) {
++ udelay(APPLESMC_MIN_WAIT);
++ status = inb(APPLESMC_CMD_PORT);
++ if (!(status & 0x01))
++ break;
++ data = inb(APPLESMC_DATA_PORT);
++ }
++ if (i)
++ pr_warn("flushed %d bytes, last value is: %d\n", i, data);
++
+ return 0;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 142b694d..e6b8dcd 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -944,6 +944,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
+ /*
+ * ProDB0017052: Clear ARDY bit twice
+ */
++ if (stat & OMAP_I2C_STAT_ARDY)
++ omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
++
+ if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
+ OMAP_I2C_STAT_AL)) {
+ omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
+diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
+index 491419e..5c3d4df 100644
+--- a/drivers/watchdog/kempld_wdt.c
++++ b/drivers/watchdog/kempld_wdt.c
+@@ -35,7 +35,7 @@
+ #define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4)
+ #define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x))
+ #define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4)
+-#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x30) << 4)
++#define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4)
+ #define STAGE_CFG_PRESCALER_MASK 0x30
+ #define STAGE_CFG_ACTION_MASK 0x7
+ #define STAGE_CFG_ASSERT (1 << 3)
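The GET and SET macros must be inverses over the two-bit prescaler field at bits 5:4. The old SET masked with 0x30 before shifting left, so any in-range value (0..3) was zeroed before it ever reached the field. A compile-time round-trip check makes the bug visible:

    #define GET_PRESCALER(reg)     (((reg) & 0x30) >> 4)
    #define SET_PRESCALER(val)     (((val) & 0x3)  << 4)  /* fixed  */
    #define SET_PRESCALER_OLD(val) (((val) & 0x30) << 4)  /* broken */

    _Static_assert(GET_PRESCALER(SET_PRESCALER(2)) == 2,
                   "fixed macro round-trips");
    _Static_assert(GET_PRESCALER(SET_PRESCALER_OLD(2)) == 0,
                   "old macro lost the value");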
+diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
+index 4da59b4..381999c 100644
+--- a/drivers/watchdog/ts72xx_wdt.c
++++ b/drivers/watchdog/ts72xx_wdt.c
+@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+- return put_user(0, p);
++ error = put_user(0, p);
++ break;
+
+ case WDIOC_KEEPALIVE:
+ ts72xx_wdt_kick(wdt);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d3280b2..8220491 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8036,7 +8036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+
+
+ /* check for collisions, even if the name isn't there */
+- ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
++ ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len);
+
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index c081e34..03e9beb 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1350,6 +1350,8 @@ retry:
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
++ kfree(is); is = NULL;
++ kfree(bs); bs = NULL;
+ goto retry;
+ }
+ error = -1;
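The added kfree()s close a leak in the retry loop: is and bs are allocated afresh on every pass, so jumping back to retry without freeing the previous pair leaked two buffers per iteration. The pattern in miniature:

    #include <stdlib.h>

    int shrink_with_retry(int passes)
    {
        void *is = NULL, *bs = NULL;
    retry:
        is = malloc(64);
        bs = malloc(64);
        if (passes-- > 0) {       /* stand-in for the min_extra_isize path */
            free(is); is = NULL;  /* the added cleanup: without these,     */
            free(bs); bs = NULL;  /* each retry leaks both buffers         */
            goto retry;
        }
        free(is);
        free(bs);
        return 0;
    }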
+diff --git a/fs/statfs.c b/fs/statfs.c
+index c219e733..083dc0a 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -94,7 +94,7 @@ retry:
+
+ int fd_statfs(int fd, struct kstatfs *st)
+ {
+- struct fd f = fdget(fd);
++ struct fd f = fdget_raw(fd);
+ int error = -EBADF;
+ if (f.file) {
+ error = vfs_statfs(&f.file->f_path, st);
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index 842de22..ded4299 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -65,6 +65,21 @@
+ #define __visible __attribute__((externally_visible))
+ #endif
+
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#if GCC_VERSION <= 40801
++# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++#else
++# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
++#endif
+
+ #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+ #if GCC_VERSION >= 40400
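This is the header side of all the asm goto to asm_volatile_goto conversions earlier in this patch: on GCC <= 4.8.1 the macro appends an empty asm statement, which acts as a compiler barrier defeating the PR58670 miscompilation, while on fixed compilers it expands to a bare asm goto. Call sites keep their usual shape, e.g. (a schematic user, not code from this patch):

    static inline int branch_taken(void)
    {
        asm_volatile_goto("nop"       /* live-patched to a jump */
                          : : : : taken);
        return 0;
    taken:
        return 1;
    }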
+diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
+index c4d870b..19c19a5 100644
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -22,7 +22,7 @@ struct ipc_ids {
+ int in_use;
+ unsigned short seq;
+ unsigned short seq_max;
+- struct rw_semaphore rw_mutex;
++ struct rw_semaphore rwsem;
+ struct idr ipcs_idr;
+ int next_id;
+ };
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 3b9377d..6312dd9 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
++extern int random_int_secret_init(void);
+
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+diff --git a/init/main.c b/init/main.c
+index d03d2ec..586cd33 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -75,6 +75,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/elevator.h>
+ #include <linux/sched_clock.h>
++#include <linux/random.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -778,6 +779,7 @@ static void __init do_basic_setup(void)
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
++ random_int_secret_init();
+ }
+
+ static void __init do_pre_smp_initcalls(void)
+diff --git a/ipc/msg.c b/ipc/msg.c
+index a877c16..558aa91 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -70,8 +70,6 @@ struct msg_sender {
+
+ #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
+
+-#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
+-
+ static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
+ static int newque(struct ipc_namespace *, struct ipc_params *);
+ #ifdef CONFIG_PROC_FS
+@@ -181,7 +179,7 @@ static void msg_rcu_free(struct rcu_head *head)
+ * @ns: namespace
+ * @params: ptr to the structure that contains the key and msgflg
+ *
+- * Called with msg_ids.rw_mutex held (writer)
++ * Called with msg_ids.rwsem held (writer)
+ */
+ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ {
+@@ -267,8 +265,8 @@ static void expunge_all(struct msg_queue *msq, int res)
+ * removes the message queue from message queue ID IDR, and cleans up all the
+ * messages associated with this queue.
+ *
+- * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
+- * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
++ * msg_ids.rwsem (writer) and the spinlock for this message queue are held
++ * before freeque() is called. msg_ids.rwsem remains locked on exit.
+ */
+ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ {
+@@ -278,7 +276,8 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ expunge_all(msq, -EIDRM);
+ ss_wakeup(&msq->q_senders, 1);
+ msg_rmid(ns, msq);
+- msg_unlock(msq);
++ ipc_unlock_object(&msq->q_perm);
++ rcu_read_unlock();
+
+ list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
+ atomic_dec(&ns->msg_hdrs);
+@@ -289,7 +288,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ }
+
+ /*
+- * Called with msg_ids.rw_mutex and ipcp locked.
++ * Called with msg_ids.rwsem and ipcp locked.
+ */
+ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
+ {
+@@ -393,9 +392,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
+ }
+
+ /*
+- * This function handles some msgctl commands which require the rw_mutex
++ * This function handles some msgctl commands which require the rwsem
+ * to be held in write mode.
+- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
++ * NOTE: no locks must be held, the rwsem is taken inside this function.
+ */
+ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+ struct msqid_ds __user *buf, int version)
+@@ -410,7 +409,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+ return -EFAULT;
+ }
+
+- down_write(&msg_ids(ns).rw_mutex);
++ down_write(&msg_ids(ns).rwsem);
+ rcu_read_lock();
+
+ ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
+@@ -466,7 +465,7 @@ out_unlock0:
+ out_unlock1:
+ rcu_read_unlock();
+ out_up:
+- up_write(&msg_ids(ns).rw_mutex);
++ up_write(&msg_ids(ns).rwsem);
+ return err;
+ }
+
+@@ -501,7 +500,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
+ msginfo.msgmnb = ns->msg_ctlmnb;
+ msginfo.msgssz = MSGSSZ;
+ msginfo.msgseg = MSGSEG;
+- down_read(&msg_ids(ns).rw_mutex);
++ down_read(&msg_ids(ns).rwsem);
+ if (cmd == MSG_INFO) {
+ msginfo.msgpool = msg_ids(ns).in_use;
+ msginfo.msgmap = atomic_read(&ns->msg_hdrs);
+@@ -512,7 +511,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
+ msginfo.msgtql = MSGTQL;
+ }
+ max_id = ipc_get_maxid(&msg_ids(ns));
+- up_read(&msg_ids(ns).rw_mutex);
++ up_read(&msg_ids(ns).rwsem);
+ if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
+ return -EFAULT;
+ return (max_id < 0) ? 0 : max_id;
+diff --git a/ipc/namespace.c b/ipc/namespace.c
+index 7ee61bf..aba9a58 100644
+--- a/ipc/namespace.c
++++ b/ipc/namespace.c
+@@ -81,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
+ int next_id;
+ int total, in_use;
+
+- down_write(&ids->rw_mutex);
++ down_write(&ids->rwsem);
+
+ in_use = ids->in_use;
+
+@@ -89,11 +89,12 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
+ perm = idr_find(&ids->ipcs_idr, next_id);
+ if (perm == NULL)
+ continue;
+- ipc_lock_by_ptr(perm);
++ rcu_read_lock();
++ ipc_lock_object(perm);
+ free(ns, perm);
+ total++;
+ }
+- up_write(&ids->rw_mutex);
++ up_write(&ids->rwsem);
+ }
+
+ static void free_ipc_ns(struct ipc_namespace *ns)
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 87614511..8e2bf30 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -248,12 +248,20 @@ static void merge_queues(struct sem_array *sma)
+ * Caller must own sem_perm.lock.
+ * New simple ops cannot start, because simple ops first check
+ * that sem_perm.lock is free.
++ * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+ static void sem_wait_array(struct sem_array *sma)
+ {
+ int i;
+ struct sem *sem;
+
++ if (sma->complex_count) {
++ /* The thread that increased sma->complex_count waited on
++ * all sem->lock locks. Thus we don't need to wait again.
++ */
++ return;
++ }
++
+ for (i = 0; i < sma->sem_nsems; i++) {
+ sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+@@ -365,7 +373,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
+ }
+
+ /*
+- * sem_lock_(check_) routines are called in the paths where the rw_mutex
++ * sem_lock_(check_) routines are called in the paths where the rwsem
+ * is not held.
+ *
+ * The caller holds the RCU read lock.
+@@ -464,7 +472,7 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+ * @ns: namespace
+ * @params: ptr to the structure that contains key, semflg and nsems
+ *
+- * Called with sem_ids.rw_mutex held (as a writer)
++ * Called with sem_ids.rwsem held (as a writer)
+ */
+
+ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+@@ -529,7 +537,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
+
+
+ /*
+- * Called with sem_ids.rw_mutex and ipcp locked.
++ * Called with sem_ids.rwsem and ipcp locked.
+ */
+ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
+ {
+@@ -540,7 +548,7 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
+ }
+
+ /*
+- * Called with sem_ids.rw_mutex and ipcp locked.
++ * Called with sem_ids.rwsem and ipcp locked.
+ */
+ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
+ struct ipc_params *params)
+@@ -910,6 +918,24 @@ again:
+ }
+
+ /**
++ * set_semotime(sma, sops) - set sem_otime
++ * @sma: semaphore array
++ * @sops: operations that modified the array, may be NULL
++ *
++ * sem_otime is replicated to avoid cache line trashing.
++ * This function sets one instance to the current time.
++ */
++static void set_semotime(struct sem_array *sma, struct sembuf *sops)
++{
++ if (sops == NULL) {
++ sma->sem_base[0].sem_otime = get_seconds();
++ } else {
++ sma->sem_base[sops[0].sem_num].sem_otime =
++ get_seconds();
++ }
++}
++
++/**
+ * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * @sma: semaphore array
+ * @sops: operations that were performed
+@@ -959,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
+ }
+ }
+ }
+- if (otime) {
+- if (sops == NULL) {
+- sma->sem_base[0].sem_otime = get_seconds();
+- } else {
+- sma->sem_base[sops[0].sem_num].sem_otime =
+- get_seconds();
+- }
+- }
++ if (otime)
++ set_semotime(sma, sops);
+ }
+
+-
+ /* The following counts are associated to each semaphore:
+ * semncnt number of tasks waiting on semval being nonzero
+ * semzcnt number of tasks waiting on semval being zero
+@@ -1031,8 +1050,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
+ return semzcnt;
+ }
+
+-/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
+- * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
++/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
++ * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
+ * remains locked on exit.
+ */
+ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+@@ -1152,7 +1171,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
+ seminfo.semmnu = SEMMNU;
+ seminfo.semmap = SEMMAP;
+ seminfo.semume = SEMUME;
+- down_read(&sem_ids(ns).rw_mutex);
++ down_read(&sem_ids(ns).rwsem);
+ if (cmd == SEM_INFO) {
+ seminfo.semusz = sem_ids(ns).in_use;
+ seminfo.semaem = ns->used_sems;
+@@ -1161,7 +1180,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
+ seminfo.semaem = SEMAEM;
+ }
+ max_id = ipc_get_maxid(&sem_ids(ns));
+- up_read(&sem_ids(ns).rw_mutex);
++ up_read(&sem_ids(ns).rwsem);
+ if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
+ return -EFAULT;
+ return (max_id < 0) ? 0: max_id;
+@@ -1467,9 +1486,9 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
+ }
+
+ /*
+- * This function handles some semctl commands which require the rw_mutex
++ * This function handles some semctl commands which require the rwsem
+ * to be held in write mode.
+- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
++ * NOTE: no locks must be held, the rwsem is taken inside this function.
+ */
+ static int semctl_down(struct ipc_namespace *ns, int semid,
+ int cmd, int version, void __user *p)
+@@ -1484,7 +1503,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
+ return -EFAULT;
+ }
+
+- down_write(&sem_ids(ns).rw_mutex);
++ down_write(&sem_ids(ns).rwsem);
+ rcu_read_lock();
+
+ ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
+@@ -1523,7 +1542,7 @@ out_unlock0:
+ out_unlock1:
+ rcu_read_unlock();
+ out_up:
+- up_write(&sem_ids(ns).rw_mutex);
++ up_write(&sem_ids(ns).rwsem);
+ return err;
+ }
+
+@@ -1831,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
+
+ error = perform_atomic_semop(sma, sops, nsops, un,
+ task_tgid_vnr(current));
+- if (error <= 0) {
+- if (alter && error == 0)
++ if (error == 0) {
++ /* If the operation was successful, then do
++ * the required updates.
++ */
++ if (alter)
+ do_smart_update(sma, sops, nsops, 1, &tasks);
+-
+- goto out_unlock_free;
++ else
++ set_semotime(sma, sops);
+ }
++ if (error <= 0)
++ goto out_unlock_free;
+
+ /* We need to sleep on this operation, so we put the current
+ * task into the pending queue and go to sleep.
+@@ -2095,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
+ struct sem_array *sma = it;
+ time_t sem_otime;
+
++ /*
++ * The proc interface isn't aware of sem_lock(), it calls
++ * ipc_lock_object() directly (in sysvipc_find_ipc).
++ * In order to stay compatible with sem_lock(), we must wait until
++ * all simple semop() calls have left their critical regions.
++ */
++ sem_wait_array(sma);
++
+ sem_otime = get_semotime(sma);
+
+ return seq_printf(s,
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 2d6833d..d697396 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -19,6 +19,9 @@
+ * namespaces support
+ * OpenVZ, SWsoft Inc.
+ * Pavel Emelianov <xemul@openvz.org>
++ *
++ * Better ipc lock (kern_ipc_perm.lock) handling
++ * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
+ */
+
+ #include <linux/slab.h>
+@@ -80,8 +83,8 @@ void shm_init_ns(struct ipc_namespace *ns)
+ }
+
+ /*
+- * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
+- * Only shm_ids.rw_mutex remains locked on exit.
++ * Called with shm_ids.rwsem (writer) and the shp structure locked.
++ * Only shm_ids.rwsem remains locked on exit.
+ */
+ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ {
+@@ -124,8 +127,28 @@ void __init shm_init (void)
+ IPC_SHM_IDS, sysvipc_shm_proc_show);
+ }
+
++static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
++{
++ struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
++
++ if (IS_ERR(ipcp))
++ return ERR_CAST(ipcp);
++
++ return container_of(ipcp, struct shmid_kernel, shm_perm);
++}
++
++static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
++{
++ struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
++
++ if (IS_ERR(ipcp))
++ return ERR_CAST(ipcp);
++
++ return container_of(ipcp, struct shmid_kernel, shm_perm);
++}
++
+ /*
+- * shm_lock_(check_) routines are called in the paths where the rw_mutex
++ * shm_lock_(check_) routines are called in the paths where the rwsem
+ * is not necessarily held.
+ */
+ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
+@@ -144,17 +167,6 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
+ ipc_lock_object(&ipcp->shm_perm);
+ }
+
+-static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
+- int id)
+-{
+- struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
+-
+- if (IS_ERR(ipcp))
+- return (struct shmid_kernel *)ipcp;
+-
+- return container_of(ipcp, struct shmid_kernel, shm_perm);
+-}
+-
+ static void shm_rcu_free(struct rcu_head *head)
+ {
+ struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+@@ -191,7 +203,7 @@ static void shm_open(struct vm_area_struct *vma)
+ * @ns: namespace
+ * @shp: struct to free
+ *
+- * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
++ * It has to be called with shp and shm_ids.rwsem (writer) locked,
+ * but returns with shp unlocked and freed.
+ */
+ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+@@ -238,7 +250,7 @@ static void shm_close(struct vm_area_struct *vma)
+ struct shmid_kernel *shp;
+ struct ipc_namespace *ns = sfd->ns;
+
+- down_write(&shm_ids(ns).rw_mutex);
++ down_write(&shm_ids(ns).rwsem);
+ /* remove from the list of attaches of the shm segment */
+ shp = shm_lock(ns, sfd->id);
+ BUG_ON(IS_ERR(shp));
+@@ -249,10 +261,10 @@ static void shm_close(struct vm_area_struct *vma)
+ shm_destroy(ns, shp);
+ else
+ shm_unlock(shp);
+- up_write(&shm_ids(ns).rw_mutex);
++ up_write(&shm_ids(ns).rwsem);
+ }
+
+-/* Called with ns->shm_ids(ns).rw_mutex locked */
++/* Called with ns->shm_ids(ns).rwsem locked */
+ static int shm_try_destroy_current(int id, void *p, void *data)
+ {
+ struct ipc_namespace *ns = data;
+@@ -283,7 +295,7 @@ static int shm_try_destroy_current(int id, void *p, void *data)
+ return 0;
+ }
+
+-/* Called with ns->shm_ids(ns).rw_mutex locked */
++/* Called with ns->shm_ids(ns).rwsem locked */
+ static int shm_try_destroy_orphaned(int id, void *p, void *data)
+ {
+ struct ipc_namespace *ns = data;
+@@ -294,7 +306,7 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
+ * We want to destroy segments without users and with already
+ * exit'ed originating process.
+ *
+- * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
++ * As shp->* are changed under rwsem, it's safe to skip shp locking.
+ */
+ if (shp->shm_creator != NULL)
+ return 0;
+@@ -308,10 +320,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
+
+ void shm_destroy_orphaned(struct ipc_namespace *ns)
+ {
+- down_write(&shm_ids(ns).rw_mutex);
++ down_write(&shm_ids(ns).rwsem);
+ if (shm_ids(ns).in_use)
+ idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
+- up_write(&shm_ids(ns).rw_mutex);
++ up_write(&shm_ids(ns).rwsem);
+ }
+
+
+@@ -323,10 +335,10 @@ void exit_shm(struct task_struct *task)
+ return;
+
+ /* Destroy all already created segments, but not mapped yet */
+- down_write(&shm_ids(ns).rw_mutex);
++ down_write(&shm_ids(ns).rwsem);
+ if (shm_ids(ns).in_use)
+ idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
+- up_write(&shm_ids(ns).rw_mutex);
++ up_write(&shm_ids(ns).rwsem);
+ }
+
+ static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -460,7 +472,7 @@ static const struct vm_operations_struct shm_vm_ops = {
+ * @ns: namespace
+ * @params: ptr to the structure that contains key, size and shmflg
+ *
+- * Called with shm_ids.rw_mutex held as a writer.
++ * Called with shm_ids.rwsem held as a writer.
+ */
+
+ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+@@ -567,7 +579,7 @@ no_file:
+ }
+
+ /*
+- * Called with shm_ids.rw_mutex and ipcp locked.
++ * Called with shm_ids.rwsem and ipcp locked.
+ */
+ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
+ {
+@@ -578,7 +590,7 @@ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
+ }
+
+ /*
+- * Called with shm_ids.rw_mutex and ipcp locked.
++ * Called with shm_ids.rwsem and ipcp locked.
+ */
+ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
+ struct ipc_params *params)
+@@ -691,7 +703,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
+
+ /*
+ * Calculate and add used RSS and swap pages of a shm.
+- * Called with shm_ids.rw_mutex held as a reader
++ * Called with shm_ids.rwsem held as a reader
+ */
+ static void shm_add_rss_swap(struct shmid_kernel *shp,
+ unsigned long *rss_add, unsigned long *swp_add)
+@@ -718,7 +730,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
+ }
+
+ /*
+- * Called with shm_ids.rw_mutex held as a reader
++ * Called with shm_ids.rwsem held as a reader
+ */
+ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
+ unsigned long *swp)
+@@ -747,9 +759,9 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
+ }
+
+ /*
+- * This function handles some shmctl commands which require the rw_mutex
++ * This function handles some shmctl commands which require the rwsem
+ * to be held in write mode.
+- * NOTE: no locks must be held, the rw_mutex is taken inside this function.
++ * NOTE: no locks must be held, the rwsem is taken inside this function.
+ */
+ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
+ struct shmid_ds __user *buf, int version)
+@@ -764,14 +776,13 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
+ return -EFAULT;
+ }
+
+- down_write(&shm_ids(ns).rw_mutex);
++ down_write(&shm_ids(ns).rwsem);
+ rcu_read_lock();
+
+- ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
+- &shmid64.shm_perm, 0);
++ ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
++ &shmid64.shm_perm, 0);
+ if (IS_ERR(ipcp)) {
+ err = PTR_ERR(ipcp);
+- /* the ipc lock is not held upon failure */
+ goto out_unlock1;
+ }
+
+@@ -779,14 +790,16 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
+
+ err = security_shm_shmctl(shp, cmd);
+ if (err)
+- goto out_unlock0;
++ goto out_unlock1;
+
+ switch (cmd) {
+ case IPC_RMID:
++ ipc_lock_object(&shp->shm_perm);
+ /* do_shm_rmid unlocks the ipc object and rcu */
+ do_shm_rmid(ns, ipcp);
+ goto out_up;
+ case IPC_SET:
++ ipc_lock_object(&shp->shm_perm);
+ err = ipc_update_perm(&shmid64.shm_perm, ipcp);
+ if (err)
+ goto out_unlock0;
+@@ -794,6 +807,7 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
+ break;
+ default:
+ err = -EINVAL;
++ goto out_unlock1;
+ }
+
+ out_unlock0:
+@@ -801,33 +815,28 @@ out_unlock0:
+ out_unlock1:
+ rcu_read_unlock();
+ out_up:
+- up_write(&shm_ids(ns).rw_mutex);
++ up_write(&shm_ids(ns).rwsem);
+ return err;
+ }
+
+-SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
++static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
++ int cmd, int version, void __user *buf)
+ {
++ int err;
+ struct shmid_kernel *shp;
+- int err, version;
+- struct ipc_namespace *ns;
+
+- if (cmd < 0 || shmid < 0) {
+- err = -EINVAL;
+- goto out;
++ /* preliminary security checks for *_INFO */
++ if (cmd == IPC_INFO || cmd == SHM_INFO) {
++ err = security_shm_shmctl(NULL, cmd);
++ if (err)
++ return err;
+ }
+
+- version = ipc_parse_version(&cmd);
+- ns = current->nsproxy->ipc_ns;
+-
+- switch (cmd) { /* replace with proc interface ? */
++ switch (cmd) {
+ case IPC_INFO:
+ {
+ struct shminfo64 shminfo;
+
+- err = security_shm_shmctl(NULL, cmd);
+- if (err)
+- return err;
+-
+ memset(&shminfo, 0, sizeof(shminfo));
+ shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
+ shminfo.shmmax = ns->shm_ctlmax;
+@@ -837,9 +846,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ if(copy_shminfo_to_user (buf, &shminfo, version))
+ return -EFAULT;
+
+- down_read(&shm_ids(ns).rw_mutex);
++ down_read(&shm_ids(ns).rwsem);
+ err = ipc_get_maxid(&shm_ids(ns));
+- up_read(&shm_ids(ns).rw_mutex);
++ up_read(&shm_ids(ns).rwsem);
+
+ if(err<0)
+ err = 0;
+@@ -849,19 +858,15 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ {
+ struct shm_info shm_info;
+
+- err = security_shm_shmctl(NULL, cmd);
+- if (err)
+- return err;
+-
+ memset(&shm_info, 0, sizeof(shm_info));
+- down_read(&shm_ids(ns).rw_mutex);
++ down_read(&shm_ids(ns).rwsem);
+ shm_info.used_ids = shm_ids(ns).in_use;
+ shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
+ shm_info.shm_tot = ns->shm_tot;
+ shm_info.swap_attempts = 0;
+ shm_info.swap_successes = 0;
+ err = ipc_get_maxid(&shm_ids(ns));
+- up_read(&shm_ids(ns).rw_mutex);
++ up_read(&shm_ids(ns).rwsem);
+ if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
+ err = -EFAULT;
+ goto out;
+@@ -876,27 +881,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ struct shmid64_ds tbuf;
+ int result;
+
++ rcu_read_lock();
+ if (cmd == SHM_STAT) {
+- shp = shm_lock(ns, shmid);
++ shp = shm_obtain_object(ns, shmid);
+ if (IS_ERR(shp)) {
+ err = PTR_ERR(shp);
+- goto out;
++ goto out_unlock;
+ }
+ result = shp->shm_perm.id;
+ } else {
+- shp = shm_lock_check(ns, shmid);
++ shp = shm_obtain_object_check(ns, shmid);
+ if (IS_ERR(shp)) {
+ err = PTR_ERR(shp);
+- goto out;
++ goto out_unlock;
+ }
+ result = 0;
+ }
++
+ err = -EACCES;
+ if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
+ goto out_unlock;
++
+ err = security_shm_shmctl(shp, cmd);
+ if (err)
+ goto out_unlock;
++
+ memset(&tbuf, 0, sizeof(tbuf));
+ kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
+ tbuf.shm_segsz = shp->shm_segsz;
+@@ -906,43 +915,76 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ tbuf.shm_cpid = shp->shm_cprid;
+ tbuf.shm_lpid = shp->shm_lprid;
+ tbuf.shm_nattch = shp->shm_nattch;
+- shm_unlock(shp);
+- if(copy_shmid_to_user (buf, &tbuf, version))
++ rcu_read_unlock();
++
++ if (copy_shmid_to_user(buf, &tbuf, version))
+ err = -EFAULT;
+ else
+ err = result;
+ goto out;
+ }
++ default:
++ return -EINVAL;
++ }
++
++out_unlock:
++ rcu_read_unlock();
++out:
++ return err;
++}
++
++SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
++{
++ struct shmid_kernel *shp;
++ int err, version;
++ struct ipc_namespace *ns;
++
++ if (cmd < 0 || shmid < 0)
++ return -EINVAL;
++
++ version = ipc_parse_version(&cmd);
++ ns = current->nsproxy->ipc_ns;
++
++ switch (cmd) {
++ case IPC_INFO:
++ case SHM_INFO:
++ case SHM_STAT:
++ case IPC_STAT:
++ return shmctl_nolock(ns, shmid, cmd, version, buf);
++ case IPC_RMID:
++ case IPC_SET:
++ return shmctl_down(ns, shmid, cmd, buf, version);
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ {
+ struct file *shm_file;
+
+- shp = shm_lock_check(ns, shmid);
++ rcu_read_lock();
++ shp = shm_obtain_object_check(ns, shmid);
+ if (IS_ERR(shp)) {
+ err = PTR_ERR(shp);
+- goto out;
++ goto out_unlock1;
+ }
+
+ audit_ipc_obj(&(shp->shm_perm));
++ err = security_shm_shmctl(shp, cmd);
++ if (err)
++ goto out_unlock1;
+
++ ipc_lock_object(&shp->shm_perm);
+ if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
+ kuid_t euid = current_euid();
+ err = -EPERM;
+ if (!uid_eq(euid, shp->shm_perm.uid) &&
+ !uid_eq(euid, shp->shm_perm.cuid))
+- goto out_unlock;
++ goto out_unlock0;
+ if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
+- goto out_unlock;
++ goto out_unlock0;
+ }
+
+- err = security_shm_shmctl(shp, cmd);
+- if (err)
+- goto out_unlock;
+-
+ shm_file = shp->shm_file;
+ if (is_file_hugepages(shm_file))
+- goto out_unlock;
++ goto out_unlock0;
+
+ if (cmd == SHM_LOCK) {
+ struct user_struct *user = current_user();
+@@ -951,32 +993,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ shp->shm_perm.mode |= SHM_LOCKED;
+ shp->mlock_user = user;
+ }
+- goto out_unlock;
++ goto out_unlock0;
+ }
+
+ /* SHM_UNLOCK */
+ if (!(shp->shm_perm.mode & SHM_LOCKED))
+- goto out_unlock;
++ goto out_unlock0;
+ shmem_lock(shm_file, 0, shp->mlock_user);
+ shp->shm_perm.mode &= ~SHM_LOCKED;
+ shp->mlock_user = NULL;
+ get_file(shm_file);
+- shm_unlock(shp);
++ ipc_unlock_object(&shp->shm_perm);
++ rcu_read_unlock();
+ shmem_unlock_mapping(shm_file->f_mapping);
++
+ fput(shm_file);
+- goto out;
+- }
+- case IPC_RMID:
+- case IPC_SET:
+- err = shmctl_down(ns, shmid, cmd, buf, version);
+ return err;
++ }
+ default:
+ return -EINVAL;
+ }
+
+-out_unlock:
+- shm_unlock(shp);
+-out:
++out_unlock0:
++ ipc_unlock_object(&shp->shm_perm);
++out_unlock1:
++ rcu_read_unlock();
+ return err;
+ }
+
+@@ -1044,10 +1085,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ * additional creator id...
+ */
+ ns = current->nsproxy->ipc_ns;
+- shp = shm_lock_check(ns, shmid);
++ rcu_read_lock();
++ shp = shm_obtain_object_check(ns, shmid);
+ if (IS_ERR(shp)) {
+ err = PTR_ERR(shp);
+- goto out;
++ goto out_unlock;
+ }
+
+ err = -EACCES;
+@@ -1058,24 +1100,31 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ if (err)
+ goto out_unlock;
+
++ ipc_lock_object(&shp->shm_perm);
+ path = shp->shm_file->f_path;
+ path_get(&path);
+ shp->shm_nattch++;
+ size = i_size_read(path.dentry->d_inode);
+- shm_unlock(shp);
++ ipc_unlock_object(&shp->shm_perm);
++ rcu_read_unlock();
+
+ err = -ENOMEM;
+ sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
+- if (!sfd)
+- goto out_put_dentry;
++ if (!sfd) {
++ path_put(&path);
++ goto out_nattch;
++ }
+
+ file = alloc_file(&path, f_mode,
+ is_file_hugepages(shp->shm_file) ?
+ &shm_file_operations_huge :
+ &shm_file_operations);
+ err = PTR_ERR(file);
+- if (IS_ERR(file))
+- goto out_free;
++ if (IS_ERR(file)) {
++ kfree(sfd);
++ path_put(&path);
++ goto out_nattch;
++ }
+
+ file->private_data = sfd;
+ file->f_mapping = shp->shm_file->f_mapping;
+@@ -1101,7 +1150,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ addr > current->mm->start_stack - size - PAGE_SIZE * 5)
+ goto invalid;
+ }
+-
++
+ addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
+ *raddr = addr;
+ err = 0;
+@@ -1116,7 +1165,7 @@ out_fput:
+ fput(file);
+
+ out_nattch:
+- down_write(&shm_ids(ns).rw_mutex);
++ down_write(&shm_ids(ns).rwsem);
+ shp = shm_lock(ns, shmid);
+ BUG_ON(IS_ERR(shp));
+ shp->shm_nattch--;
+@@ -1124,20 +1173,13 @@ out_nattch:
+ shm_destroy(ns, shp);
+ else
+ shm_unlock(shp);
+- up_write(&shm_ids(ns).rw_mutex);
+-
+-out:
++ up_write(&shm_ids(ns).rwsem);
+ return err;
+
+ out_unlock:
+- shm_unlock(shp);
+- goto out;
+-
+-out_free:
+- kfree(sfd);
+-out_put_dentry:
+- path_put(&path);
+- goto out_nattch;
++ rcu_read_unlock();
++out:
++ return err;
+ }
+
+ SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
+@@ -1242,8 +1284,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
+ #else /* CONFIG_MMU */
+ /* under NOMMU conditions, the exact address to be destroyed must be
+ * given */
+- retval = -EINVAL;
+- if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
++ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
+ do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
+ retval = 0;
+ }
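(Editor's aside, not part of the patch: the ipc/shm.c hunks above move shmctl()'s SHM_LOCK/SHM_UNLOCK handling and do_shmat()'s attach path from the old shm_lock_check() pattern to an RCU lookup plus ipc_lock_object(). A minimal userspace program such as the following exercises exactly those syscall paths; it assumes nothing beyond the standard SysV shm API, though SHM_LOCK may fail without CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK.)

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
    /* Create a private 4 KiB segment. */
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    if (id < 0) { perror("shmget"); return 1; }

    /* SHM_LOCK/SHM_UNLOCK take the shmctl() path reworked above. */
    if (shmctl(id, SHM_LOCK, NULL) != 0)
        perror("shmctl(SHM_LOCK)");   /* may need CAP_IPC_LOCK */
    if (shmctl(id, SHM_UNLOCK, NULL) != 0)
        perror("shmctl(SHM_UNLOCK)");

    /* shmat()/shmdt() drive do_shmat()'s attach/detach paths. */
    void *p = shmat(id, NULL, 0);
    if (p == (void *)-1) { perror("shmat"); return 1; }
    strcpy(p, "hello");
    printf("attached at %p: %s\n", p, (char *)p);
    shmdt(p);

    /* Mark the segment for destruction once fully detached. */
    shmctl(id, IPC_RMID, NULL);
    return 0;
}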
+diff --git a/ipc/util.c b/ipc/util.c
+index 0c6566b..fdb8ae7 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -15,6 +15,14 @@
+ * Jun 2006 - namespaces support
+ * OpenVZ, SWsoft Inc.
+ * Pavel Emelianov <xemul@openvz.org>
++ *
++ * General sysv ipc locking scheme:
++ * when doing ipc id lookups, take the ids->rwsem
++ * rcu_read_lock()
++ * obtain the ipc object (kern_ipc_perm)
++ * perform security, capabilities, auditing and permission checks, etc.
++ *   acquire the ipc lock (kern_ipc_perm.lock) through ipc_lock_object()
++ *   perform data updates (i.e. SET, RMID, LOCK/UNLOCK commands)
+ */
+
+ #include <linux/mm.h>
+@@ -119,7 +127,7 @@ __initcall(ipc_init);
+
+ void ipc_init_ids(struct ipc_ids *ids)
+ {
+- init_rwsem(&ids->rw_mutex);
++ init_rwsem(&ids->rwsem);
+
+ ids->in_use = 0;
+ ids->seq = 0;
+@@ -174,7 +182,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
+ * @ids: Identifier set
+ * @key: The key to find
+ *
+- * Requires ipc_ids.rw_mutex locked.
++ * Requires ipc_ids.rwsem locked.
+ * Returns the LOCKED pointer to the ipc structure if found or NULL
+ * if not.
+ * If key is found ipc points to the owning ipc structure
+@@ -197,7 +205,8 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
+ continue;
+ }
+
+- ipc_lock_by_ptr(ipc);
++ rcu_read_lock();
++ ipc_lock_object(ipc);
+ return ipc;
+ }
+
+@@ -208,7 +217,7 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
+ * ipc_get_maxid - get the last assigned id
+ * @ids: IPC identifier set
+ *
+- * Called with ipc_ids.rw_mutex held.
++ * Called with ipc_ids.rwsem held.
+ */
+
+ int ipc_get_maxid(struct ipc_ids *ids)
+@@ -246,7 +255,7 @@ int ipc_get_maxid(struct ipc_ids *ids)
+ * is returned. The 'new' entry is returned in a locked state on success.
+ * On failure the entry is not locked and a negative err-code is returned.
+ *
+- * Called with writer ipc_ids.rw_mutex held.
++ * Called with writer ipc_ids.rwsem held.
+ */
+ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
+ {
+@@ -312,9 +321,9 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
+ {
+ int err;
+
+- down_write(&ids->rw_mutex);
++ down_write(&ids->rwsem);
+ err = ops->getnew(ns, params);
+- up_write(&ids->rw_mutex);
++ up_write(&ids->rwsem);
+ return err;
+ }
+
+@@ -331,7 +340,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
+ *
+ * On success, the IPC id is returned.
+ *
+- * It is called with ipc_ids.rw_mutex and ipcp->lock held.
++ * It is called with ipc_ids.rwsem and ipcp->lock held.
+ */
+ static int ipc_check_perms(struct ipc_namespace *ns,
+ struct kern_ipc_perm *ipcp,
+@@ -376,7 +385,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
+ * Take the lock as a writer since we are potentially going to add
+ * a new entry + read locks are not "upgradable"
+ */
+- down_write(&ids->rw_mutex);
++ down_write(&ids->rwsem);
+ ipcp = ipc_findkey(ids, params->key);
+ if (ipcp == NULL) {
+ /* key not used */
+@@ -402,7 +411,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
+ }
+ ipc_unlock(ipcp);
+ }
+- up_write(&ids->rw_mutex);
++ up_write(&ids->rwsem);
+
+ return err;
+ }
+@@ -413,7 +422,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
+ * @ids: IPC identifier set
+ * @ipcp: ipc perm structure containing the identifier to remove
+ *
+- * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
++ * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
+ * before this function is called, and remain locked on the exit.
+ */
+
+@@ -613,7 +622,7 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
+ }
+
+ /**
+- * ipc_lock - Lock an ipc structure without rw_mutex held
++ * ipc_lock - Lock an ipc structure without rwsem held
+ * @ids: IPC identifier set
+ * @id: ipc id to look for
+ *
+@@ -669,22 +678,6 @@ out:
+ return out;
+ }
+
+-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
+-{
+- struct kern_ipc_perm *out;
+-
+- out = ipc_lock(ids, id);
+- if (IS_ERR(out))
+- return out;
+-
+- if (ipc_checkid(out, id)) {
+- ipc_unlock(out);
+- return ERR_PTR(-EIDRM);
+- }
+-
+- return out;
+-}
+-
+ /**
+ * ipcget - Common sys_*get() code
+ * @ns : namespace
+@@ -725,7 +718,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
+ }
+
+ /**
+- * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
++ * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ns: the ipc namespace
+ * @ids: the table of ids where to look for the ipc
+ * @id: the id of the ipc to retrieve
+@@ -738,29 +731,13 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
+ * It must be called without any lock held and
+ * - retrieves the ipc with the given id in the given table.
+ * - performs some audit and permission check, depending on the given cmd
+- * - returns the ipc with the ipc lock held in case of success
+- * or an err-code without any lock held otherwise.
++ * - returns a pointer to the ipc object on success, or the corresponding error otherwise.
+ *
+- * Call holding the both the rw_mutex and the rcu read lock.
++ * Call holding both the rwsem and the rcu read lock.
+ */
+-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
+- struct ipc_ids *ids, int id, int cmd,
+- struct ipc64_perm *perm, int extra_perm)
+-{
+- struct kern_ipc_perm *ipcp;
+-
+- ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
+- if (IS_ERR(ipcp))
+- goto out;
+-
+- spin_lock(&ipcp->lock);
+-out:
+- return ipcp;
+-}
+-
+ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
+- struct ipc_ids *ids, int id, int cmd,
+- struct ipc64_perm *perm, int extra_perm)
++ struct ipc_ids *ids, int id, int cmd,
++ struct ipc64_perm *perm, int extra_perm)
+ {
+ kuid_t euid;
+ int err = -EPERM;
+@@ -838,7 +815,8 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+ ipc = idr_find(&ids->ipcs_idr, pos);
+ if (ipc != NULL) {
+ *new_pos = pos + 1;
+- ipc_lock_by_ptr(ipc);
++ rcu_read_lock();
++ ipc_lock_object(ipc);
+ return ipc;
+ }
+ }
+@@ -876,7 +854,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
+ * Take the lock - this will be released by the corresponding
+ * call to stop().
+ */
+- down_read(&ids->rw_mutex);
++ down_read(&ids->rwsem);
+
+ /* pos < 0 is invalid */
+ if (*pos < 0)
+@@ -903,7 +881,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
+
+ ids = &iter->ns->ids[iface->ids];
+ /* Release the lock we took in start() */
+- up_read(&ids->rw_mutex);
++ up_read(&ids->rwsem);
+ }
+
+ static int sysvipc_proc_show(struct seq_file *s, void *it)
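(Editor's aside, not part of the patch: the locking scheme documented in the new ipc/util.c header comment — the table-wide rwsem for id allocation and removal, then the RCU read lock plus the per-object kern_ipc_perm.lock for updates — can be illustrated with a rough userspace analogy. The sketch below is my own and substitutes a pthread rwlock for the rwsem and a per-object mutex for the ipc spinlock; RCU has no drop-in userspace equivalent, so lookups here simply hold the rwlock for reading. Build with -lpthread.)

#include <pthread.h>
#include <stdio.h>

/* Analogue of kern_ipc_perm: the per-object lock guards its fields. */
struct obj {
    pthread_mutex_t lock;
    int data;
};

/* Analogue of ipc_ids: the table-wide rwsem guards add/remove of slots. */
static pthread_rwlock_t table_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static struct obj table[16];

/* "SET-style" update: read-side table lock, then the object lock. */
static void update(int id, int val)
{
    pthread_rwlock_rdlock(&table_rwsem);   /* lookups proceed in parallel */
    pthread_mutex_lock(&table[id].lock);   /* serialize against other updaters */
    table[id].data = val;
    pthread_mutex_unlock(&table[id].lock);
    pthread_rwlock_unlock(&table_rwsem);
}

/* "IPC_RMID-style" removal: the writer lock excludes all lookups. */
static void remove_obj(int id)
{
    pthread_rwlock_wrlock(&table_rwsem);
    table[id].data = 0;                    /* tear down while nobody can find it */
    pthread_rwlock_unlock(&table_rwsem);
}

int main(void)
{
    for (int i = 0; i < 16; i++)
        pthread_mutex_init(&table[i].lock, NULL);
    update(3, 42);
    printf("table[3] = %d\n", table[3].data);
    remove_obj(3);
    return 0;
}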
+diff --git a/ipc/util.h b/ipc/util.h
+index 25299e7..f2f5036 100644
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -101,10 +101,10 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
+ #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
+ #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
+
+-/* must be called with ids->rw_mutex acquired for writing */
++/* must be called with ids->rwsem acquired for writing */
+ int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
+
+-/* must be called with ids->rw_mutex acquired for reading */
++/* must be called with ids->rwsem acquired for reading */
+ int ipc_get_maxid(struct ipc_ids *);
+
+ /* must be called with both locks acquired. */
+@@ -139,9 +139,6 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
+ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
+ struct ipc_ids *ids, int id, int cmd,
+ struct ipc64_perm *perm, int extra_perm);
+-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
+- struct ipc_ids *ids, int id, int cmd,
+- struct ipc64_perm *perm, int extra_perm);
+
+ #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
+ /* On IA-64, we always use the "64-bit version" of the IPC structures. */
+@@ -182,19 +179,12 @@ static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
+ assert_spin_locked(&perm->lock);
+ }
+
+-static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
+-{
+- rcu_read_lock();
+- ipc_lock_object(perm);
+-}
+-
+ static inline void ipc_unlock(struct kern_ipc_perm *perm)
+ {
+ ipc_unlock_object(perm);
+ rcu_read_unlock();
+ }
+
+-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
+ struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
+ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
+ struct ipc_ops *ops, struct ipc_params *params);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 45850f6..4865756 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -930,6 +930,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ }
+
+ /*
++	 * always configure the channel mapping; it may have been changed by the
++ * user in the meantime
++ */
++ hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
++ channels, per_pin->chmap,
++ per_pin->chmap_set);
++
++ /*
+ * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
+ * sizeof(*dp_ai) to avoid partial match/update problems when
+ * the user switches between HDMI/DP monitors.
+@@ -940,20 +948,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ "pin=%d channels=%d\n",
+ pin_nid,
+ channels);
+- hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
+- channels, per_pin->chmap,
+- per_pin->chmap_set);
+ hdmi_stop_infoframe_trans(codec, pin_nid);
+ hdmi_fill_audio_infoframe(codec, pin_nid,
+ ai.bytes, sizeof(ai));
+ hdmi_start_infoframe_trans(codec, pin_nid);
+- } else {
+- /* For non-pcm audio switch, setup new channel mapping
+- * accordingly */
+- if (per_pin->non_pcm != non_pcm)
+- hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
+- channels, per_pin->chmap,
+- per_pin->chmap_set);
+ }
+
+ per_pin->non_pcm = non_pcm;
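(Editor's aside, not part of the patch: the patch_hdmi.c hunk reorders hdmi_setup_audio_infoframe() so the channel map is reprogrammed on every call, because the user may have changed it through the ALSA chmap controls since the last ELD update. A user-side change of that kind would typically go through alsa-lib's chmap API, roughly as below; the device name "hdmi:0" is a guess and depends on the card. Build with -lasound.)

#include <stdio.h>
#include <alsa/asoundlib.h>

int main(void)
{
    snd_pcm_t *pcm;

    /* "hdmi:0" is an assumed device name; adjust for the system. */
    if (snd_pcm_open(&pcm, "hdmi:0", SND_PCM_STREAM_PLAYBACK, 0) < 0) {
        fprintf(stderr, "cannot open HDMI PCM\n");
        return 1;
    }

    /* List the channel maps the pin/codec advertises. */
    snd_pcm_chmap_query_t **maps = snd_pcm_query_chmaps(pcm);
    if (maps) {
        for (int i = 0; maps[i]; i++) {
            char buf[128];
            if (snd_pcm_chmap_print(&maps[i]->map, sizeof(buf), buf) > 0)
                printf("supported map: %s\n", buf);
        }
        snd_pcm_free_chmaps(maps);
    }

    snd_pcm_close(pcm);
    return 0;
}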
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 389db4c..1383f38 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3308,6 +3308,15 @@ static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
+ }
+ }
+
++static void alc290_fixup_mono_speakers(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action == HDA_FIXUP_ACT_PRE_PROBE)
++ /* Remove DAC node 0x03, as it seems to be
++ giving mono output */
++ snd_hda_override_wcaps(codec, 0x03, 0);
++}
++
+ enum {
+ ALC269_FIXUP_SONY_VAIO,
+ ALC275_FIXUP_SONY_VAIO_GPIO2,
+@@ -3331,9 +3340,12 @@ enum {
+ ALC269_FIXUP_HP_GPIO_LED,
+ ALC269_FIXUP_INV_DMIC,
+ ALC269_FIXUP_LENOVO_DOCK,
++ ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
+ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
++ ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
++ ALC290_FIXUP_MONO_SPEAKERS,
+ ALC269_FIXUP_HEADSET_MODE,
+ ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
+ ALC269_FIXUP_ASUS_X101_FUNC,
+@@ -3521,6 +3533,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
++ [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++ },
+ [ALC269_FIXUP_HEADSET_MODE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mode,
+@@ -3529,6 +3550,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mode_no_hp_mic,
+ },
++ [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ },
+ [ALC269_FIXUP_ASUS_X101_FUNC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_x101_headset_mic,
+@@ -3595,6 +3623,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ { }
+ },
+ },
++ [ALC290_FIXUP_MONO_SPEAKERS] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc290_fixup_mono_speakers,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -3631,6 +3665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -3651,6 +3686,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
++ SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
+ SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+@@ -4345,6 +4381,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index 63fb521..6234a51 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
+ usX2Y_clients_stop(usX2Y);
+ }
+
+-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
+- struct snd_usX2Y_substream *subs, struct urb *urb)
+-{
+- snd_printk(KERN_ERR
+-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
+-"Most probably some urb of usb-frame %i is still missing.\n"
+-"Cause could be too long delays in usb-hcd interrupt handling.\n",
+- usb_get_current_frame_number(usX2Y->dev),
+- subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
+- usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
+- usX2Y_clients_stop(usX2Y);
+-}
+-
+ static void i_usX2Y_urb_complete(struct urb *urb)
+ {
+ struct snd_usX2Y_substream *subs = urb->context;
+@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
++
++ subs->completed_urb = urb;
++
+ {
+ struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
+ *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
+index f2a1acd..814d0e8 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.c
++++ b/sound/usb/usx2y/usx2yhwdeppcm.c
+@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
+
++ subs->completed_urb = urb;
+ capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];