Diffstat (limited to '3.2.53/1052_linux-3.2.53.patch')
-rw-r--r--  3.2.53/1052_linux-3.2.53.patch  3357
1 files changed, 3357 insertions, 0 deletions
diff --git a/3.2.53/1052_linux-3.2.53.patch b/3.2.53/1052_linux-3.2.53.patch
new file mode 100644
index 0000000..986d714
--- /dev/null
+++ b/3.2.53/1052_linux-3.2.53.patch
@@ -0,0 +1,3357 @@
+diff --git a/Makefile b/Makefile
+index 1dd2c09..90f57dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
+index 1881b31..b19559c 100644
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -22,7 +22,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\tnop\n\t"
++ asm_volatile_goto("1:\tnop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index 37aabd7..d2d5825 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -195,6 +195,8 @@ common_stext:
+ ldw MEM_PDC_HI(%r0),%r6
+ depd %r6, 31, 32, %r3 /* move to upper word */
+
++ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
++
+ ldo PDC_PSW(%r0),%arg0 /* 21 */
+ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
+@@ -203,6 +205,8 @@ common_stext:
+ copy %r0,%arg3
+
+ stext_pdc_ret:
++ mtctl %r6,%cr30 /* restore task thread info */
++
+ /* restore rfi target address*/
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index f19e660..cd8b02f 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -811,14 +811,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ else {
+
+ /*
+- * The kernel should never fault on its own address space.
++ * The kernel should never fault on its own address space,
++ * unless pagefault_disable() was called before.
+ */
+
+- if (fault_space == 0)
++ if (fault_space == 0 && !in_atomic())
+ {
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ parisc_terminate("Kernel Fault", regs, code, fault_address);
+-
+ }
+ }
+
+diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
+index 938986e..ee33888 100644
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -19,7 +19,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 5e8dc08..e3b3cf9 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -922,7 +922,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+- std r8, VCPU_DSCR(r7)
++ std r8, VCPU_DSCR(r9)
+ mtspr SPRN_DSCR, r7
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
+index 95a6cf2..8512d0a 100644
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -15,7 +15,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("0: brcl 0,0\n"
++ asm_volatile_goto("0: brcl 0,0\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ASM_ALIGN "\n"
+ ASM_PTR " 0b, %l[label], %0\n"
+diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
+index fc73a82..e17b65b 100644
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -9,7 +9,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
+index 63294f5..4f7ae39 100644
+--- a/arch/tile/include/asm/percpu.h
++++ b/arch/tile/include/asm/percpu.h
+@@ -15,9 +15,37 @@
+ #ifndef _ASM_TILE_PERCPU_H
+ #define _ASM_TILE_PERCPU_H
+
+-register unsigned long __my_cpu_offset __asm__("tp");
+-#define __my_cpu_offset __my_cpu_offset
+-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
++register unsigned long my_cpu_offset_reg asm("tp");
++
++#ifdef CONFIG_PREEMPT
++/*
++ * For full preemption, we can't just use the register variable
++ * directly, since we need barrier() to hazard against it, causing the
++ * compiler to reload anything computed from a previous "tp" value.
++ * But we also don't want to use volatile asm, since we'd like the
++ * compiler to be able to cache the value across multiple percpu reads.
++ * So we use a fake stack read as a hazard against barrier().
++ * The 'U' constraint is like 'm' but disallows postincrement.
++ */
++static inline unsigned long __my_cpu_offset(void)
++{
++ unsigned long tp;
++ register unsigned long *sp asm("sp");
++ asm("move %0, tp" : "=r" (tp) : "U" (*sp));
++ return tp;
++}
++#define __my_cpu_offset __my_cpu_offset()
++#else
++/*
++ * We don't need to hazard against barrier() since "tp" doesn't ever
++ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
++ * changes at function call points, at which we are already re-reading
++ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
++ */
++#define __my_cpu_offset my_cpu_offset_reg
++#endif
++
++#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
+
+ #include <asm-generic/percpu.h>
+
+diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
+index 829df49..41ebbfe 100644
+--- a/arch/um/kernel/exitcode.c
++++ b/arch/um/kernel/exitcode.c
+@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+ {
+ char *end, buf[sizeof("nnnnn\0")];
++ size_t size;
+ int tmp;
+
+- if (copy_from_user(buf, buffer, count))
++ size = min(count, sizeof(buf));
++ if (copy_from_user(buf, buffer, size))
+ return -EFAULT;
+
+ tmp = simple_strtol(buf, &end, 0);
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 0c3b775..a315f1c 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -334,7 +334,7 @@ extern const char * const x86_power_flags[32];
+ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ {
+ #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+- asm goto("1: jmp %l[t_no]\n"
++ asm_volatile_goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
+index a32b18c..e12c1bc 100644
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:"
++ asm_volatile_goto("1:"
+ JUMP_LABEL_INITIAL_NOP
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
+index f2220b5..cf3e9cb 100644
+--- a/arch/xtensa/kernel/signal.c
++++ b/arch/xtensa/kernel/signal.c
+@@ -346,7 +346,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ sp = regs->areg[1];
+
+- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
++ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index aea627e..7d1a478 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1286,14 +1286,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+- * scmd->retries is decremented for commands which get retried
++ * scmd->allowed is incremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+ {
+ struct scsi_cmnd *scmd = qc->scsicmd;
+- if (!qc->err_mask && scmd->retries)
+- scmd->retries--;
++ if (!qc->err_mask)
++ scmd->allowed++;
+ __ata_eh_qc_complete(qc);
+ }
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index b651733..c244f0e 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -668,7 +668,7 @@ static void set_timer_rand_state(unsigned int irq,
+ */
+ void add_device_randomness(const void *buf, unsigned int size)
+ {
+- unsigned long time = get_cycles() ^ jiffies;
++ unsigned long time = random_get_entropy() ^ jiffies;
+
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+@@ -705,7 +705,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ goto out;
+
+ sample.jiffies = jiffies;
+- sample.cycles = get_cycles();
++ sample.cycles = random_get_entropy();
+ sample.num = num;
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+@@ -772,7 +772,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+- __u32 input[4], cycles = get_cycles();
++ __u32 input[4], cycles = random_get_entropy();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+@@ -1480,12 +1480,11 @@ ctl_table random_table[] = {
+
+ static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+-static int __init random_int_secret_init(void)
++int random_int_secret_init(void)
+ {
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
+ return 0;
+ }
+-late_initcall(random_int_secret_init);
+
+ /*
+ * Get a random word for internal kernel use only. Similar to urandom but
+@@ -1504,7 +1503,7 @@ unsigned int get_random_int(void)
+
+ hash = get_cpu_var(get_random_int_hash);
+
+- hash[0] += current->pid + jiffies + get_cycles();
++ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
+ put_cpu_var(get_random_int_hash);
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 46bbf43..66d5384 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -64,6 +64,7 @@ void proc_fork_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -79,6 +80,7 @@ void proc_fork_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ /* If cn_netlink_send() failed, the data is not sent */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+@@ -95,6 +97,7 @@ void proc_exec_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -105,6 +108,7 @@ void proc_exec_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -121,6 +125,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+ ev->event_data.id.process_pid = task->pid;
+ ev->event_data.id.process_tgid = task->tgid;
+@@ -144,6 +149,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -159,6 +165,7 @@ void proc_sid_connector(struct task_struct *task)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -169,6 +176,7 @@ void proc_sid_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -184,6 +192,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -202,6 +211,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -217,6 +227,7 @@ void proc_comm_connector(struct task_struct *task)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -228,6 +239,7 @@ void proc_comm_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -243,6 +255,7 @@ void proc_exit_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -255,6 +268,7 @@ void proc_exit_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -278,6 +292,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -287,6 +302,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = rcvd_ack + 1;
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index dde6a0f..ea6efe8 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
+ static void cn_rx_skb(struct sk_buff *__skb)
+ {
+ struct nlmsghdr *nlh;
+- int err;
+ struct sk_buff *skb;
++ int len, err;
+
+ skb = skb_get(__skb);
+
+ if (skb->len >= NLMSG_SPACE(0)) {
+ nlh = nlmsg_hdr(skb);
++ len = nlmsg_len(nlh);
+
+- if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
++ if (len < (int)sizeof(struct cn_msg) ||
+ skb->len < nlh->nlmsg_len ||
+- nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
++ len > CONNECTOR_MAX_MSG_SIZE) {
+ kfree_skb(skb);
+ return;
+ }
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 40c187c..acfe567 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -408,9 +408,16 @@ long drm_ioctl(struct file *filp,
+ asize = drv_size;
+ }
+ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
++ u32 drv_size;
++
+ ioctl = &drm_ioctls[nr];
+- cmd = ioctl->cmd;
++
++ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
++ if (drv_size > asize)
++ asize = drv_size;
++
++ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 3171294..475a275 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1390,7 +1390,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+- (rdev->family != CHIP_RS880))
++ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index a68057a..5efba47 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1797,7 +1797,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+- rdev->config.evergreen.max_hw_contexts = 8;
++ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 30cac58..0b86d47 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -212,6 +212,7 @@ static int send_argument(const char *key)
+
+ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ {
++ u8 status, data = 0;
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+@@ -219,6 +220,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ return -EIO;
+ }
+
++ /* This has no effect on newer (2012) SMCs */
+ outb(len, APPLESMC_DATA_PORT);
+
+ for (i = 0; i < len; i++) {
+@@ -229,6 +231,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ buffer[i] = inb(APPLESMC_DATA_PORT);
+ }
+
++ /* Read the data port until bit0 is cleared */
++ for (i = 0; i < 16; i++) {
++ udelay(APPLESMC_MIN_WAIT);
++ status = inb(APPLESMC_CMD_PORT);
++ if (!(status & 0x01))
++ break;
++ data = inb(APPLESMC_DATA_PORT);
++ }
++ if (i)
++ pr_warn("flushed %d bytes, last value is: %d\n", i, data);
++
+ return 0;
+ }
+
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 3ac4156..75c182b 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
+ return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
+ }
+
++static void skip_metadata(struct pstore *ps)
++{
++ uint32_t stride = ps->exceptions_per_area + 1;
++ chunk_t next_free = ps->next_free;
++ if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
++ ps->next_free++;
++}
++
+ /*
+ * Read or write a metadata area. Remembering to skip the first
+ * chunk which holds the header.
+@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
+
+ ps->current_area--;
+
++ skip_metadata(ps);
++
+ return 0;
+ }
+
+@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ struct dm_exception *e)
+ {
+ struct pstore *ps = get_info(store);
+- uint32_t stride;
+- chunk_t next_free;
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
+
+ /* Is there enough room ? */
+@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ * Move onto the next free pending, making sure to take
+ * into account the location of the metadata chunks.
+ */
+- stride = (ps->exceptions_per_area + 1);
+- next_free = ++ps->next_free;
+- if (sector_div(next_free, stride) == 1)
+- ps->next_free++;
++ ps->next_free++;
++ skip_metadata(ps);
+
+ atomic_inc(&ps->pending_count);
+ return 0;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index a319057..de87f82 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -658,14 +658,14 @@ static size_t can_get_size(const struct net_device *dev)
+ size_t size;
+
+ size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+- size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
++ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+- size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
+- size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
++ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
++ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+- size += sizeof(struct can_berr_counter);
++ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+- size += sizeof(struct can_bittiming_const);
++ size += nla_total_size(sizeof(struct can_bittiming_const));
+
+ return size;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 4c50ac0..bbb6692 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -516,6 +516,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
++ skb_record_rx_queue(skb, fp->index);
+ napi_gro_receive(&fp->napi, skb);
+ } else {
+ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 4236b82..4aa830f 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -1234,6 +1234,7 @@ static void cp_tx_timeout(struct net_device *dev)
+ cp_clean_rings(cp);
+ rc = cp_init_rings(cp);
+ cp_start_hw(cp);
++ cp_enable_irq(cp);
+
+ netif_wake_queue(dev);
+
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index fd8115e..10668eb 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -873,8 +873,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+- }
+- if (!netdev_mc_empty(ndev)) {
++ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index ebb9f24..7a4c491 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
+ }
+
+ i = port->index;
++ memset(&sync, 0, sizeof(sync));
+ sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
+ /* Lucky card and linux use same encoding here */
+ sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
+index 44b7071..c643d77 100644
+--- a/drivers/net/wan/wanxl.c
++++ b/drivers/net/wan/wanxl.c
+@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
++ memset(&line, 0, sizeof(line));
+ line.clock_type = get_status(port)->clocking;
+ line.clock_rate = 0;
+ line.loopback = 0;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
+index a97a52a..408477d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
+@@ -270,11 +270,6 @@ struct iwl_cfg iwl2000_2bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-struct iwl_cfg iwl2000_2bg_cfg = {
+- .name = "2000 Series 2x2 BG",
+- IWL_DEVICE_2000,
+-};
+-
+ struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ .name = "2000D Series 2x2 BGN",
+ IWL_DEVICE_2000,
+@@ -304,11 +299,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-struct iwl_cfg iwl2030_2bg_cfg = {
+- .name = "2000 Series 2x2 BG/BT",
+- IWL_DEVICE_2030,
+-};
+-
+ #define IWL_DEVICE_105 \
+ .fw_name_pre = IWL105_FW_PRE, \
+ .ucode_api_max = IWL105_UCODE_API_MAX, \
+@@ -326,11 +316,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
+
+-struct iwl_cfg iwl105_bg_cfg = {
+- .name = "105 Series 1x1 BG",
+- IWL_DEVICE_105,
+-};
+-
+ struct iwl_cfg iwl105_bgn_cfg = {
+ .name = "105 Series 1x1 BGN",
+ IWL_DEVICE_105,
+@@ -361,11 +346,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
+
+-struct iwl_cfg iwl135_bg_cfg = {
+- .name = "135 Series 1x1 BG/BT",
+- IWL_DEVICE_135,
+-};
+-
+ struct iwl_cfg iwl135_bgn_cfg = {
+ .name = "135 Series 1x1 BGN/BT",
+ IWL_DEVICE_135,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 4ac4ef0..e1a43c4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -411,6 +411,17 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
+ .ht_params = &iwl6000_ht_params,
+ };
+
++struct iwl_cfg iwl6005_2agn_mow1_cfg = {
++ .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
++ IWL_DEVICE_6005,
++ .ht_params = &iwl6000_ht_params,
++};
++struct iwl_cfg iwl6005_2agn_mow2_cfg = {
++ .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
++ IWL_DEVICE_6005,
++ .ht_params = &iwl6000_ht_params,
++};
++
+ #define IWL_DEVICE_6030 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+@@ -469,14 +480,10 @@ struct iwl_cfg iwl6035_2agn_cfg = {
+ .ht_params = &iwl6000_ht_params,
+ };
+
+-struct iwl_cfg iwl6035_2abg_cfg = {
+- .name = "6035 Series 2x2 ABG/BT",
+- IWL_DEVICE_6030,
+-};
+-
+-struct iwl_cfg iwl6035_2bg_cfg = {
+- .name = "6035 Series 2x2 BG/BT",
+- IWL_DEVICE_6030,
++struct iwl_cfg iwl6035_2agn_sff_cfg = {
++ .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
++ IWL_DEVICE_6035,
++ .ht_params = &iwl6000_ht_params,
+ };
+
+ struct iwl_cfg iwl1030_bgn_cfg = {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
+index 2a2dc45..e786497 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
++++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
+@@ -80,6 +80,8 @@ extern struct iwl_cfg iwl6005_2abg_cfg;
+ extern struct iwl_cfg iwl6005_2bg_cfg;
+ extern struct iwl_cfg iwl6005_2agn_sff_cfg;
+ extern struct iwl_cfg iwl6005_2agn_d_cfg;
++extern struct iwl_cfg iwl6005_2agn_mow1_cfg;
++extern struct iwl_cfg iwl6005_2agn_mow2_cfg;
+ extern struct iwl_cfg iwl1030_bgn_cfg;
+ extern struct iwl_cfg iwl1030_bg_cfg;
+ extern struct iwl_cfg iwl6030_2agn_cfg;
+@@ -101,17 +103,12 @@ extern struct iwl_cfg iwl100_bg_cfg;
+ extern struct iwl_cfg iwl130_bgn_cfg;
+ extern struct iwl_cfg iwl130_bg_cfg;
+ extern struct iwl_cfg iwl2000_2bgn_cfg;
+-extern struct iwl_cfg iwl2000_2bg_cfg;
+ extern struct iwl_cfg iwl2000_2bgn_d_cfg;
+ extern struct iwl_cfg iwl2030_2bgn_cfg;
+-extern struct iwl_cfg iwl2030_2bg_cfg;
+ extern struct iwl_cfg iwl6035_2agn_cfg;
+-extern struct iwl_cfg iwl6035_2abg_cfg;
+-extern struct iwl_cfg iwl6035_2bg_cfg;
+-extern struct iwl_cfg iwl105_bg_cfg;
++extern struct iwl_cfg iwl6035_2agn_sff_cfg;
+ extern struct iwl_cfg iwl105_bgn_cfg;
+ extern struct iwl_cfg iwl105_bgn_d_cfg;
+-extern struct iwl_cfg iwl135_bg_cfg;
+ extern struct iwl_cfg iwl135_bgn_cfg;
+
+ #endif /* __iwl_pci_h__ */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
+index 346dc9b..62a0f81 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
++++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
+@@ -236,13 +236,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+
+ /* 6x00 Series */
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+@@ -250,13 +253,19 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+- {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
++ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
++ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+
+ /* 6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+@@ -326,46 +335,33 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+
+ /* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+ /* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+
+ /* 105 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+
+ /* 135 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
+
+ {0}
+ };
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+index bc33b14..a7e1a2c 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ if (phystatus) {
+- p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
++ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
++ stats->rx_bufshift);
+ rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 9d7f172..093bf0a 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -88,6 +88,7 @@ struct xenvif {
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
++ u64 credit_window_start;
+
+ /* Statistics */
+ unsigned long rx_gso_checksum_fixup;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 8eaf0e2..2cb9c92 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -272,8 +272,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+- /* Initialize 'expires' now: it's used to track the credit window. */
+- vif->credit_timeout.expires = jiffies;
++ vif->credit_window_start = get_jiffies_64();
+
+ dev->netdev_ops = &xenvif_netdev_ops;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index fd2b92d..9a4626c 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1365,9 +1365,8 @@ out:
+
+ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ {
+- unsigned long now = jiffies;
+- unsigned long next_credit =
+- vif->credit_timeout.expires +
++ u64 now = get_jiffies_64();
++ u64 next_credit = vif->credit_window_start +
+ msecs_to_jiffies(vif->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+@@ -1375,8 +1374,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ return true;
+
+ /* Passed the point where we can replenish credit? */
+- if (time_after_eq(now, next_credit)) {
+- vif->credit_timeout.expires = now;
++ if (time_after_eq64(now, next_credit)) {
++ vif->credit_window_start = now;
+ tx_add_credit(vif);
+ }
+
+@@ -1388,6 +1387,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ tx_credit_callback;
+ mod_timer(&vif->credit_timeout,
+ next_credit);
++ vif->credit_window_start = next_credit;
+
+ return true;
+ }
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 8b25f9c..41f08e5 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -188,7 +188,8 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ return ret;
+ }
+
+-static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
++static int _pci_assign_resource(struct pci_dev *dev, int resno,
++ resource_size_t size, resource_size_t min_align)
+ {
+ struct resource *res = dev->resource + resno;
+ struct pci_bus *bus;
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 705e13e..2e658d2 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
+ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+ {
+ struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
+ return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
+ }
+
+diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
+index 2fa658e..391b768 100644
+--- a/drivers/staging/bcm/Bcmchar.c
++++ b/drivers/staging/bcm/Bcmchar.c
+@@ -1932,6 +1932,7 @@ cntrlEnd:
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+
++ memset(&DevInfo, 0, sizeof(DevInfo));
+ DevInfo.MaxRDMBufferSize = BUFFER_4K;
+ DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
+ DevInfo.u32RxAlignmentCorrection = 0;
+diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
+index 260d4f0..b3d2e17 100644
+--- a/drivers/staging/wlags49_h2/wl_priv.c
++++ b/drivers/staging/wlags49_h2/wl_priv.c
+@@ -570,6 +570,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+ ltv_t *pLtv;
+ bool_t ltvAllocated = FALSE;
+ ENCSTRCT sEncryption;
++ size_t len;
+
+ #ifdef USE_WDS
+ hcf_16 hcfPort = HCF_PORT_0;
+@@ -686,7 +687,8 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+ break;
+ case CFG_CNF_OWN_NAME:
+ memset( lp->StationName, 0, sizeof( lp->StationName ));
+- memcpy( (void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
++ len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
++ strlcpy(lp->StationName, &pLtv->u.u8[2], len);
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ break;
+ case CFG_CNF_LOAD_BALANCING:
+@@ -1800,6 +1802,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
+ {
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
++ size_t len;
+ int ret = 0;
+ /*------------------------------------------------------------------------*/
+
+@@ -1810,8 +1813,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
+ wl_lock(lp, &flags);
+
+ memset( lp->StationName, 0, sizeof( lp->StationName ));
+-
+- memcpy( lp->StationName, extra, wrqu->data.length);
++ len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
++ strlcpy(lp->StationName, extra, len);
+
+ /* Commit the adapter parameters */
+ wl_apply( lp );
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index d197b3e..e1a4994 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -553,7 +553,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+ /* out of range range */
+- if (unlikely(start >= bound || end >= bound || start > end))
++ if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 5c12137..e813227 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -129,10 +129,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+ * pSCSI Host ID and enable for phba mode
+ */
+ sh = scsi_host_lookup(phv->phv_host_id);
+- if (IS_ERR(sh)) {
++ if (!sh) {
+ pr_err("pSCSI: Unable to locate SCSI Host for"
+ " phv_host_id: %d\n", phv->phv_host_id);
+- return PTR_ERR(sh);
++ return -EINVAL;
+ }
+
+ phv->phv_lld_host = sh;
+@@ -564,10 +564,10 @@ static struct se_device *pscsi_create_virtdevice(
+ sh = phv->phv_lld_host;
+ } else {
+ sh = scsi_host_lookup(pdv->pdv_host_id);
+- if (IS_ERR(sh)) {
++ if (!sh) {
+ pr_err("pSCSI: Unable to locate"
+ " pdv_host_id: %d\n", pdv->pdv_host_id);
+- return ERR_CAST(sh);
++ return ERR_PTR(-EINVAL);
+ }
+ }
+ } else {
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index a783d53..af57648 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -650,16 +650,28 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+ int mi = uio_find_mem_index(vma);
++ struct uio_mem *mem;
+ if (mi < 0)
+ return -EINVAL;
++ mem = idev->info->mem + mi;
+
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++ if (vma->vm_end - vma->vm_start > mem->size)
++ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
++ /*
++ * We cannot use the vm_iomap_memory() helper here,
++ * because vma->vm_pgoff is the map index we looked
++ * up above in uio_find_mem_index(), rather than an
++ * actual page offset into the mmap.
++ *
++ * So we just do the physical mmap without a page
++ * offset.
++ */
+ return remap_pfn_range(vma,
+ vma->vm_start,
+- idev->info->mem[mi].addr >> PAGE_SHIFT,
++ mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index f52182d..bcde6f6 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Alcor Micro Corp. Hub */
+ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* MicroTouch Systems touchscreen */
++ { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Broadcom BCM92035DGROM BT dongle */
+ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* MAYA44USB sound device */
++ { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Action Semiconductor flash disk */
+ { USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 24107a7..107e6b4 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1007,20 +1007,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ t1 = xhci_port_state_to_neutral(t1);
+ if (t1 != t2)
+ xhci_writel(xhci, t2, port_array[port_index]);
+-
+- if (hcd->speed != HCD_USB3) {
+- /* enable remote wake up for USB 2.0 */
+- __le32 __iomem *addr;
+- u32 tmp;
+-
+- /* Add one to the port status register address to get
+- * the port power control register address.
+- */
+- addr = port_array[port_index] + 1;
+- tmp = xhci_readl(xhci, addr);
+- tmp |= PORT_RWE;
+- xhci_writel(xhci, tmp, addr);
+- }
+ }
+ hcd->state = HC_STATE_SUSPENDED;
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+@@ -1099,20 +1085,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ xhci_ring_device(xhci, slot_id);
+ } else
+ xhci_writel(xhci, temp, port_array[port_index]);
+-
+- if (hcd->speed != HCD_USB3) {
+- /* disable remote wake up for USB 2.0 */
+- __le32 __iomem *addr;
+- u32 tmp;
+-
+- /* Add one to the port status register address to get
+- * the port power control register address.
+- */
+- addr = port_array[port_index] + 1;
+- tmp = xhci_readl(xhci, addr);
+- tmp &= ~PORT_RWE;
+- xhci_writel(xhci, tmp, addr);
+- }
+ }
+
+ (void) xhci_readl(xhci, &xhci->op_regs->command);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 61b0668..827f933 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -34,6 +34,9 @@
+ #define PCI_VENDOR_ID_ETRON 0x1b6f
+ #define PCI_DEVICE_ID_ASROCK_P67 0x7023
+
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
++
+ static const char hcd_name[] = "xhci_hcd";
+
+ /* called after powerup, by probe or system-pm "wakeup" */
+@@ -67,6 +70,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint\n");
+ }
++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
++ pdev->revision == 0x4) {
++ xhci->quirks |= XHCI_SLOW_SUSPEND;
++ xhci_dbg(xhci,
++ "QUIRK: Fresco Logic xHC revision %u"
++ "must be suspended extra slowly",
++ pdev->revision);
++ }
+ /* Fresco Logic confirms: all revisions of this chip do not
+ * support MSI, even though some of them claim to in their PCI
+ * capabilities.
+@@ -103,6 +114,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_AVOID_BEI;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
++ /* Workaround for occasional spurious wakeups from S5 (or
++ * any other sleep) on Haswell machines with LPT and LPT-LP
++ * with the new Intel BIOS
++ */
++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -202,6 +222,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ usb_put_hcd(xhci->shared_hcd);
+ }
+ usb_hcd_pci_remove(dev);
++
++ /* Workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ pci_set_power_state(dev, PCI_D3hot);
++
+ kfree(xhci);
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 629aa74..03c35da 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -763,12 +763,19 @@ void xhci_shutdown(struct usb_hcd *hcd)
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
++ /* Workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+ xhci_cleanup_msix(xhci);
+
+ xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
++
++ /* Yet another workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
+ }
+
+ #ifdef CONFIG_PM
+@@ -869,6 +876,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ int xhci_suspend(struct xhci_hcd *xhci)
+ {
+ int rc = 0;
++ unsigned int delay = XHCI_MAX_HALT_USEC;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
+
+@@ -887,8 +895,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command &= ~CMD_RUN;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
++
++ /* Some chips from Fresco Logic need an extraordinary delay */
++ delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
++
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
++ STS_HALT, STS_HALT, delay)) {
+ xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8b4cce45..cf4fd24 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1493,6 +1493,8 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
+ #define XHCI_AVOID_BEI (1 << 15)
++#define XHCI_SLOW_SUSPEND (1 << 17)
++#define XHCI_SPURIOUS_WAKEUP (1 << 18)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 536c4ad..d8ace82 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -457,6 +457,10 @@ static void option_instat_callback(struct urb *urb);
+ #define CHANGHONG_VENDOR_ID 0x2077
+ #define CHANGHONG_PRODUCT_CH690 0x7001
+
++/* Inovia */
++#define INOVIA_VENDOR_ID 0x20a6
++#define INOVIA_SEW858 0x1105
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -703,6 +707,222 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+
+
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+@@ -1279,7 +1499,9 @@ static const struct usb_device_id option_ids[] = {
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+- { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist
++ },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+@@ -1367,6 +1589,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
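As an aside (editorial illustration, not part of the patch): every entry in the Huawei block above is built with USB_VENDOR_AND_INTERFACE_INFO(), which matches on the vendor ID plus the interface class/subclass/protocol triple instead of a product ID, so one table line covers every Huawei device that exposes that interface signature. A rough sketch of what a single entry expands to, assuming the usual struct usb_device_id fields from linux/mod_devicetable.h:

    /* Illustrative expansion of one table entry -- the driver simply uses the macro. */
    static const struct usb_device_id huawei_example = {
        .match_flags        = USB_DEVICE_ID_MATCH_VENDOR |
                              USB_DEVICE_ID_MATCH_INT_INFO,
        .idVendor           = HUAWEI_VENDOR_ID,   /* 0x12d1 */
        .bInterfaceClass    = 0xff,               /* vendor specific */
        .bInterfaceSubClass = 0x06,
        .bInterfaceProtocol = 0x10,
    };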
+diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
+index 649cb35..1be8b5d 100644
+--- a/drivers/video/au1100fb.c
++++ b/drivers/video/au1100fb.c
+@@ -387,39 +387,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
+ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+ {
+ struct au1100fb_device *fbdev;
+- unsigned int len;
+- unsigned long start=0, off;
+
+ fbdev = to_au1100fb_device(fbi);
+
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+- return -EINVAL;
+- }
+-
+- start = fbdev->fb_phys & PAGE_MASK;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
+-
+- off = vma->vm_pgoff << PAGE_SHIFT;
+-
+- if ((vma->vm_end - vma->vm_start + off) > len) {
+- return -EINVAL;
+- }
+-
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+-
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
+
+- vma->vm_flags |= VM_IO;
+-
+- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot)) {
+- return -EAGAIN;
+- }
+-
+- return 0;
++ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ }
+
+ static struct fb_ops au1100fb_ops =
+diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
+index 7200559..5bd7d88 100644
+--- a/drivers/video/au1200fb.c
++++ b/drivers/video/au1200fb.c
+@@ -1216,38 +1216,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
+ * method mainly to allow the use of the TLB streaming flag (CCA=6)
+ */
+ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+-
+ {
+- unsigned int len;
+- unsigned long start=0, off;
+ struct au1200fb_device *fbdev = info->par;
+
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+- return -EINVAL;
+- }
+-
+- start = fbdev->fb_phys & PAGE_MASK;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
+-
+- off = vma->vm_pgoff << PAGE_SHIFT;
+-
+- if ((vma->vm_end - vma->vm_start + off) > len) {
+- return -EINVAL;
+- }
+-
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+-
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
+
+- vma->vm_flags |= VM_IO;
+-
+- return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot);
+-
+- return 0;
++ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ }
+
+ static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
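As an aside (editorial illustration, not part of the patch): both framebuffer conversions delegate the offset and length sanity checks to vm_iomap_memory(). The sketch below reconstructs, from the open-coded logic being deleted above, roughly what such a helper has to verify before calling io_remap_pfn_range(); the actual in-kernel implementation may differ in detail.

    /* Kernel-context sketch only; mirrors the checks removed from the two drivers. */
    static int iomap_memory_sketch(struct vm_area_struct *vma,
                                   phys_addr_t start, unsigned long len)
    {
        unsigned long vm_len = vma->vm_end - vma->vm_start;
        unsigned long pages, off = vma->vm_pgoff;

        /* Round the region out to whole pages, as the drivers used to do by hand. */
        len = PAGE_ALIGN((start & ~PAGE_MASK) + len);
        pages = len >> PAGE_SHIFT;

        /* Reject mappings whose offset or size falls outside the region. */
        if (off >= pages || (vm_len >> PAGE_SHIFT) > pages - off)
            return -EINVAL;

        vma->vm_pgoff += start >> PAGE_SHIFT;
        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  vm_len, vma->vm_page_prot);
    }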
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index ac1ad48..5ce56e7 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1151,7 +1151,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ struct ecryptfs_msg_ctx *msg_ctx;
+ struct ecryptfs_message *msg = NULL;
+ char *auth_tok_sig;
+- char *payload;
++ char *payload = NULL;
+ size_t payload_len;
+ int rc;
+
+@@ -1206,6 +1206,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ out:
+ if (msg)
+ kfree(msg);
++ kfree(payload);
+ return rc;
+ }
+
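As an aside (editorial illustration, not part of the patch): the eCryptfs change works because kfree(NULL) is a no-op, so initialising payload to NULL lets the single kfree() on the out: path free the buffer on every return, including the early-exit paths that used to leak it. The generic shape of that pattern (struct thing, get_payload() and use_payload() are made-up stand-ins):

    static int example(struct thing *t)
    {
        char *payload = NULL;   /* must start NULL so the kfree() below is always safe */
        int rc;

        rc = get_payload(t, &payload);  /* may fail before or after allocating */
        if (rc)
            goto out;
        rc = use_payload(payload);
    out:
        kfree(payload);                 /* kfree(NULL) is a harmless no-op */
        return rc;
    }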
+diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
+index 34f0a07..3268697 100644
+--- a/fs/ext3/dir.c
++++ b/fs/ext3/dir.c
+@@ -25,6 +25,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/buffer_head.h>
++#include <linux/compat.h>
+ #include <linux/slab.h>
+ #include <linux/rbtree.h>
+
+@@ -32,24 +33,8 @@ static unsigned char ext3_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+-static int ext3_readdir(struct file *, void *, filldir_t);
+ static int ext3_dx_readdir(struct file * filp,
+ void * dirent, filldir_t filldir);
+-static int ext3_release_dir (struct inode * inode,
+- struct file * filp);
+-
+-const struct file_operations ext3_dir_operations = {
+- .llseek = generic_file_llseek,
+- .read = generic_read_dir,
+- .readdir = ext3_readdir, /* we take BKL. needed?*/
+- .unlocked_ioctl = ext3_ioctl,
+-#ifdef CONFIG_COMPAT
+- .compat_ioctl = ext3_compat_ioctl,
+-#endif
+- .fsync = ext3_sync_file, /* BKL held */
+- .release = ext3_release_dir,
+-};
+-
+
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+@@ -60,6 +45,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ return (ext3_filetype_table[filetype]);
+ }
+
++/**
++ * Check if the given dir-inode refers to an htree-indexed directory
++ * (or a directory which could potentially get converted to use htree
++ * indexing).
++ *
++ * Return 1 if it is a dx dir, 0 if not
++ */
++static int is_dx_dir(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT3_FEATURE_COMPAT_DIR_INDEX) &&
++ ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
++ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
++ return 1;
++
++ return 0;
++}
+
+ int ext3_check_dir_entry (const char * function, struct inode * dir,
+ struct ext3_dir_entry_2 * de,
+@@ -99,18 +103,13 @@ static int ext3_readdir(struct file * filp,
+ unsigned long offset;
+ int i, stored;
+ struct ext3_dir_entry_2 *de;
+- struct super_block *sb;
+ int err;
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int dir_has_error = 0;
+
+- sb = inode->i_sb;
+-
+- if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+- EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+- ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
++ if (is_dx_dir(inode)) {
+ err = ext3_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+@@ -232,22 +231,87 @@ out:
+ return ret;
+ }
+
++static inline int is_32bit_api(void)
++{
++#ifdef CONFIG_COMPAT
++ return is_compat_task();
++#else
++ return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+ * These functions convert from the major/minor hash to an f_pos
+- * value.
++ * value for dx directories
+ *
+- * Currently we only use major hash numer. This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie. Sigh.
++ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
++ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
++ * directly on both 32-bit and 64-bit nodes, under such case, neither
++ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
+ */
+-#define hash2pos(major, minor) (major >> 1)
+-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos) (0)
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return major >> 1;
++ else
++ return ((__u64)(major >> 1) << 32) | (__u64)minor;
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return (pos << 1) & 0xffffffff;
++ else
++ return ((pos >> 32) << 1) & 0xffffffff;
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return 0;
++ else
++ return pos & 0xffffffff;
++}
++
++/*
++ * Return 32- or 64-bit end-of-file for dx directories
++ */
++static inline loff_t ext3_get_htree_eof(struct file *filp)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return EXT3_HTREE_EOF_32BIT;
++ else
++ return EXT3_HTREE_EOF_64BIT;
++}
++
++
++/*
++ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
++ * non-htree and htree directories, where the "offset" is in terms
++ * of the filename hash value instead of the byte offset.
++ *
++ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
++ * we need to pass the max hash as the maximum allowable offset in
++ * the htree directory case.
++ *
++ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
++ * will be invalid once the directory was converted into a dx directory
++ */
++loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct inode *inode = file->f_mapping->host;
++ int dx_dir = is_dx_dir(inode);
++
++ if (likely(dx_dir))
++ return generic_file_llseek_size(file, offset, origin,
++ ext3_get_htree_eof(file));
++ else
++ return generic_file_llseek(file, offset, origin);
++}
+
+ /*
+ * This structure holds the nodes of the red-black tree used to store
+@@ -308,15 +372,16 @@ static void free_rb_tree_fname(struct rb_root *root)
+ }
+
+
+-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
++ loff_t pos)
+ {
+ struct dir_private_info *p;
+
+ p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ if (!p)
+ return NULL;
+- p->curr_hash = pos2maj_hash(pos);
+- p->curr_minor_hash = pos2min_hash(pos);
++ p->curr_hash = pos2maj_hash(filp, pos);
++ p->curr_minor_hash = pos2min_hash(filp, pos);
+ return p;
+ }
+
+@@ -406,7 +471,7 @@ static int call_filldir(struct file * filp, void * dirent,
+ printk("call_filldir: called with null fname?!?\n");
+ return 0;
+ }
+- curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ while (fname) {
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
+@@ -431,13 +496,13 @@ static int ext3_dx_readdir(struct file * filp,
+ int ret;
+
+ if (!info) {
+- info = ext3_htree_create_dir_info(filp->f_pos);
++ info = ext3_htree_create_dir_info(filp, filp->f_pos);
+ if (!info)
+ return -ENOMEM;
+ filp->private_data = info;
+ }
+
+- if (filp->f_pos == EXT3_HTREE_EOF)
++ if (filp->f_pos == ext3_get_htree_eof(filp))
+ return 0; /* EOF */
+
+ /* Some one has messed with f_pos; reset the world */
+@@ -445,8 +510,8 @@ static int ext3_dx_readdir(struct file * filp,
+ free_rb_tree_fname(&info->root);
+ info->curr_node = NULL;
+ info->extra_fname = NULL;
+- info->curr_hash = pos2maj_hash(filp->f_pos);
+- info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ }
+
+ /*
+@@ -478,7 +543,7 @@ static int ext3_dx_readdir(struct file * filp,
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+- filp->f_pos = EXT3_HTREE_EOF;
++ filp->f_pos = ext3_get_htree_eof(filp);
+ break;
+ }
+ info->curr_node = rb_first(&info->root);
+@@ -498,7 +563,7 @@ static int ext3_dx_readdir(struct file * filp,
+ info->curr_minor_hash = fname->minor_hash;
+ } else {
+ if (info->next_hash == ~0) {
+- filp->f_pos = EXT3_HTREE_EOF;
++ filp->f_pos = ext3_get_htree_eof(filp);
+ break;
+ }
+ info->curr_hash = info->next_hash;
+@@ -517,3 +582,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
+
+ return 0;
+ }
++
++const struct file_operations ext3_dir_operations = {
++ .llseek = ext3_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = ext3_readdir,
++ .unlocked_ioctl = ext3_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = ext3_compat_ioctl,
++#endif
++ .fsync = ext3_sync_file,
++ .release = ext3_release_dir,
++};
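As an aside (editorial illustration, not part of the patch): for 64-bit-capable callers the hash2pos()/pos2maj_hash()/pos2min_hash() helpers added above pack the 31 usable bits of the major hash and the full 32-bit minor hash into one loff_t, which is why NFSv3+ readdir cookies stop colliding. A small standalone round-trip of that packing (userspace C; all names here are mine):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 64-bit branch of hash2pos()/pos2maj_hash()/pos2min_hash(), as added above. */
    static uint64_t hash2pos64(uint32_t major, uint32_t minor)
    {
        return ((uint64_t)(major >> 1) << 32) | minor;
    }
    static uint32_t pos2maj64(uint64_t pos) { return ((pos >> 32) << 1) & 0xffffffff; }
    static uint32_t pos2min64(uint64_t pos) { return pos & 0xffffffff; }

    int main(void)
    {
        uint32_t major = 0x89abcdee;    /* ext3fs_dirhash() always clears bit 0 */
        uint32_t minor = 0x12345678;
        uint64_t pos = hash2pos64(major, minor);

        printf("f_pos = 0x%llx\n", (unsigned long long)pos); /* 0x44d5e6f712345678 */
        assert(pos2maj64(pos) == major);
        assert(pos2min64(pos) == minor);
        return 0;
    }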
+diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
+index 7d215b4..d4d3ade 100644
+--- a/fs/ext3/hash.c
++++ b/fs/ext3/hash.c
+@@ -200,8 +200,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+ return -1;
+ }
+ hash = hash & ~1;
+- if (hash == (EXT3_HTREE_EOF << 1))
+- hash = (EXT3_HTREE_EOF-1) << 1;
++ if (hash == (EXT3_HTREE_EOF_32BIT << 1))
++ hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
+ hinfo->hash = hash;
+ hinfo->minor_hash = minor_hash;
+ return 0;
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 164c560..689d1b1 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+-static int ext4_readdir(struct file *, void *, filldir_t);
+ static int ext4_dx_readdir(struct file *filp,
+ void *dirent, filldir_t filldir);
+-static int ext4_release_dir(struct inode *inode,
+- struct file *filp);
+-
+-const struct file_operations ext4_dir_operations = {
+- .llseek = ext4_llseek,
+- .read = generic_read_dir,
+- .readdir = ext4_readdir, /* we take BKL. needed?*/
+- .unlocked_ioctl = ext4_ioctl,
+-#ifdef CONFIG_COMPAT
+- .compat_ioctl = ext4_compat_ioctl,
+-#endif
+- .fsync = ext4_sync_file,
+- .release = ext4_release_dir,
+-};
+-
+
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ return (ext4_filetype_table[filetype]);
+ }
+
++/**
++ * Check if the given dir-inode refers to an htree-indexed directory
++ * (or a directory which could potentially get converted to use htree
++ * indexing).
++ *
++ * Return 1 if it is a dx dir, 0 if not
++ */
++static int is_dx_dir(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_COMPAT_DIR_INDEX) &&
++ ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
++ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
++ return 1;
++
++ return 0;
++}
++
+ /*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp,
+ unsigned int offset;
+ int i, stored;
+ struct ext4_dir_entry_2 *de;
+- struct super_block *sb;
+ int err;
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int dir_has_error = 0;
+
+- sb = inode->i_sb;
+-
+- if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_COMPAT_DIR_INDEX) &&
+- ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
+- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
++ if (is_dx_dir(inode)) {
+ err = ext4_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+@@ -254,22 +253,134 @@ out:
+ return ret;
+ }
+
++static inline int is_32bit_api(void)
++{
++#ifdef CONFIG_COMPAT
++ return is_compat_task();
++#else
++ return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+ * These functions convert from the major/minor hash to an f_pos
+- * value.
++ * value for dx directories
++ *
++ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
++ * FMODE_64BITHASH explicitly. On the other hand, we allow ext4 to be mounted
++ * directly on both 32-bit and 64-bit nodes, in which case neither
++ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
++ */
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return major >> 1;
++ else
++ return ((__u64)(major >> 1) << 32) | (__u64)minor;
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return (pos << 1) & 0xffffffff;
++ else
++ return ((pos >> 32) << 1) & 0xffffffff;
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return 0;
++ else
++ return pos & 0xffffffff;
++}
++
++/*
++ * Return 32- or 64-bit end-of-file for dx directories
++ */
++static inline loff_t ext4_get_htree_eof(struct file *filp)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return EXT4_HTREE_EOF_32BIT;
++ else
++ return EXT4_HTREE_EOF_64BIT;
++}
++
++
++/*
++ * ext4_dir_llseek() based on generic_file_llseek() to handle both
++ * non-htree and htree directories, where the "offset" is in terms
++ * of the filename hash value instead of the byte offset.
+ *
+- * Currently we only use major hash numer. This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie. Sigh.
++ * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
++ * will be invalid once the directory is converted into a dx directory
+ */
+-#define hash2pos(major, minor) (major >> 1)
+-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos) (0)
++loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct inode *inode = file->f_mapping->host;
++ loff_t ret = -EINVAL;
++ int dx_dir = is_dx_dir(inode);
++
++ mutex_lock(&inode->i_mutex);
++
++ /* NOTE: relative offsets with dx directories might not work
++ * as expected, as it is difficult to figure out the
++ * correct offset between dx hashes */
++
++ switch (origin) {
++ case SEEK_END:
++ if (unlikely(offset > 0))
++ goto out_err; /* not supported for directories */
++
++ /* so only negative offsets are left, does that have a
++ * meaning for directories at all? */
++ if (dx_dir)
++ offset += ext4_get_htree_eof(file);
++ else
++ offset += inode->i_size;
++ break;
++ case SEEK_CUR:
++ /*
++ * Here we special-case the lseek(fd, 0, SEEK_CUR)
++ * position-querying operation. Avoid rewriting the "same"
++ * f_pos value back to the file because a concurrent read(),
++ * write() or lseek() might have altered it
++ */
++ if (offset == 0) {
++ offset = file->f_pos;
++ goto out_ok;
++ }
++
++ offset += file->f_pos;
++ break;
++ }
++
++ if (unlikely(offset < 0))
++ goto out_err;
++
++ if (!dx_dir) {
++ if (offset > inode->i_sb->s_maxbytes)
++ goto out_err;
++ } else if (offset > ext4_get_htree_eof(file))
++ goto out_err;
++
++ /* Special lock needed here? */
++ if (offset != file->f_pos) {
++ file->f_pos = offset;
++ file->f_version = 0;
++ }
++
++out_ok:
++ ret = offset;
++out_err:
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
+
+ /*
+ * This structure holds the nodes of the red-black tree used to store
+@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root)
+ }
+
+
+-static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
++ loff_t pos)
+ {
+ struct dir_private_info *p;
+
+ p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ if (!p)
+ return NULL;
+- p->curr_hash = pos2maj_hash(pos);
+- p->curr_minor_hash = pos2min_hash(pos);
++ p->curr_hash = pos2maj_hash(filp, pos);
++ p->curr_minor_hash = pos2min_hash(filp, pos);
+ return p;
+ }
+
+@@ -429,7 +541,7 @@ static int call_filldir(struct file *filp, void *dirent,
+ "null fname?!?\n");
+ return 0;
+ }
+- curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ while (fname) {
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
+@@ -454,13 +566,13 @@ static int ext4_dx_readdir(struct file *filp,
+ int ret;
+
+ if (!info) {
+- info = ext4_htree_create_dir_info(filp->f_pos);
++ info = ext4_htree_create_dir_info(filp, filp->f_pos);
+ if (!info)
+ return -ENOMEM;
+ filp->private_data = info;
+ }
+
+- if (filp->f_pos == EXT4_HTREE_EOF)
++ if (filp->f_pos == ext4_get_htree_eof(filp))
+ return 0; /* EOF */
+
+ /* Some one has messed with f_pos; reset the world */
+@@ -468,8 +580,8 @@ static int ext4_dx_readdir(struct file *filp,
+ free_rb_tree_fname(&info->root);
+ info->curr_node = NULL;
+ info->extra_fname = NULL;
+- info->curr_hash = pos2maj_hash(filp->f_pos);
+- info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ }
+
+ /*
+@@ -501,7 +613,7 @@ static int ext4_dx_readdir(struct file *filp,
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+- filp->f_pos = EXT4_HTREE_EOF;
++ filp->f_pos = ext4_get_htree_eof(filp);
+ break;
+ }
+ info->curr_node = rb_first(&info->root);
+@@ -521,7 +633,7 @@ static int ext4_dx_readdir(struct file *filp,
+ info->curr_minor_hash = fname->minor_hash;
+ } else {
+ if (info->next_hash == ~0) {
+- filp->f_pos = EXT4_HTREE_EOF;
++ filp->f_pos = ext4_get_htree_eof(filp);
+ break;
+ }
+ info->curr_hash = info->next_hash;
+@@ -540,3 +652,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
+
+ return 0;
+ }
++
++const struct file_operations ext4_dir_operations = {
++ .llseek = ext4_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = ext4_readdir,
++ .unlocked_ioctl = ext4_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = ext4_compat_ioctl,
++#endif
++ .fsync = ext4_sync_file,
++ .release = ext4_release_dir,
++};
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 60b6ca5..22c71b9 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1597,7 +1597,11 @@ struct dx_hash_info
+ u32 *seed;
+ };
+
+-#define EXT4_HTREE_EOF 0x7fffffff
++
++/* 32 and 64 bit signed EOF for dx directories */
++#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
++#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
++
+
+ /*
+ * Control parameters used by ext4_htree_next_block
+diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
+index ac8f168..fa8e491 100644
+--- a/fs/ext4/hash.c
++++ b/fs/ext4/hash.c
+@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+ return -1;
+ }
+ hash = hash & ~1;
+- if (hash == (EXT4_HTREE_EOF << 1))
+- hash = (EXT4_HTREE_EOF-1) << 1;
++ if (hash == (EXT4_HTREE_EOF_32BIT << 1))
++ hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
+ hinfo->hash = hash;
+ hinfo->minor_hash = minor_hash;
+ return 0;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index d5498b2..b4e9f3f 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1269,6 +1269,8 @@ retry:
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
++ kfree(is); is = NULL;
++ kfree(bs); bs = NULL;
+ goto retry;
+ }
+ error = -1;
+diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
+index c1a3e60..7f464c5 100644
+--- a/fs/jfs/jfs_inode.c
++++ b/fs/jfs/jfs_inode.c
+@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
+
+ if (insert_inode_locked(inode) < 0) {
+ rc = -EINVAL;
+- goto fail_unlock;
++ goto fail_put;
+ }
+
+ inode_init_owner(inode, parent, mode);
+@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
+ fail_drop:
+ dquot_drop(inode);
+ inode->i_flags |= S_NOQUOTA;
+-fail_unlock:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ fail_put:
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 561a3dc..61b697e 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -726,12 +726,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
+
+ /*
+ * Open an existing file or directory.
+- * The access argument indicates the type of open (read/write/lock)
++ * The may_flags argument indicates the type of open (read/write/lock)
++ * and additional flags.
+ * N.B. After this call fhp needs an fh_put
+ */
+ __be32
+ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+- int access, struct file **filp)
++ int may_flags, struct file **filp)
+ {
+ struct dentry *dentry;
+ struct inode *inode;
+@@ -746,7 +747,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ * and (hopefully) checked permission - so allow OWNER_OVERRIDE
+ * in case a chmod has now revoked permission.
+ */
+- err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE);
++ err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
+ if (err)
+ goto out;
+
+@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ * or any access when mandatory locking enabled
+ */
+ err = nfserr_perm;
+- if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE))
++ if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
+ goto out;
+ /*
+ * We must ignore files (but only files) which might have mandatory
+@@ -770,12 +771,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ if (!inode->i_fop)
+ goto out;
+
+- host_err = nfsd_open_break_lease(inode, access);
++ host_err = nfsd_open_break_lease(inode, may_flags);
+ if (host_err) /* NOMEM or WOULDBLOCK */
+ goto out_nfserr;
+
+- if (access & NFSD_MAY_WRITE) {
+- if (access & NFSD_MAY_READ)
++ if (may_flags & NFSD_MAY_WRITE) {
++ if (may_flags & NFSD_MAY_READ)
+ flags = O_RDWR|O_LARGEFILE;
+ else
+ flags = O_WRONLY|O_LARGEFILE;
+@@ -785,8 +786,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ if (IS_ERR(*filp)) {
+ host_err = PTR_ERR(*filp);
+ *filp = NULL;
+- } else
+- host_err = ima_file_check(*filp, access);
++ } else {
++ host_err = ima_file_check(*filp, may_flags);
++
++ if (may_flags & NFSD_MAY_64BIT_COOKIE)
++ (*filp)->f_mode |= FMODE_64BITHASH;
++ else
++ (*filp)->f_mode |= FMODE_32BITHASH;
++ }
++
+ out_nfserr:
+ err = nfserrno(host_err);
+ out:
+@@ -2016,8 +2024,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
+ __be32 err;
+ struct file *file;
+ loff_t offset = *offsetp;
++ int may_flags = NFSD_MAY_READ;
++
++ /* NFSv2 only supports 32 bit cookies */
++ if (rqstp->rq_vers > 2)
++ may_flags |= NFSD_MAY_64BIT_COOKIE;
+
+- err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file);
++ err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
+ if (err)
+ goto out;
+
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 3f54ad0..85d4d42 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -27,6 +27,8 @@
+ #define NFSD_MAY_BYPASS_GSS 0x400
+ #define NFSD_MAY_READ_IF_EXEC 0x800
+
++#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
++
+ #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
+ #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
+
+diff --git a/fs/statfs.c b/fs/statfs.c
+index 9cf04a1..a133c3e 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -86,7 +86,7 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
+
+ int fd_statfs(int fd, struct kstatfs *st)
+ {
+- struct file *file = fget(fd);
++ struct file *file = fget_raw(fd);
+ int error = -EBADF;
+ if (file) {
+ error = vfs_statfs(&file->f_path, st);
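As an aside (editorial illustration, not part of the patch): fget() refuses O_PATH descriptors while fget_raw() accepts them, so this one-word change is what lets fstatfs() work on a descriptor opened with O_PATH. A userspace call that exercises it (needs _GNU_SOURCE for O_PATH):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/vfs.h>
    #include <unistd.h>

    int main(void)
    {
        struct statfs st;
        int fd = open("/", O_PATH | O_DIRECTORY);   /* no read permission needed */

        if (fd < 0 || fstatfs(fd, &st) < 0) {
            perror("fstatfs on O_PATH fd");
            return 1;
        }
        printf("f_type=0x%lx f_bsize=%ld\n", (long)st.f_type, (long)st.f_bsize);
        close(fd);
        return 0;
    }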
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index 7639f18..8f4ae68 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -184,6 +184,8 @@ struct drm_mode_get_connector {
+ __u32 connection;
+ __u32 mm_width, mm_height; /**< HxW in millimeters */
+ __u32 subpixel;
++
++ __u32 pad;
+ };
+
+ #define DRM_MODE_PROP_PENDING (1<<0)
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 3fd17c2..5633053 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -5,6 +5,9 @@
+ /*
+ * Common definitions for all gcc versions go here.
+ */
++#define GCC_VERSION (__GNUC__ * 10000 \
++ + __GNUC_MINOR__ * 100 \
++ + __GNUC_PATCHLEVEL__)
+
+
+ /* Optimization barrier */
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index dfadc96..643d6c4 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -29,6 +29,21 @@
+ the kernel context */
+ #define __cold __attribute__((__cold__))
+
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#if GCC_VERSION <= 40801
++# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++#else
++# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
++#endif
+
+ #if __GNUC_MINOR__ >= 5
+ /*
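As an aside (editorial illustration, not part of the patch): GCC_VERSION folds the three version components into one integer, so 4.8.1 evaluates to 40801 and still takes the workaround branch, while 4.8.2 gives 40802 and emits a plain asm goto. A trivial standalone check (GCC-only, since it relies on the __GNUC__ macros):

    #include <stdio.h>

    #define GCC_VERSION (__GNUC__ * 10000 \
                         + __GNUC_MINOR__ * 100 \
                         + __GNUC_PATCHLEVEL__)

    int main(void)
    {
        printf("GCC_VERSION = %d -> %s the extra asm(\"\") barrier\n",
               GCC_VERSION, GCC_VERSION <= 40801 ? "emit" : "skip");
        return 0;
    }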
+diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
+index dec9911..d59ab12 100644
+--- a/include/linux/ext3_fs.h
++++ b/include/linux/ext3_fs.h
+@@ -781,7 +781,11 @@ struct dx_hash_info
+ u32 *seed;
+ };
+
+-#define EXT3_HTREE_EOF 0x7fffffff
++
++/* 32 and 64 bit signed EOF for dx directories */
++#define EXT3_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
++#define EXT3_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
++
+
+ /*
+ * Control parameters used by ext3_htree_next_block
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index a276817..dd74385 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -92,6 +92,10 @@ struct inodes_stat_t {
+ /* File is opened using open(.., 3, ..) and is writeable only for ioctls
+ (specialy hack for floppy.c) */
+ #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
++/* 32bit hashes as llseek() offset (for directories) */
++#define FMODE_32BITHASH ((__force fmode_t)0x200)
++/* 64bit hashes as llseek() offset (for directories) */
++#define FMODE_64BITHASH ((__force fmode_t)0x400)
+
+ /*
+ * Don't update ctime and mtime.
+@@ -907,9 +911,11 @@ static inline loff_t i_size_read(const struct inode *inode)
+ static inline void i_size_write(struct inode *inode, loff_t i_size)
+ {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++ preempt_disable();
+ write_seqcount_begin(&inode->i_size_seqcount);
+ inode->i_size = i_size;
+ write_seqcount_end(&inode->i_size_seqcount);
++ preempt_enable();
+ #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ inode->i_size = i_size;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index eeb6a29..8d5b91e 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -300,13 +300,15 @@ struct perf_event_mmap_page {
+ /*
+ * Control data for the mmap() data buffer.
+ *
+- * User-space reading the @data_head value should issue an rmb(), on
+- * SMP capable platforms, after reading this value -- see
+- * perf_event_wakeup().
++ * User-space reading the @data_head value should issue an smp_rmb(),
++ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+- * written by userspace to reflect the last read data. In this case
+- * the kernel will not over-write unread data.
++ * written by userspace to reflect the last read data, after issuing
++ * an smp_mb() to separate the data read from the ->data_tail store.
++ * In this case the kernel will not over-write unread data.
++ *
++ * See perf_output_put_handle() for the data ordering.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 29e217a..7e77cee 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -58,6 +58,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
++extern int random_int_secret_init(void);
+
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index efe50af..85180bf 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -737,6 +737,16 @@ static inline int skb_cloned(const struct sk_buff *skb)
+ (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
+ }
+
++static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
++{
++ might_sleep_if(pri & __GFP_WAIT);
++
++ if (skb_cloned(skb))
++ return pskb_expand_head(skb, 0, 0, pri);
++
++ return 0;
++}
++
+ /**
+ * skb_header_cloned - is the header a clone
+ * @skb: buffer to check
+@@ -1157,6 +1167,11 @@ static inline int skb_pagelen(const struct sk_buff *skb)
+ return len + skb_headlen(skb);
+ }
+
++static inline bool skb_has_frags(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->nr_frags;
++}
++
+ /**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index 08e90fb..5fee575 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -173,6 +173,20 @@ struct timex {
+
+ #include <asm/timex.h>
+
++#ifndef random_get_entropy
++/*
++ * The random_get_entropy() function is used by the /dev/random driver
++ * in order to extract entropy via the relative unpredictability of
++ * when an interrupt takes place versus a high speed, fine-grained
++ * timing source or cycle counter. Since it is called on every
++ * single interrupt, it must have a very low cost/overhead.
++ *
++ * By default we use get_cycles() for this purpose, but individual
++ * architectures may override this in their asm/timex.h header file.
++ */
++#define random_get_entropy() get_cycles()
++#endif
++
+ /*
+ * SHIFT_PLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in PLL mode.
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index a7a683e..a8c2ef6 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char err_offset = 0;
+ u8 opt_len = opt[1];
+ u8 opt_iter;
++ u8 tag_len;
+
+ if (opt_len < 8) {
+ err_offset = 1;
+@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ }
+
+ for (opt_iter = 6; opt_iter < opt_len;) {
+- if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
++ tag_len = opt[opt_iter + 1];
++ if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
+ err_offset = opt_iter + 1;
+ goto out;
+ }
+- opt_iter += opt[opt_iter + 1];
++ opt_iter += tag_len;
+ }
+
+ out:
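As an aside (editorial illustration, not part of the patch): the new tag_len check matters because a crafted option whose length byte is zero never advances opt_iter, so the old loop spins forever in softirq context. A standalone sketch of the same guard on a generic tag walk (hypothetical helper, not the kernel function):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns the offset of the first bad tag length, or 0 if all tags parse. */
    size_t validate_tags(const uint8_t *opt, size_t opt_len)
    {
        size_t i = 6;                  /* tags start after the 6-byte option header */

        while (i < opt_len) {
            uint8_t tag_len = opt[i + 1];

            /* Zero would loop forever; oversized tags overrun the option. */
            if (tag_len == 0 || tag_len > opt_len - i)
                return i + 1;
            i += tag_len;
        }
        return 0;
    }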
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 16010d1..86ef78d 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -459,10 +459,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
+ {
+ return dst_orig;
+ }
++
++static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
++{
++ return NULL;
++}
++
+ #else
+ extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags);
++
++/* skb attached with this dst needs transformation if dst->xfrm is valid */
++static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
++{
++ return dst->xfrm;
++}
+ #endif
+
+ #endif /* _NET_DST_H */
+diff --git a/init/main.c b/init/main.c
+index 5d0eb1d..7474450 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/random.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -732,6 +733,7 @@ static void __init do_basic_setup(void)
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
++ random_int_secret_init();
+ }
+
+ static void __init do_pre_smp_initcalls(void)
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 7f3011c..58c3b51 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -75,10 +75,31 @@ again:
+ goto out;
+
+ /*
+- * Publish the known good head. Rely on the full barrier implied
+- * by atomic_dec_and_test() order the rb->head read and this
+- * write.
++ * Since the mmap() consumer (userspace) can run on a different CPU:
++ *
++ * kernel user
++ *
++ * READ ->data_tail READ ->data_head
++ * smp_mb() (A) smp_rmb() (C)
++ * WRITE $data READ $data
++ * smp_wmb() (B) smp_mb() (D)
++ * STORE ->data_head WRITE ->data_tail
++ *
++ * Where A pairs with D, and B pairs with C.
++ *
++ * I don't think A needs to be a full barrier because we won't in fact
++ * write data until we see the store from userspace. So we simply don't
++ * issue the data WRITE until we observe it. Be conservative for now.
++ *
++ * OTOH, D needs to be a full barrier since it separates the data READ
++ * from the tail WRITE.
++ *
++ * For B a WMB is sufficient since it separates two WRITEs, and for C
++ * an RMB is sufficient since it separates two READs.
++ *
++ * See perf_output_begin().
+ */
++ smp_wmb();
+ rb->user_page->data_head = head;
+
+ /*
+@@ -142,9 +163,11 @@ int perf_output_begin(struct perf_output_handle *handle,
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer. So that all reads will be completed before the
+ * write is issued.
++ *
++ * See perf_output_put_handle().
+ */
+ tail = ACCESS_ONCE(rb->user_page->data_tail);
+- smp_rmb();
++ smp_mb();
+ offset = head = local_read(&rb->head);
+ head += size;
+ if (unlikely(!perf_output_space(rb, tail, offset, head)))
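As an aside (editorial illustration, not part of the patch): the A/B/C/D table above also spells out what a user-space reader of the perf mmap buffer must do: a read barrier between loading data_head and reading the data, and a full barrier between reading the data and publishing data_tail. A rough consumer sketch built from that comment (the struct is an abbreviated stand-in, __sync_synchronize() stands in for the precise barriers, and wrap-around handling is omitted):

    #include <stdint.h>
    #include <string.h>

    struct mmap_page {                  /* abbreviated stand-in for perf_event_mmap_page */
        volatile uint64_t data_head;
        volatile uint64_t data_tail;
    };

    /* Copy out whatever the kernel has published since the last call. */
    size_t read_events(struct mmap_page *pg, const uint8_t *data,
                       size_t size, uint8_t *out)
    {
        uint64_t head = pg->data_head;          /* READ ->data_head  (C, pairs with B) */
        __sync_synchronize();                   /* an smp_rmb() would suffice here */

        uint64_t tail = pg->data_tail;
        size_t n = (size_t)(head - tail);
        memcpy(out, data + (tail % size), n);   /* READ $data (wrap-around ignored) */

        __sync_synchronize();                   /* full barrier before the tail store (D) */
        pg->data_tail = head;                   /* WRITE ->data_tail (pairs with A) */
        return n;
    }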
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ce1067f..c5a12a7 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -534,9 +534,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ if (isspace(ch)) {
+ parser->buffer[parser->idx] = 0;
+ parser->cont = false;
+- } else {
++ } else if (parser->idx < parser->size - 1) {
+ parser->cont = true;
+ parser->buffer[parser->idx++] = ch;
++ } else {
++ ret = -EINVAL;
++ goto out;
+ }
+
+ *ppos += read;
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 4ceb05d..2ffcb3c 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -419,7 +419,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+- if (miter->__flags & SG_MITER_TO_SG)
++ if ((miter->__flags & SG_MITER_TO_SG) &&
++ !PageSlab(miter->page))
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 235c219..c705612 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -152,7 +152,7 @@ static size_t vlan_get_size(const struct net_device *dev)
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+
+ return nla_total_size(2) + /* IFLA_VLAN_ID */
+- sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
++ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
+ vlan_qos_map_size(vlan->nr_ingress_mappings) +
+ vlan_qos_map_size(vlan->nr_egress_mappings);
+ }
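As an aside (editorial illustration, not part of the patch): the one-liner matters because nla_total_size() accounts for the attribute header and padding, not just the payload, so the old code under-reserved the IFLA_VLAN_FLAGS attribute by the 4-byte header. The arithmetic, redone standalone with the usual 4-byte netlink alignment (struct names are local stand-ins):

    #include <stdio.h>

    struct nlattr_hdr { unsigned short nla_len, nla_type; };  /* mirrors struct nlattr, 4 bytes */
    struct vlan_flags { unsigned int flags, mask; };          /* mirrors struct ifla_vlan_flags, 8 bytes */

    #define NLA_ALIGNTO             4
    #define NLA_ALIGN(len)          (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_TOTAL_SIZE(payload) NLA_ALIGN(sizeof(struct nlattr_hdr) + (payload))

    int main(void)
    {
        printf("old reservation: %zu bytes, actual attribute size: %zu bytes\n",
               sizeof(struct vlan_flags),                          /* 8  */
               (size_t)NLA_TOTAL_SIZE(sizeof(struct vlan_flags))); /* 12 */
        return 0;
    }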
+diff --git a/net/compat.c b/net/compat.c
+index 8c979cc..3139ef2 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
++ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
++ return -EINVAL;
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 984ec65..4afcf31 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -268,7 +268,7 @@ begintw:
+ }
+ if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie,
+ saddr, daddr, ports, dif))) {
+- sock_put(sk);
++ inet_twsk_put(inet_twsk(sk));
+ goto begintw;
+ }
+ goto out;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index daf408e..16191f0 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -833,7 +833,7 @@ static int __ip_append_data(struct sock *sk,
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
++ if (((length > mtu) || (skb && skb_has_frags(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c45a155a3..6768ce2 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2727,7 +2727,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
+ RT_SCOPE_LINK);
+ goto make_route;
+ }
+- if (fl4->saddr) {
++ if (!fl4->saddr) {
+ if (ipv4_is_multicast(fl4->daddr))
+ fl4->saddr = inet_select_addr(dev_out, 0,
+ fl4->flowi4_scope);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 872b41d..c1ed01e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1469,7 +1469,10 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ tp->lost_cnt_hint -= tcp_skb_pcount(prev);
+ }
+
+- TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
++ TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
++ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++ TCP_SKB_CB(prev)->end_seq++;
++
+ if (skb == tcp_highest_sack(sk))
+ tcp_advance_highest_sack(sk, skb);
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 3add486..0d5a118 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -933,6 +933,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
+ unsigned int mss_now)
+ {
++ /* Make sure we own this skb before messing gso_size/gso_segs */
++ WARN_ON_ONCE(skb_cloned(skb));
++
+ if (skb->len <= mss_now || !sk_can_gso(sk) ||
+ skb->ip_summed == CHECKSUM_NONE) {
+ /* Avoid the costly divide in the normal
+@@ -1014,9 +1017,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (skb_cloned(skb) &&
+- skb_is_nonlinear(skb) &&
+- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++ if (skb_unclone(skb, GFP_ATOMIC))
+ return -ENOMEM;
+
+ /* Get a new skb... force flag on. */
+@@ -2129,6 +2130,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ int oldpcount = tcp_skb_pcount(skb);
+
+ if (unlikely(oldpcount > 1)) {
++ if (skb_unclone(skb, GFP_ATOMIC))
++ return -ENOMEM;
+ tcp_init_tso_segs(sk, skb, cur_mss);
+ tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
+ }
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 73f1a00..e38290b 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -110,7 +110,7 @@ begintw:
+ goto out;
+ }
+ if (!INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) {
+- sock_put(sk);
++ inet_twsk_put(inet_twsk(sk));
+ goto begintw;
+ }
+ goto out;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 91d0711..97675bf 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1342,7 +1342,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ skb = skb_peek_tail(&sk->sk_write_queue);
+ cork->length += length;
+ if (((length > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ (skb && skb_has_frags(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO)) {
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 18ea73c..bc9103d 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -791,7 +791,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
+ }
+
+ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
+- struct flowi6 *fl6, int flags)
++ struct flowi6 *fl6, int flags, bool input)
+ {
+ struct fib6_node *fn;
+ struct rt6_info *rt, *nrt;
+@@ -799,8 +799,11 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
+ int attempts = 3;
+ int err;
+ int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
++ int local = RTF_NONEXTHOP;
+
+ strict |= flags & RT6_LOOKUP_F_IFACE;
++ if (input)
++ local |= RTF_LOCAL;
+
+ relookup:
+ read_lock_bh(&table->tb6_lock);
+@@ -820,7 +823,7 @@ restart:
+ read_unlock_bh(&table->tb6_lock);
+
+ if (!dst_get_neighbour_raw(&rt->dst)
+- && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
++ && !(rt->rt6i_flags & local))
+ nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ else if (!(rt->dst.flags & DST_HOST))
+ nrt = rt6_alloc_clone(rt, &fl6->daddr);
+@@ -864,7 +867,7 @@ out2:
+ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
+ struct flowi6 *fl6, int flags)
+ {
+- return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
++ return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags, true);
+ }
+
+ void ip6_route_input(struct sk_buff *skb)
+@@ -890,7 +893,7 @@ void ip6_route_input(struct sk_buff *skb)
+ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
+ struct flowi6 *fl6, int flags)
+ {
+- return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
++ return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags, false);
+ }
+
+ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index e579006..8570079 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -357,7 +357,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ goto error_put_sess_tun;
+ }
+
++ local_bh_disable();
+ l2tp_xmit_skb(session, skb, session->hdr_len);
++ local_bh_enable();
+
+ sock_put(ps->tunnel_sock);
+ sock_put(sk);
+@@ -432,7 +434,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+
++ local_bh_disable();
+ l2tp_xmit_skb(session, skb, session->hdr_len);
++ local_bh_enable();
+
+ sock_put(sk_tun);
+ sock_put(sk);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 73495f1..a9cf593 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -708,6 +708,8 @@ struct tpt_led_trigger {
+ * that the scan completed.
+ * @SCAN_ABORTED: Set for our scan work function when the driver reported
+ * a scan complete for an aborted scan.
++ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
++ * cancelled.
+ */
+ enum {
+ SCAN_SW_SCANNING,
+@@ -715,6 +717,7 @@ enum {
+ SCAN_OFF_CHANNEL,
+ SCAN_COMPLETED,
+ SCAN_ABORTED,
++ SCAN_HW_CANCELLED,
+ };
+
+ /**
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 7d882fc..db01d02 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2780,6 +2780,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
+ case NL80211_IFTYPE_ADHOC:
+ if (!bssid)
+ return 0;
++ if (compare_ether_addr(sdata->vif.addr, hdr->addr2) == 0 ||
++ compare_ether_addr(sdata->u.ibss.bssid, hdr->addr2) == 0)
++ return 0;
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ return 1;
+ }
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 5279300..0aeea49 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -224,6 +224,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
+ enum ieee80211_band band;
+ int i, ielen, n_chans;
+
++ if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
++ return false;
++
+ do {
+ if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+ return false;
+@@ -815,7 +818,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ if (!local->scan_req)
+ goto out;
+
++ /*
++ * We have a scan running and the driver already reported completion,
++ * but the worker hasn't run yet or is stuck on the mutex - mark it as
++ * cancelled.
++ */
++ if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
++ test_bit(SCAN_COMPLETED, &local->scanning)) {
++ set_bit(SCAN_HW_CANCELLED, &local->scanning);
++ goto out;
++ }
++
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
++ /*
++ * Make sure that __ieee80211_scan_completed doesn't trigger a
++ * scan on another band.
++ */
++ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ if (local->ops->cancel_hw_scan)
+ drv_cancel_hw_scan(local, local->scan_sdata);
+ goto out;
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 67df50e..1a49354 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -181,6 +181,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
++ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
++ sta->last_rx = jiffies;
++
+ if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 93faf6a..4a8c55b 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1468,7 +1468,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
+
+ msglen = origlen = end - dptr;
+ if (msglen > datalen)
+- return NF_DROP;
++ return NF_ACCEPT;
+
+ ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+ if (ret != NF_ACCEPT)
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 32ba8d0..cf3e22c 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -518,7 +518,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
+ */
+ if (!sctp_checksum_disable) {
+- if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
++ if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
++ (dst_xfrm(dst) != NULL) || packet->ipfragok) {
+ __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+
+ /* 3) Put the resultant value into the checksum field in the
+diff --git a/net/socket.c b/net/socket.c
+index cf546a3..bf7adaa 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1876,6 +1876,16 @@ struct used_address {
+ unsigned int name_len;
+ };
+
++static int copy_msghdr_from_user(struct msghdr *kmsg,
++ struct msghdr __user *umsg)
++{
++ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
++ return -EFAULT;
++ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
++ return -EINVAL;
++ return 0;
++}
++
+ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned flags,
+ struct used_address *used_address)
+@@ -1894,8 +1904,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(msg_sys, msg_compat))
+ return -EFAULT;
+- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+- return -EFAULT;
++ } else {
++ err = copy_msghdr_from_user(msg_sys, msg);
++ if (err)
++ return err;
++ }
+
+ /* do not move before msg_sys is valid */
+ err = -EMSGSIZE;
+@@ -2110,8 +2123,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(msg_sys, msg_compat))
+ return -EFAULT;
+- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+- return -EFAULT;
++ } else {
++ err = copy_msghdr_from_user(msg_sys, msg);
++ if (err)
++ return err;
++ }
+
+ err = -EMSGSIZE;
+ if (msg_sys->msg_iovlen > UIO_MAXIOV)
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5611563..5122b22 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1236,6 +1236,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
+ return 0;
+ }
+
++static void unix_sock_inherit_flags(const struct socket *old,
++ struct socket *new)
++{
++ if (test_bit(SOCK_PASSCRED, &old->flags))
++ set_bit(SOCK_PASSCRED, &new->flags);
++ if (test_bit(SOCK_PASSSEC, &old->flags))
++ set_bit(SOCK_PASSSEC, &new->flags);
++}
++
+ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ {
+ struct sock *sk = sock->sk;
+@@ -1270,6 +1279,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ /* attach accepted sock to socket */
+ unix_state_lock(tsk);
+ newsock->state = SS_CONNECTED;
++ unix_sock_inherit_flags(sock, newsock);
+ sock_graft(tsk, newsock);
+ unix_state_unlock(tsk);
+ return 0;
+diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
+index c4ad795..617a310 100644
+--- a/net/wireless/radiotap.c
++++ b/net/wireless/radiotap.c
+@@ -95,6 +95,10 @@ int ieee80211_radiotap_iterator_init(
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
+ {
++ /* check the radiotap header can actually be present */
++ if (max_length < sizeof(struct ieee80211_radiotap_header))
++ return -EINVAL;
++
+ /* Linux only supports version 0 radiotap format */
+ if (radiotap_header->it_version)
+ return -EINVAL;
+@@ -129,7 +133,8 @@ int ieee80211_radiotap_iterator_init(
+ */
+
+ if ((unsigned long)iterator->_arg -
+- (unsigned long)iterator->_rtheader >
++ (unsigned long)iterator->_rtheader +
++ sizeof(uint32_t) >
+ (unsigned long)iterator->_max_length)
+ return -EINVAL;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1b43fde..92c913d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5798,6 +5798,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
++ SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
+ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index 3642e06..e44c0e3 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -414,6 +414,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
+ hubs->hp_startup_mode);
+ break;
+ }
++ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b516488..1d83a40 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1523,7 +1523,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
+ w->active ? "active" : "inactive");
+
+ list_for_each_entry(p, &w->sources, list_sink) {
+- if (p->connected && !p->connected(w, p->sink))
++ if (p->connected && !p->connected(w, p->source))
+ continue;
+
+ if (p->connect)
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index d5724d8..1226631 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
+ usX2Y_clients_stop(usX2Y);
+ }
+
+-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
+- struct snd_usX2Y_substream *subs, struct urb *urb)
+-{
+- snd_printk(KERN_ERR
+-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
+-"Most propably some urb of usb-frame %i is still missing.\n"
+-"Cause could be too long delays in usb-hcd interrupt handling.\n",
+- usb_get_current_frame_number(usX2Y->dev),
+- subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
+- usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
+- usX2Y_clients_stop(usX2Y);
+-}
+-
+ static void i_usX2Y_urb_complete(struct urb *urb)
+ {
+ struct snd_usX2Y_substream *subs = urb->context;
+@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
++
++ subs->completed_urb = urb;
++
+ {
+ struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
+ *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
+index a51340f..83a8b8d 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.c
++++ b/sound/usb/usx2y/usx2yhwdeppcm.c
+@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
+
++ subs->completed_urb = urb;
+ capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
+index 5177964..714fc35 100644
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -14,6 +14,7 @@
+ #include "util/debug.h"
+
+ #include <sys/prctl.h>
++#include <sys/resource.h>
+
+ #include <semaphore.h>
+ #include <pthread.h>