Diffstat (limited to '3.14.41/1040_linux-3.14.41.patch')
-rw-r--r--  3.14.41/1040_linux-3.14.41.patch | 3586
1 files changed, 3586 insertions, 0 deletions
diff --git a/3.14.41/1040_linux-3.14.41.patch b/3.14.41/1040_linux-3.14.41.patch
new file mode 100644
index 0000000..444b427
--- /dev/null
+++ b/3.14.41/1040_linux-3.14.41.patch
@@ -0,0 +1,3586 @@
+diff --git a/Makefile b/Makefile
+index 070e0eb..7a60d4a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Remembering Coco
+
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index 187fd46..355117c 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -154,7 +154,7 @@
+
+ uart2: serial@12200 {
+ compatible = "ns16550a";
+- reg = <0x12000 0x100>;
++ reg = <0x12200 0x100>;
+ reg-shift = <2>;
+ interrupts = <9>;
+ clocks = <&core_clk 0>;
+@@ -163,7 +163,7 @@
+
+ uart3: serial@12300 {
+ compatible = "ns16550a";
+- reg = <0x12100 0x100>;
++ reg = <0x12300 0x100>;
+ reg-shift = <2>;
+ interrupts = <10>;
+ clocks = <&core_clk 0>;
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index f4b46d3..051b726 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -114,7 +114,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
+index 7bc6668..dcbe17f 100644
+--- a/arch/arm/mach-s3c64xx/crag6410.h
++++ b/arch/arm/mach-s3c64xx/crag6410.h
+@@ -14,6 +14,7 @@
+ #include <mach/gpio-samsung.h>
+
+ #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
++#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
+
+ #define PCA935X_GPIO_BASE GPIO_BOARD_START
+ #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index 3df3c37..66b95c4 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -555,6 +555,7 @@ static struct wm831x_touch_pdata touch_pdata = {
+
+ static struct wm831x_pdata crag_pmic_pdata = {
+ .wm831x_num = 1,
++ .irq_base = BANFF_PMIC_IRQ_BASE,
+ .gpio_base = BANFF_PMIC_GPIO_BASE,
+ .soft_shutdown = true,
+
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index 6d20b7d..a268a9a 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -43,7 +43,7 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+ # Assembly rules for the .S files
+-$(obj-vdso): %.o: %.S
++$(obj-vdso): %.o: %.S FORCE
+ $(call if_changed_dep,vdsoas)
+
+ # Actual build commands
+diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
+index 356ee84..04845aa 100644
+--- a/arch/c6x/kernel/time.c
++++ b/arch/c6x/kernel/time.c
+@@ -49,7 +49,7 @@ u64 sched_clock(void)
+ return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+ }
+
+-void time_init(void)
++void __init time_init(void)
+ {
+ u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+
+diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
+deleted file mode 100644
+index 3adac3b..0000000
+--- a/arch/mips/include/asm/suspend.h
++++ /dev/null
+@@ -1,7 +0,0 @@
+-#ifndef __ASM_SUSPEND_H
+-#define __ASM_SUSPEND_H
+-
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+-#endif /* __ASM_SUSPEND_H */
+diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
+index 521e596..2129e67 100644
+--- a/arch/mips/power/cpu.c
++++ b/arch/mips/power/cpu.c
+@@ -7,7 +7,7 @@
+ * Author: Hu Hongbing <huhb@lemote.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+-#include <asm/suspend.h>
++#include <asm/sections.h>
+ #include <asm/fpu.h>
+ #include <asm/dsp.h>
+
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 32a7c82..e7567c8 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+
+ LEAF(swsusp_arch_resume)
++ /* Avoid TLB mismatch during and after kernel resume */
++ jal local_flush_tlb_all
+ PTR_L t0, restore_pblist
+ 0:
+ PTR_L t1, PBE_ADDRESS(t0) /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
+- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index 2912b87..3eb36ce 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -61,12 +61,22 @@ struct cache_type_info {
+ };
+
+ /* These are used to index the cache_type_info array. */
+-#define CACHE_TYPE_UNIFIED 0
+-#define CACHE_TYPE_INSTRUCTION 1
+-#define CACHE_TYPE_DATA 2
++#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
++#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
++#define CACHE_TYPE_INSTRUCTION 2
++#define CACHE_TYPE_DATA 3
+
+ static const struct cache_type_info cache_type_info[] = {
+ {
++ /* Embedded systems that use cache-size, cache-block-size,
++ * etc. for the Unified (typically L2) cache. */
++ .name = "Unified",
++ .size_prop = "cache-size",
++ .line_size_props = { "cache-line-size",
++ "cache-block-size", },
++ .nr_sets_prop = "cache-sets",
++ },
++ {
+ /* PowerPC Processor binding says the [di]-cache-*
+ * must be equal on unified caches, so just use
+ * d-cache properties. */
+@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
+ {
+ struct cache *iter;
+
+- if (cache->type == CACHE_TYPE_UNIFIED)
++ if (cache->type == CACHE_TYPE_UNIFIED ||
++ cache->type == CACHE_TYPE_UNIFIED_D)
+ return cache;
+
+ list_for_each_entry(iter, &cache_list, list)
+@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
+ return of_get_property(np, "cache-unified", NULL);
+ }
+
+-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+- int level)
++/*
++ * Unified caches can have two different sets of tags. Most embedded
++ * use cache-size, etc. for the unified cache size, but open firmware systems
++ * use d-cache-size, etc. Check on initialization for which type we have, and
++ * return the appropriate structure type. Assume it's embedded if it isn't
++ * open firmware. If it's yet a 3rd type, then there will be missing entries
++ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
++ * to be extended further.
++ */
++static int cache_is_unified_d(const struct device_node *np)
+ {
+- struct cache *cache;
++ return of_get_property(np,
++ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
++ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
++}
+
++/*
++ */
++static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
++{
+ pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+-
+- return cache;
++ return new_cache(cache_is_unified_d(node), level, node);
+ }
+
+ static struct cache *cache_do_one_devnode_split(struct device_node *node,
+diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
+index 0167d53..a531154 100644
+--- a/arch/powerpc/kernel/suspend.c
++++ b/arch/powerpc/kernel/suspend.c
+@@ -9,9 +9,7 @@
+
+ #include <linux/mm.h>
+ #include <asm/page.h>
+-
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
++#include <asm/sections.h>
+
+ /*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 2396dda..ead5535 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ sp = regs->gpr[1];
+ perf_callchain_store(entry, next_ip);
+
+- for (;;) {
++ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ fp = (unsigned long __user *) sp;
+ if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ return;
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index 2b90ff8..59ef76c 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
+
+ io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+
+- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
++ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
+
+ mb();
+diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
+index a7a7537..d3236c9 100644
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -13,14 +13,10 @@
+ #include <asm/ipl.h>
+ #include <asm/cio.h>
+ #include <asm/pci.h>
++#include <asm/sections.h>
+ #include "entry.h"
+
+ /*
+- * References to section boundaries
+- */
+-extern const void __nosave_begin, __nosave_end;
+-
+-/*
+ * The restore of the saved pages in an hibernation image will set
+ * the change and referenced bits in the storage key for each page.
+ * Overindication of the referenced bits after an hibernation cycle
+@@ -142,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+ unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ /* Always save lowcore pages (LC protection might be enabled). */
+ if (pfn <= LC_PAGES)
+@@ -149,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++ if (pfn >= stext_pfn && pfn <= eshared_pfn)
++ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ if (tprot(PFN_PHYS(pfn)))
+ return 1;
+ return 0;
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 75beea6..3588f2f 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -414,6 +414,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+ for (n = mem->count - 1; n > 0 ; n--)
+ memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
++ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+ mem->vm[0].cpus_total = cpus;
+ mem->vm[0].cpus_configured = cpus;
+ mem->vm[0].cpus_standby = 0;
+diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
+index 1b61997..7a99e6a 100644
+--- a/arch/sh/include/asm/sections.h
++++ b/arch/sh/include/asm/sections.h
+@@ -3,7 +3,6 @@
+
+ #include <asm-generic/sections.h>
+
+-extern long __nosave_begin, __nosave_end;
+ extern long __machvec_start, __machvec_end;
+ extern char __uncached_start, __uncached_end;
+ extern char __start_eh_frame[], __stop_eh_frame[];
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
+index 42b0b8c..17bd2e1 100644
+--- a/arch/sparc/power/hibernate.c
++++ b/arch/sparc/power/hibernate.c
+@@ -9,11 +9,9 @@
+ #include <asm/hibernate.h>
+ #include <asm/visasm.h>
+ #include <asm/page.h>
++#include <asm/sections.h>
+ #include <asm/tlb.h>
+
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ struct saved_context saved_context;
+
+ /*
+diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
+index 4dcd34a..77b5226 100644
+--- a/arch/unicore32/include/mach/pm.h
++++ b/arch/unicore32/include/mach/pm.h
+@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state);
+ /* Defined in hibernate_asm.S */
+ extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
+
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ extern struct pbe *restore_pblist;
+ #endif
+diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
+index d75ef8b..9969ec3 100644
+--- a/arch/unicore32/kernel/hibernate.c
++++ b/arch/unicore32/kernel/hibernate.c
+@@ -18,6 +18,7 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/sections.h>
+ #include <asm/suspend.h>
+
+ #include "mach/pm.h"
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index 1da25a5..3ba047c 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ :: "a" (eax), "c" (ecx));
+ }
+
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++ trace_hardirqs_on();
++ /* "mwait %eax, %ecx;" */
++ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
++ :: "a" (eax), "c" (ecx));
++}
++
+ /*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 3fb8d95..1a1ff42 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -28,6 +28,7 @@
+ #include <asm/fpu-internal.h>
+ #include <asm/debugreg.h>
+ #include <asm/nmi.h>
++#include <asm/mwait.h>
+
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -398,6 +399,52 @@ static void amd_e400_idle(void)
+ default_idle();
+ }
+
++/*
++ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
++ * We can't rely on cpuidle installing MWAIT, because it will not load
++ * on systems that support only C1 -- so the boot default must be MWAIT.
++ *
++ * Some AMD machines are the opposite, they depend on using HALT.
++ *
++ * So for default C1, which is used during boot until cpuidle loads,
++ * use MWAIT-C1 on Intel HW that has it, else use HALT.
++ */
++static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
++{
++ if (c->x86_vendor != X86_VENDOR_INTEL)
++ return 0;
++
++ if (!cpu_has(c, X86_FEATURE_MWAIT))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * MONITOR/MWAIT with no hints, used for default default C1 state.
++ * This invokes MWAIT with interrutps enabled and no flags,
++ * which is backwards compatible with the original MWAIT implementation.
++ */
++
++static void mwait_idle(void)
++{
++ if (!current_set_polling_and_test()) {
++ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
++ mb();
++ clflush((void *)&current_thread_info()->flags);
++ mb();
++ }
++
++ __monitor((void *)&current_thread_info()->flags, 0, 0);
++ if (!need_resched())
++ __sti_mwait(0, 0);
++ else
++ local_irq_enable();
++ } else
++ local_irq_enable();
++ current_clr_polling();
++}
++
+ void select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+@@ -411,6 +458,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
+ /* E400: APIC timer interrupt does not wake up CPU from C1e */
+ pr_info("using AMD E400 aware idle routine\n");
+ x86_idle = amd_e400_idle;
++ } else if (prefer_mwait_c1_over_halt(c)) {
++ pr_info("using mwait in idle threads\n");
++ x86_idle = mwait_idle;
+ } else
+ x86_idle = default_idle;
+ }
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
+index 7d28c88..291226b 100644
+--- a/arch/x86/power/hibernate_32.c
++++ b/arch/x86/power/hibernate_32.c
+@@ -13,13 +13,11 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/mmzone.h>
++#include <asm/sections.h>
+
+ /* Defined in hibernate_asm_32.S */
+ extern int restore_image(void);
+
+-/* References to section boundaries */
+-extern const void __nosave_begin, __nosave_end;
+-
+ /* Pointer to the temporary resume page tables */
+ pgd_t *resume_pg_dir;
+
+diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
+index 304fca2..2276238 100644
+--- a/arch/x86/power/hibernate_64.c
++++ b/arch/x86/power/hibernate_64.c
+@@ -17,11 +17,9 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/mtrr.h>
++#include <asm/sections.h>
+ #include <asm/suspend.h>
+
+-/* References to section boundaries */
+-extern __visible const void __nosave_begin, __nosave_end;
+-
+ /* Defined in hibernate_asm_64.S */
+ extern asmlinkage int restore_image(void);
+
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index c87ae7c..8879361 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -336,6 +336,36 @@ menu "Executable file formats"
+
+ source "fs/Kconfig.binfmt"
+
++config XTFPGA_LCD
++ bool "Enable XTFPGA LCD driver"
++ depends on XTENSA_PLATFORM_XTFPGA
++ default n
++ help
++ There's a 2x16 LCD on most of XTFPGA boards, kernel may output
++ progress messages there during bootup/shutdown. It may be useful
++ during board bringup.
++
++ If unsure, say N.
++
++config XTFPGA_LCD_BASE_ADDR
++ hex "XTFPGA LCD base address"
++ depends on XTFPGA_LCD
++ default "0x0d0c0000"
++ help
++ Base address of the LCD controller inside KIO region.
++ Different boards from XTFPGA family have LCD controller at different
++ addresses. Please consult prototyping user guide for your board for
++ the correct address. Wrong address here may lead to hardware lockup.
++
++config XTFPGA_LCD_8BIT_ACCESS
++ bool "Use 8-bit access to XTFPGA LCD"
++ depends on XTFPGA_LCD
++ default n
++ help
++ LCD may be connected with 4- or 8-bit interface, 8-bit access may
++ only be used with 8-bit interface. Please consult prototyping user
++ guide for your board for the correct interface width.
++
+ endmenu
+
+ source "net/Kconfig"
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index 50084f7..b54fa1b 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
+ __SYSCALL(324, sys_name_to_handle_at, 5)
+ #define __NR_open_by_handle_at 325
+ __SYSCALL(325, sys_open_by_handle_at, 3)
+-#define __NR_sync_file_range 326
++#define __NR_sync_file_range2 326
+ __SYSCALL(326, sys_sync_file_range2, 6)
+ #define __NR_perf_event_open 327
+ __SYSCALL(327, sys_perf_event_open, 5)
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index d05f8fe..17b1ef3 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
+ {
+ struct iss_net_private *lp = (struct iss_net_private *)priv;
+
+- spin_lock(&lp->lock);
+ iss_net_poll();
++ spin_lock(&lp->lock);
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+ spin_unlock(&lp->lock);
+ }
+@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
+ struct iss_net_private *lp = netdev_priv(dev);
+ int err;
+
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ err = lp->tp.open(lp);
+ if (err < 0)
+@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
+ while ((err = iss_net_rx(dev)) > 0)
+ ;
+
+- spin_lock(&opened_lock);
++ spin_unlock_bh(&lp->lock);
++ spin_lock_bh(&opened_lock);
+ list_add(&lp->opened_list, &opened);
+- spin_unlock(&opened_lock);
++ spin_unlock_bh(&opened_lock);
++ spin_lock_bh(&lp->lock);
+
+ init_timer(&lp->timer);
+ lp->timer_val = ISS_NET_TIMER_VALUE;
+@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
+ mod_timer(&lp->timer, jiffies + lp->timer_val);
+
+ out:
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return err;
+ }
+
+@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+
+ spin_lock(&opened_lock);
+ list_del(&opened);
+@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
+
+ lp->tp.close(lp);
+
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct iss_net_private *lp = netdev_priv(dev);
+- unsigned long flags;
+ int len;
+
+ netif_stop_queue(dev);
+- spin_lock_irqsave(&lp->lock, flags);
++ spin_lock_bh(&lp->lock);
+
+ len = lp->tp.write(lp, &skb);
+
+@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
+ }
+
+- spin_unlock_irqrestore(&lp->lock, flags);
++ spin_unlock_bh(&lp->lock);
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+- spin_lock(&lp->lock);
++ spin_lock_bh(&lp->lock);
+ memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+- spin_unlock(&lp->lock);
++ spin_unlock_bh(&lp->lock);
+ return 0;
+ }
+
+@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
+ *lp = (struct iss_net_private) {
+ .device_list = LIST_HEAD_INIT(lp->device_list),
+ .opened_list = LIST_HEAD_INIT(lp->opened_list),
+- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
+ .dev = dev,
+ .index = index,
+- };
++ };
+
++ spin_lock_init(&lp->lock);
+ /*
+ * If this name ends up conflicting with an existing registered
+ * netdevice, that is OK, register_netdev{,ice}() will notice this
+diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
+index b9ae206..7839d38 100644
+--- a/arch/xtensa/platforms/xtfpga/Makefile
++++ b/arch/xtensa/platforms/xtfpga/Makefile
+@@ -6,4 +6,5 @@
+ #
+ # Note 2! The CFLAGS definitions are in the main makefile...
+
+-obj-y = setup.o lcd.o
++obj-y += setup.o
++obj-$(CONFIG_XTFPGA_LCD) += lcd.o
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+index aeb316b..e8cc86f 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+@@ -40,9 +40,6 @@
+
+ /* UART */
+ #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
+-/* LCD instruction and data addresses. */
+-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
+-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
+
+ /* Misc. */
+ #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+index 0e43564..4c8541e 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+@@ -11,10 +11,25 @@
+ #ifndef __XTENSA_XTAVNET_LCD_H
+ #define __XTENSA_XTAVNET_LCD_H
+
++#ifdef CONFIG_XTFPGA_LCD
+ /* Display string STR at position POS on the LCD. */
+ void lcd_disp_at_pos(char *str, unsigned char pos);
+
+ /* Shift the contents of the LCD display left or right. */
+ void lcd_shiftleft(void);
+ void lcd_shiftright(void);
++#else
++static inline void lcd_disp_at_pos(char *str, unsigned char pos)
++{
++}
++
++static inline void lcd_shiftleft(void)
++{
++}
++
++static inline void lcd_shiftright(void)
++{
++}
++#endif
++
+ #endif
+diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
+index 2872301..4dc0c1b 100644
+--- a/arch/xtensa/platforms/xtfpga/lcd.c
++++ b/arch/xtensa/platforms/xtfpga/lcd.c
+@@ -1,50 +1,63 @@
+ /*
+- * Driver for the LCD display on the Tensilica LX60 Board.
++ * Driver for the LCD display on the Tensilica XTFPGA board family.
++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2006 Tensilica Inc.
++ * Copyright (C) 2015 Cadence Design Systems Inc.
+ */
+
+-/*
+- *
+- * FIXME: this code is from the examples from the LX60 user guide.
+- *
+- * The lcd_pause function does busy waiting, which is probably not
+- * great. Maybe the code could be changed to use kernel timers, or
+- * change the hardware to not need to wait.
+- */
+-
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+
+ #include <platform/hardware.h>
+ #include <platform/lcd.h>
+-#include <linux/delay.h>
+
+-#define LCD_PAUSE_ITERATIONS 4000
++/* LCD instruction and data addresses. */
++#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
++#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
++
+ #define LCD_CLEAR 0x1
+ #define LCD_DISPLAY_ON 0xc
+
+ /* 8bit and 2 lines display */
+ #define LCD_DISPLAY_MODE8BIT 0x38
++#define LCD_DISPLAY_MODE4BIT 0x28
+ #define LCD_DISPLAY_POS 0x80
+ #define LCD_SHIFT_LEFT 0x18
+ #define LCD_SHIFT_RIGHT 0x1c
+
++static void lcd_put_byte(u8 *addr, u8 data)
++{
++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*addr) = data;
++#else
++ ACCESS_ONCE(*addr) = data & 0xf0;
++ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
++#endif
++}
++
+ static int __init lcd_init(void)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ mdelay(5);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(200);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
++ udelay(50);
++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
++ udelay(50);
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
++#endif
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ udelay(50);
+- *LCD_INSTR_ADDR = LCD_CLEAR;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ mdelay(10);
+ lcd_disp_at_pos("XTENSA LINUX", 0);
+ return 0;
+@@ -52,10 +65,10 @@ static int __init lcd_init(void)
+
+ void lcd_disp_at_pos(char *str, unsigned char pos)
+ {
+- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ udelay(100);
+ while (*str != 0) {
+- *LCD_DATA_ADDR = *str;
++ lcd_put_byte(LCD_DATA_ADDR, *str);
+ udelay(200);
+ str++;
+ }
+@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
+
+ void lcd_shiftleft(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ udelay(50);
+ }
+
+ void lcd_shiftright(void)
+ {
+- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
++ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ udelay(50);
+ }
+
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 666beea..9498c3d 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -192,7 +192,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+ struct acpi_device_physical_node *pn;
+ bool offline = true;
+
+- mutex_lock(&adev->physical_node_lock);
++ /*
++ * acpi_container_offline() calls this for all of the container's
++ * children under the container's physical_node_lock lock.
++ */
++ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
+
+ list_for_each_entry(pn, &adev->physical_node_list, node)
+ if (device_supports_offline(pn->dev) && !pn->dev->offline) {
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 45d0fa7..12b39dc 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
+ goto out_put;
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+- goto out_groups;
++ goto out_id;
+ error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+- goto out_id;
++ goto out_groups;
+ error = sysfs_create_link(&dev->kobj,
+ &dev->bus->p->subsys.kobj, "subsystem");
+ if (error)
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 5afe556..26b03e1 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -64,6 +64,7 @@ static const struct usb_device_id ath3k_table[] = {
+ /* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0489, 0xE027) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x04F2, 0xAFF1) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x0CF3, 0xE019) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 03b3317..9eb1669 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -142,6 +142,7 @@ static const struct usb_device_id blacklist_table[] = {
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 0996a3a..a9dd21a 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -257,7 +257,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
+ mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+- if (rcg->mnd_width && f->n)
++ if (rcg->mnd_width && f->n && (f->m != f->n))
+ cfg |= CFG_MODE_DUAL_EDGE;
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, mask,
+ cfg);
+diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
+index c0a7d77..a90af17 100644
+--- a/drivers/clk/tegra/clk.c
++++ b/drivers/clk/tegra/clk.c
+@@ -266,7 +266,7 @@ void __init tegra_add_of_provider(struct device_node *np)
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+ rst_ctlr.of_node = np;
+- rst_ctlr.nr_resets = clk_num * 32;
++ rst_ctlr.nr_resets = periph_banks * 32;
+ reset_controller_register(&rst_ctlr);
+ }
+
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index dde41f1d..d522396 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+ return err;
+ }
+
+-static int omap_aes_check_aligned(struct scatterlist *sg)
++static int omap_aes_check_aligned(struct scatterlist *sg, int total)
+ {
++ int len = 0;
++
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, 4))
+ return -1;
+ if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ return -1;
++
++ len += sg->length;
+ sg = sg_next(sg);
+ }
++
++ if (len != total)
++ return -1;
++
+ return 0;
+ }
+
+@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ dd->in_sg = req->src;
+ dd->out_sg = req->dst;
+
+- if (omap_aes_check_aligned(dd->in_sg) ||
+- omap_aes_check_aligned(dd->out_sg)) {
++ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
++ omap_aes_check_aligned(dd->out_sg, dd->total)) {
+ if (omap_aes_copy_sgs(dd))
+ pr_err("Failed to copy SGs for unaligned cases\n");
+ dd->sgs_copied = 1;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 3b1fd1c..e9d8cf6 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -304,11 +304,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -316,11 +318,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -328,11 +332,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache &= ~mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv &= ~mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -340,11 +346,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
++ struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+- gc->mask_cache |= mask;
+- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++ ct->mask_cache_priv |= mask;
++ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 8f42bd7..f1fc14c 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1928,15 +1928,15 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
+ * that the GPIO was actually requested.
+ */
+
+-static int _gpiod_get_raw_value(const struct gpio_desc *desc)
++static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
+ {
+ struct gpio_chip *chip;
+- int value;
++ bool value;
+ int offset;
+
+ chip = desc->chip;
+ offset = gpio_chip_hwgpio(desc);
+- value = chip->get ? chip->get(chip, offset) : 0;
++ value = chip->get ? chip->get(chip, offset) : false;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
+ return value;
+ }
+@@ -1992,7 +1992,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
+ * @desc: gpio descriptor whose state need to be set.
+ * @value: Non-zero for setting it HIGH otherise it will set to LOW.
+ */
+-static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
++static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
+ {
+ int err = 0;
+ struct gpio_chip *chip = desc->chip;
+@@ -2019,7 +2019,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, int value)
+ * @desc: gpio descriptor whose state need to be set.
+ * @value: Non-zero for setting it HIGH otherise it will set to LOW.
+ */
+-static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
++static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
+ {
+ int err = 0;
+ struct gpio_chip *chip = desc->chip;
+@@ -2041,7 +2041,7 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, int value)
+ __func__, err);
+ }
+
+-static void _gpiod_set_raw_value(struct gpio_desc *desc, int value)
++static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
+ {
+ struct gpio_chip *chip;
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 0c83b3d..5b38bf8 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1181,6 +1181,7 @@
+ #define GMBUS_CYCLE_INDEX (2<<25)
+ #define GMBUS_CYCLE_STOP (4<<25)
+ #define GMBUS_BYTE_COUNT_SHIFT 16
++#define GMBUS_BYTE_COUNT_MAX 256U
+ #define GMBUS_SLAVE_INDEX_SHIFT 8
+ #define GMBUS_SLAVE_ADDR_SHIFT 1
+ #define GMBUS_SLAVE_READ (1<<0)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index d33b61d..1d02970 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -324,18 +324,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
+ }
+
+ static int
+-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+- u32 gmbus1_index)
++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len,
++ u32 gmbus1_index)
+ {
+ int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+- u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -357,11 +356,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ }
+
+ static int
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
++ u32 gmbus1_index)
+ {
+- int reg_offset = dev_priv->gpio_mmio_base;
+- u16 len = msg->len;
+ u8 *buf = msg->buf;
++ unsigned int rx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
++ buf, len, gmbus1_index);
++ if (ret)
++ return ret;
++
++ rx_size -= len;
++ buf += len;
++ } while (rx_size != 0);
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
++ unsigned short addr, u8 *buf, unsigned int len)
++{
++ int reg_offset = dev_priv->gpio_mmio_base;
++ unsigned int chunk_size = len;
+ u32 val, loop;
+
+ val = loop = 0;
+@@ -373,8 +396,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
++ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+@@ -391,6 +414,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ if (ret)
+ return ret;
+ }
++
++ return 0;
++}
++
++static int
++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++{
++ u8 *buf = msg->buf;
++ unsigned int tx_size = msg->len;
++ unsigned int len;
++ int ret;
++
++ do {
++ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
++
++ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
++ if (ret)
++ return ret;
++
++ buf += len;
++ tx_size -= len;
++ } while (tx_size != 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+index 461df93..4f32b34 100644
+--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+@@ -35,8 +35,6 @@
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
+
+-static struct platform_device *a3xx_pdev;
+-
+ static void a3xx_me_init(struct msm_gpu *gpu)
+ {
+ struct msm_ringbuffer *ring = gpu->rb;
+@@ -311,7 +309,6 @@ static void a3xx_destroy(struct msm_gpu *gpu)
+ ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
+ #endif
+
+- put_device(&a3xx_gpu->pdev->dev);
+ kfree(a3xx_gpu);
+ }
+
+@@ -439,7 +436,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+- struct platform_device *pdev = a3xx_pdev;
++ struct msm_drm_private *priv = dev->dev_private;
++ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config;
+ int ret;
+
+@@ -460,7 +458,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+- get_device(&pdev->dev);
+ a3xx_gpu->pdev = pdev;
+
+ gpu->fast_rate = config->fast_rate;
+@@ -522,17 +519,24 @@ fail:
+ # include <mach/kgsl.h>
+ #endif
+
+-static int a3xx_probe(struct platform_device *pdev)
++static void set_gpu_pdev(struct drm_device *dev,
++ struct platform_device *pdev)
++{
++ struct msm_drm_private *priv = dev->dev_private;
++ priv->gpu_pdev = pdev;
++}
++
++static int a3xx_bind(struct device *dev, struct device *master, void *data)
+ {
+ static struct adreno_platform_config config = {};
+ #ifdef CONFIG_OF
+- struct device_node *child, *node = pdev->dev.of_node;
++ struct device_node *child, *node = dev->of_node;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(node, "qcom,chipid", &val);
+ if (ret) {
+- dev_err(&pdev->dev, "could not find chipid: %d\n", ret);
++ dev_err(dev, "could not find chipid: %d\n", ret);
+ return ret;
+ }
+
+@@ -548,7 +552,7 @@ static int a3xx_probe(struct platform_device *pdev)
+ for_each_child_of_node(child, pwrlvl) {
+ ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
+ if (ret) {
+- dev_err(&pdev->dev, "could not find gpu-freq: %d\n", ret);
++ dev_err(dev, "could not find gpu-freq: %d\n", ret);
+ return ret;
+ }
+ config.fast_rate = max(config.fast_rate, val);
+@@ -558,12 +562,12 @@ static int a3xx_probe(struct platform_device *pdev)
+ }
+
+ if (!config.fast_rate) {
+- dev_err(&pdev->dev, "could not find clk rates\n");
++ dev_err(dev, "could not find clk rates\n");
+ return -ENXIO;
+ }
+
+ #else
+- struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
++ struct kgsl_device_platform_data *pdata = dev->platform_data;
+ uint32_t version = socinfo_get_version();
+ if (cpu_is_apq8064ab()) {
+ config.fast_rate = 450000000;
+@@ -609,14 +613,30 @@ static int a3xx_probe(struct platform_device *pdev)
+ config.bus_scale_table = pdata->bus_scale_table;
+ # endif
+ #endif
+- pdev->dev.platform_data = &config;
+- a3xx_pdev = pdev;
++ dev->platform_data = &config;
++ set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ return 0;
+ }
+
++static void a3xx_unbind(struct device *dev, struct device *master,
++ void *data)
++{
++ set_gpu_pdev(dev_get_drvdata(master), NULL);
++}
++
++static const struct component_ops a3xx_ops = {
++ .bind = a3xx_bind,
++ .unbind = a3xx_unbind,
++};
++
++static int a3xx_probe(struct platform_device *pdev)
++{
++ return component_add(&pdev->dev, &a3xx_ops);
++}
++
+ static int a3xx_remove(struct platform_device *pdev)
+ {
+- a3xx_pdev = NULL;
++ component_del(&pdev->dev, &a3xx_ops);
+ return 0;
+ }
+
+@@ -624,7 +644,6 @@ static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+ };
+-MODULE_DEVICE_TABLE(of, dt_match);
+
+ static struct platform_driver a3xx_driver = {
+ .probe = a3xx_probe,
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 6f1588a..8a04a1d 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -17,8 +17,6 @@
+
+ #include "hdmi.h"
+
+-static struct platform_device *hdmi_pdev;
+-
+ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+ {
+ uint32_t ctrl = 0;
+@@ -75,7 +73,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+ {
+ struct hdmi *hdmi = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+- struct platform_device *pdev = hdmi_pdev;
++ struct platform_device *pdev = priv->hdmi_pdev;
+ struct hdmi_platform_config *config;
+ int i, ret;
+
+@@ -95,8 +93,6 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+
+ kref_init(&hdmi->refcount);
+
+- get_device(&pdev->dev);
+-
+ hdmi->dev = dev;
+ hdmi->pdev = pdev;
+ hdmi->config = config;
+@@ -249,17 +245,24 @@ fail:
+
+ #include <linux/of_gpio.h>
+
+-static int hdmi_dev_probe(struct platform_device *pdev)
++static void set_hdmi_pdev(struct drm_device *dev,
++ struct platform_device *pdev)
++{
++ struct msm_drm_private *priv = dev->dev_private;
++ priv->hdmi_pdev = pdev;
++}
++
++static int hdmi_bind(struct device *dev, struct device *master, void *data)
+ {
+ static struct hdmi_platform_config config = {};
+ #ifdef CONFIG_OF
+- struct device_node *of_node = pdev->dev.of_node;
++ struct device_node *of_node = dev->of_node;
+
+ int get_gpio(const char *name)
+ {
+ int gpio = of_get_named_gpio(of_node, name, 0);
+ if (gpio < 0) {
+- dev_err(&pdev->dev, "failed to get gpio: %s (%d)\n",
++ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
+@@ -336,14 +339,30 @@ static int hdmi_dev_probe(struct platform_device *pdev)
+ config.mux_sel_gpio = -1;
+ }
+ #endif
+- pdev->dev.platform_data = &config;
+- hdmi_pdev = pdev;
++ dev->platform_data = &config;
++ set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ return 0;
+ }
+
++static void hdmi_unbind(struct device *dev, struct device *master,
++ void *data)
++{
++ set_hdmi_pdev(dev_get_drvdata(master), NULL);
++}
++
++static const struct component_ops hdmi_ops = {
++ .bind = hdmi_bind,
++ .unbind = hdmi_unbind,
++};
++
++static int hdmi_dev_probe(struct platform_device *pdev)
++{
++ return component_add(&pdev->dev, &hdmi_ops);
++}
++
+ static int hdmi_dev_remove(struct platform_device *pdev)
+ {
+- hdmi_pdev = NULL;
++ component_del(&pdev->dev, &hdmi_ops);
+ return 0;
+ }
+
+@@ -351,7 +370,6 @@ static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx" },
+ {}
+ };
+-MODULE_DEVICE_TABLE(of, dt_match);
+
+ static struct platform_driver hdmi_driver = {
+ .probe = hdmi_dev_probe,
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index e6adafc..e79cfd0 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -56,6 +56,10 @@ static char *vram;
+ MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
+ module_param(vram, charp, 0);
+
++/*
++ * Util/helpers:
++ */
++
+ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+ {
+@@ -143,6 +147,8 @@ static int msm_unload(struct drm_device *dev)
+ priv->vram.paddr, &attrs);
+ }
+
++ component_unbind_all(dev->dev, dev);
++
+ dev->dev_private = NULL;
+
+ kfree(priv);
+@@ -175,6 +181,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
+ struct msm_kms *kms;
+ int ret;
+
++
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+@@ -226,6 +233,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
+ (uint32_t)(priv->vram.paddr + size));
+ }
+
++ platform_set_drvdata(pdev, dev);
++
++ /* Bind all our sub-components: */
++ ret = component_bind_all(dev->dev, dev);
++ if (ret)
++ return ret;
++
+ switch (get_mdp_ver(pdev)) {
+ case 4:
+ kms = mdp4_kms_init(dev);
+@@ -281,8 +295,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
+ goto fail;
+ }
+
+- platform_set_drvdata(pdev, dev);
+-
+ #ifdef CONFIG_DRM_MSM_FBDEV
+ priv->fbdev = msm_fbdev_init(dev);
+ #endif
+@@ -819,18 +831,110 @@ static const struct dev_pm_ops msm_pm_ops = {
+ };
+
+ /*
++ * Componentized driver support:
++ */
++
++#ifdef CONFIG_OF
++/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
++ * (or probably any other).. so probably some room for some helpers
++ */
++static int compare_of(struct device *dev, void *data)
++{
++ return dev->of_node == data;
++}
++
++static int msm_drm_add_components(struct device *master, struct master *m)
++{
++ struct device_node *np = master->of_node;
++ unsigned i;
++ int ret;
++
++ for (i = 0; ; i++) {
++ struct device_node *node;
++
++ node = of_parse_phandle(np, "connectors", i);
++ if (!node)
++ break;
++
++ ret = component_master_add_child(m, compare_of, node);
++ of_node_put(node);
++
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++#else
++static int compare_dev(struct device *dev, void *data)
++{
++ return dev == data;
++}
++
++static int msm_drm_add_components(struct device *master, struct master *m)
++{
++ /* For non-DT case, it kinda sucks. We don't actually have a way
++ * to know whether or not we are waiting for certain devices (or if
++ * they are simply not present). But for non-DT we only need to
++ * care about apq8064/apq8060/etc (all mdp4/a3xx):
++ */
++ static const char *devnames[] = {
++ "hdmi_msm.0", "kgsl-3d0.0",
++ };
++ int i;
++
++ DBG("Adding components..");
++
++ for (i = 0; i < ARRAY_SIZE(devnames); i++) {
++ struct device *dev;
++ int ret;
++
++ dev = bus_find_device_by_name(&platform_bus_type,
++ NULL, devnames[i]);
++ if (!dev) {
++ dev_info(master, "still waiting for %s\n", devnames[i]);
++ return -EPROBE_DEFER;
++ }
++
++ ret = component_master_add_child(m, compare_dev, dev);
++ if (ret) {
++ DBG("could not add child: %d", ret);
++ return ret;
++ }
++ }
++
++ return 0;
++}
++#endif
++
++static int msm_drm_bind(struct device *dev)
++{
++ return drm_platform_init(&msm_driver, to_platform_device(dev));
++}
++
++static void msm_drm_unbind(struct device *dev)
++{
++ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
++}
++
++static const struct component_master_ops msm_drm_ops = {
++ .add_components = msm_drm_add_components,
++ .bind = msm_drm_bind,
++ .unbind = msm_drm_unbind,
++};
++
++/*
+ * Platform driver:
+ */
+
+ static int msm_pdev_probe(struct platform_device *pdev)
+ {
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+- return drm_platform_init(&msm_driver, pdev);
++ return component_master_add(&pdev->dev, &msm_drm_ops);
+ }
+
+ static int msm_pdev_remove(struct platform_device *pdev)
+ {
+- drm_put_dev(platform_get_drvdata(pdev));
++ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
+index 3d63269..9d10ee0 100644
+--- a/drivers/gpu/drm/msm/msm_drv.h
++++ b/drivers/gpu/drm/msm/msm_drv.h
+@@ -22,6 +22,7 @@
+ #include <linux/clk.h>
+ #include <linux/cpufreq.h>
+ #include <linux/module.h>
++#include <linux/component.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -69,6 +70,9 @@ struct msm_drm_private {
+
+ struct msm_kms *kms;
+
++ /* subordinate devices, if present: */
++ struct platform_device *hdmi_pdev, *gpu_pdev;
++
+ /* when we have more than one 'msm_gpu' these need to be an array: */
+ struct msm_gpu *gpu;
+ struct msm_file_private *lastctx;
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 663394f..0db3e20 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index e99e71a..356f22f 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -134,7 +134,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+@@ -150,7 +150,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+- goto error0;
++ goto error_gpadl;
+ }
+
+ if (userdatalen)
+@@ -194,6 +194,9 @@ error1:
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
++error_gpadl:
++ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
++
+ error0:
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index 5fb80b8..43fe15a 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -217,6 +217,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+ adap->bus_recovery_info->set_scl(adap, 1);
+ return i2c_generic_recovery(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
+
+ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ {
+@@ -231,6 +232,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
+
+ int i2c_recover_bus(struct i2c_adapter *adap)
+ {
+@@ -240,6 +242,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
+ dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ return adap->bus_recovery_info->recover_bus(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_recover_bus);
+
+ static int i2c_device_probe(struct device *dev)
+ {
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 055ebeb..c1fef27 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -94,12 +94,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ if (dmasync)
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+
++ if (!size)
++ return ERR_PTR(-EINVAL);
++
+ /*
+ * If the combination of the addr and size requested for this memory
+ * region causes an integer overflow, return error.
+ */
+- if ((PAGE_ALIGN(addr + size) <= size) ||
+- (PAGE_ALIGN(addr + size) <= addr))
++ if (((addr + size) < addr) ||
++ PAGE_ALIGN(addr + size) < (addr + size))
+ return ERR_PTR(-EINVAL);
+
+ if (!can_do_mlock())
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index d8f4d1f..8d7cd98 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2274,8 +2274,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+
+ memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+- wr->wr.ud.hlen);
++ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ *lso_seg_len = halign;
+ return 0;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 0b75b57..cfc5a2e 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -814,6 +814,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
+ }
+
+ /*
++ * This writes the reg_07 value again to the hardware at the end of every
++ * set_rate call because the register loses its value. reg_07 allows setting
++ * absolute mode on v4 hardware
++ */
++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
++ unsigned int rate)
++{
++ struct elantech_data *etd = psmouse->private;
++
++ etd->original_set_rate(psmouse, rate);
++ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
++ psmouse_err(psmouse, "restoring reg_07 failed\n");
++}
++
++/*
+ * Put the touchpad into absolute mode
+ */
+ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+@@ -1015,6 +1030,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
+ * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
+ * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
++ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
++ * Asus X750JN 0x381f17 10, 14, 0e clickpad
+ * Asus UX31 0x361f00 20, 15, 0e clickpad
+ * Asus UX32VD 0x361f02 00, 15, 0e clickpad
+ * Avatar AVIU-145A2 0x361f00 ? clickpad
+@@ -1490,6 +1507,11 @@ int elantech_init(struct psmouse *psmouse)
+ goto init_fail;
+ }
+
++ if (etd->fw_version == 0x381f17) {
++ etd->original_set_rate = psmouse->set_rate;
++ psmouse->set_rate = elantech_set_rate_restore_reg_07;
++ }
++
+ if (elantech_set_input_params(psmouse)) {
+ psmouse_err(psmouse, "failed to query touchpad range.\n");
+ goto init_fail;
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 9e0e2a1..59263a3 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -139,6 +139,7 @@ struct elantech_data {
+ struct finger_pos mt[ETP_MAX_FINGERS];
+ unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
++ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
+ };
+
+ #ifdef CONFIG_MOUSE_PS2_ELANTECH
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 4a8d19d..5a4cda2 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -915,11 +915,10 @@ static int crypt_convert(struct crypt_config *cc,
+
+ switch (r) {
+ /* async */
++ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&ctx->restart);
+ reinit_completion(&ctx->restart);
+- /* fall through*/
+- case -EINPROGRESS:
+ ctx->req = NULL;
+ ctx->cc_sector++;
+ continue;
+@@ -1314,10 +1313,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+ struct crypt_config *cc = io->cc;
+
+- if (error == -EINPROGRESS) {
+- complete(&ctx->restart);
++ if (error == -EINPROGRESS)
+ return;
+- }
+
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+@@ -1328,12 +1325,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+
+ if (!atomic_dec_and_test(&ctx->cc_pending))
+- return;
++ goto done;
+
+ if (bio_data_dir(io->base_bio) == READ)
+ kcryptd_crypt_read_done(io);
+ else
+ kcryptd_crypt_write_io_submit(io, 1);
++done:
++ if (!completion_done(&ctx->restart))
++ complete(&ctx->restart);
+ }
+
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 407a99e..683e685 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -320,7 +320,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
+
+ /*
+ * remaps the bio to the target device. we separate two flows.
+- * power 2 flow and a general flow for the sake of perfromance
++ * power 2 flow and a general flow for the sake of performance
+ */
+ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ sector_t sector, sector_t *sector_offset)
+@@ -538,6 +538,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ split = bio;
+ }
+
++ sector = bio->bi_iter.bi_sector;
+ zone = find_zone(mddev->private, &sector);
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ split->bi_bdev = tmp_dev->bdev;
+diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
+index c45c988..4572530 100644
+--- a/drivers/media/usb/stk1160/stk1160-v4l.c
++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
+@@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
+ if (mutex_lock_interruptible(&dev->v4l_lock))
+ return -ERESTARTSYS;
+
++ /*
++ * Once URBs are cancelled, the URB complete handler
++ * won't be running. This is required to safely release the
++ * current buffer (dev->isoc_ctl.buf).
++ */
+ stk1160_cancel_isoc(dev);
+
+ /*
+@@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
+- /* It's important to clear current buffer */
+- dev->isoc_ctl.buf = NULL;
++
++ /* It's important to release the current buffer */
++ if (dev->isoc_ctl.buf) {
++ buf = dev->isoc_ctl.buf;
++ dev->isoc_ctl.buf = NULL;
++
++ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
++ stk1160_info("buffer [%p/%d] aborted\n",
++ buf, buf->vb.v4l2_buf.index);
++ }
+ spin_unlock_irqrestore(&dev->buf_lock, flags);
+ }
+
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index fc145d2..922a750 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+
+ if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ if (msb->data_dir == READ) {
+- for (cnt = 0; cnt < msb->current_seg; cnt++)
++ for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ t_len += msb->req_sg[cnt].length
+ / msb->page_size;
+
+@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ t_len += msb->current_page - 1;
+
+ t_len *= msb->page_size;
++ }
+ }
+ } else
+ t_len = blk_rq_bytes(msb->block_req);
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index 6f27d9a..21841fe 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+- bitflips = !!err;
++ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 8ca49f2..4cbbd55 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -451,7 +451,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ /* Validate the request */
+ err = -EINVAL;
+ if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
++ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+
+ err = get_exclusive(desc);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 0e11671d..930cf2c 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+- vol->eba_tbl[aeb->lnum] = aeb->pnum;
++ else
++ vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ }
+ }
+
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 68b924e..c6b0b07 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -995,7 +995,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int cancel)
+ {
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+- int vol_id = -1, uninitialized_var(lnum);
++ int vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+ #endif
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 46e6544..b655fe4 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int *work_done, int work_to_do);
++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
++ struct e1000_rx_ring *rx_ring,
++ int cleaned_count)
++{
++}
+ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count);
+@@ -3531,8 +3536,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ msleep(1);
+ /* e1000_down has a dependency on max_frame_size */
+ hw->max_frame_size = max_frame;
+- if (netif_running(netdev))
++ if (netif_running(netdev)) {
++ /* prevent buffers from being reallocated */
++ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
+ e1000_down(adapter);
++ }
+
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index f583167..66c92a1 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -314,6 +314,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+@@ -370,6 +371,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
++ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
+index 7f1669c..779dc2b 100644
+--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
+@@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+
+-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
+
+ WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index f7381dd..1bce432 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -26,8 +26,8 @@
+
+ #include "wlcore.h"
+
+-int wl1271_format_buffer(char __user *userbuf, size_t count,
+- loff_t *ppos, char *fmt, ...);
++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
++ loff_t *ppos, char *fmt, ...);
+
+ int wl1271_debugfs_init(struct wl1271 *wl);
+ void wl1271_debugfs_exit(struct wl1271 *wl);
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index 7297df2..2d9d198 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -1037,7 +1037,9 @@ static int compal_probe(struct platform_device *pdev)
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+- power_supply_register(&compal_device->dev, &data->psy);
++ err = power_supply_register(&compal_device->dev, &data->psy);
++ if (err < 0)
++ goto remove;
+
+ platform_set_drvdata(pdev, data);
+
+diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
+index ed49b50..72da2a6 100644
+--- a/drivers/power/lp8788-charger.c
++++ b/drivers/power/lp8788-charger.c
+@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
+ pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
+ pchg->battery.get_property = lp8788_battery_get_property;
+
+- if (power_supply_register(&pdev->dev, &pchg->battery))
++ if (power_supply_register(&pdev->dev, &pchg->battery)) {
++ power_supply_unregister(&pchg->charger);
+ return -EPERM;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
+index 7ef445a..cf90760 100644
+--- a/drivers/power/twl4030_madc_battery.c
++++ b/drivers/power/twl4030_madc_battery.c
+@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ {
+ struct twl4030_madc_battery *twl4030_madc_bat;
+ struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
++ int ret = 0;
+
+ twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ if (!twl4030_madc_bat)
+@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+
+ twl4030_madc_bat->pdata = pdata;
+ platform_set_drvdata(pdev, twl4030_madc_bat);
+- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++ if (ret < 0)
++ kfree(twl4030_madc_bat);
+
+- return 0;
++ return ret;
+ }
+
+ static int twl4030_madc_battery_remove(struct platform_device *pdev)
+diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
+index 65180e1..50c75e1 100644
+--- a/drivers/scsi/bfa/bfa_ioc.c
++++ b/drivers/scsi/bfa/bfa_ioc.c
+@@ -7006,7 +7006,7 @@ bfa_flash_sem_get(void __iomem *bar)
+ while (!bfa_raw_sem_get(bar)) {
+ if (--n <= 0)
+ return BFA_STATUS_BADFLASH;
+- udelay(10000);
++ mdelay(10);
+ }
+ return BFA_STATUS_OK;
+ }
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 6c1f223..4c0b8b4 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ struct mvs_task_exec_info *tei)
+ {
+- struct sas_ha_struct *sha = mvi->sas;
+ struct sas_task *task = tei->task;
+ struct domain_device *dev = task->dev;
+ struct mvs_device *mvi_dev = dev->lldd_dev;
+ struct mvs_cmd_hdr *hdr = tei->hdr;
+ struct asd_sas_port *sas_port = dev->port;
+- struct sas_phy *sphy = dev->phy;
+- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ struct mvs_slot_info *slot;
+ void *buf_prd;
+ u32 tag = tei->tag, hdr_tag;
+@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ slot->tx = mvi->tx_prod;
+ del_q = TXQ_MODE_I | tag |
+ (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
++ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 86b0515..97892f2 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -739,21 +739,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ if (bounce_sgl[j].length == PAGE_SIZE) {
+ /* full..move to next entry */
+ sg_kunmap_atomic(bounce_addr);
++ bounce_addr = 0;
+ j++;
++ }
+
+- /* if we need to use another bounce buffer */
+- if (srclen || i != orig_sgl_count - 1)
+- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
++ /* if we need to use another bounce buffer */
++ if (srclen && bounce_addr == 0)
++ bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+
+- } else if (srclen == 0 && i == orig_sgl_count - 1) {
+- /* unmap the last bounce that is < PAGE_SIZE */
+- sg_kunmap_atomic(bounce_addr);
+- }
+ }
+
+ sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ }
+
++ if (bounce_addr)
++ sg_kunmap_atomic(bounce_addr);
++
+ local_irq_restore(flags);
+
+ return total_copied;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index d7c6e36..2fe5b61 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -243,7 +243,10 @@ static int spidev_message(struct spidev_data *spidev,
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+- if (total > bufsiz) {
++ /* Check total length of transfers. Also check each
++ * transfer length to avoid arithmetic overflow.
++ */
++ if (total > bufsiz || k_tmp->len > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 41eff7d..b199f1e 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -263,40 +263,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ struct se_device *se_dev = cmd->se_dev;
+ struct fd_dev *dev = FD_DEV(se_dev);
+ struct file *prot_fd = dev->fd_prot_file;
+- struct scatterlist *sg;
+ loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ unsigned char *buf;
+- u32 prot_size, len, size;
+- int rc, ret = 1, i;
++ u32 prot_size;
++ int rc, ret = 1;
+
+ prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ se_dev->prot_length;
+
+ if (!is_write) {
+- fd_prot->prot_buf = vzalloc(prot_size);
++ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
+ if (!fd_prot->prot_buf) {
+ pr_err("Unable to allocate fd_prot->prot_buf\n");
+ return -ENOMEM;
+ }
+ buf = fd_prot->prot_buf;
+
+- fd_prot->prot_sg_nents = cmd->t_prot_nents;
+- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+- fd_prot->prot_sg_nents, GFP_KERNEL);
++ fd_prot->prot_sg_nents = 1;
++ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
++ GFP_KERNEL);
+ if (!fd_prot->prot_sg) {
+ pr_err("Unable to allocate fd_prot->prot_sg\n");
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ return -ENOMEM;
+ }
+- size = prot_size;
+-
+- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+-
+- len = min_t(u32, PAGE_SIZE, size);
+- sg_set_buf(sg, buf, len);
+- size -= len;
+- buf += len;
+- }
++ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
++ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
+ }
+
+ if (is_write) {
+@@ -317,7 +309,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+
+ if (is_write || ret < 0) {
+ kfree(fd_prot->prot_sg);
+- vfree(fd_prot->prot_buf);
++ kfree(fd_prot->prot_buf);
+ }
+
+ return ret;
+@@ -652,11 +644,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ }
+ } else {
+ memset(&fd_prot, 0, sizeof(struct fd_prot));
+@@ -672,7 +664,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 0, fd_prot.prot_sg, 0);
+ if (rc) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return rc;
+ }
+ }
+@@ -703,7 +695,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+
+ if (ret < 0) {
+ kfree(fd_prot.prot_sg);
+- vfree(fd_prot.prot_buf);
++ kfree(fd_prot.prot_buf);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 68511e8..f89b24a 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -314,7 +314,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ return 0;
+ }
+
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+ {
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+@@ -378,7 +378,7 @@ sbc_execute_rw(struct se_cmd *cmd)
+ cmd->data_direction);
+ }
+
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+
+@@ -401,7 +401,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+ return TCM_NO_SENSE;
+ }
+
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *write_sg = NULL, *sg;
+@@ -416,11 +416,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+- * which will not have taken ->caw_mutex yet..
++ * which will not have taken ->caw_sem yet..
+ */
+- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
++ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+ return TCM_NO_SENSE;
+ /*
++ * Handle special case for zero-length COMPARE_AND_WRITE
++ */
++ if (!cmd->data_length)
++ goto out;
++ /*
+ * Immediately exit + release dev->caw_sem if command has already
+ * been failed with a non-zero SCSI status.
+ */
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9e54c0f..6fc3890 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1600,11 +1600,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ transport_complete_task_attr(cmd);
+ /*
+ * Handle special case for COMPARE_AND_WRITE failure, where the
+- * callback is expected to drop the per device ->caw_mutex.
++ * callback is expected to drop the per device ->caw_sem.
+ */
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->transport_complete_callback)
+- cmd->transport_complete_callback(cmd);
++ cmd->transport_complete_callback(cmd, false);
+
+ switch (sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+@@ -1941,8 +1941,12 @@ static void target_complete_ok_work(struct work_struct *work)
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+
+- rc = cmd->transport_complete_callback(cmd);
++ rc = cmd->transport_complete_callback(cmd, true);
+ if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
++ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ !cmd->data_length)
++ goto queue_rsp;
++
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+@@ -1956,6 +1960,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ }
+ }
+
++queue_rsp:
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ spin_lock(&cmd->se_lun->lun_sep_lock);
+@@ -2044,6 +2049,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+ static inline void transport_free_pages(struct se_cmd *cmd)
+ {
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++ /*
++ * Release special case READ buffer payload required for
++ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
++ */
++ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++ transport_free_sgl(cmd->t_bidi_data_sg,
++ cmd->t_bidi_data_nents);
++ cmd->t_bidi_data_sg = NULL;
++ cmd->t_bidi_data_nents = 0;
++ }
+ transport_reset_sgl_orig(cmd);
+ return;
+ }
+@@ -2192,6 +2207,7 @@ sense_reason_t
+ transport_generic_new_cmd(struct se_cmd *cmd)
+ {
+ int ret = 0;
++ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ /*
+ * Determine is the TCM fabric module has already allocated physical
+@@ -2200,7 +2216,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ */
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+@@ -2223,6 +2238,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ cmd->data_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++ cmd->data_length) {
++ /*
++ * Special case for COMPARE_AND_WRITE with fabrics
++ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
++ */
++ u32 caw_length = cmd->t_task_nolb *
++ cmd->se_dev->dev_attrib.block_size;
++
++ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
++ &cmd->t_bidi_data_nents,
++ caw_length, zero_flag);
++ if (ret < 0)
++ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * If this command is not a write we can execute it right here,
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index a051a7a..a81f9dd 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
+ case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ dev_dbg(&desc->intf->dev,
+ "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+- dr->wIndex, dr->wLength);
++ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
+ break;
+
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
+ clear_bit(WDM_POLL_RUNNING, &desc->flags);
+ dev_err(&desc->intf->dev,
+ "unknown notification %d received: index %d len %d\n",
+- dr->bNotificationType, dr->wIndex, dr->wLength);
++ dr->bNotificationType,
++ le16_to_cpu(dr->wIndex),
++ le16_to_cpu(dr->wLength));
+ goto exit;
+ }
+
+@@ -408,7 +410,7 @@ static ssize_t wdm_write
+ USB_RECIP_INTERFACE);
+ req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ req->wValue = 0;
+- req->wIndex = desc->inum;
++ req->wIndex = desc->inum; /* already converted */
+ req->wLength = cpu_to_le16(count);
+ set_bit(WDM_IN_USE, &desc->flags);
+ desc->outbuf = buf;
+@@ -422,7 +424,7 @@ static ssize_t wdm_write
+ rv = usb_translate_errors(rv);
+ } else {
+ dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+- req->wIndex);
++ le16_to_cpu(req->wIndex));
+ }
+ out:
+ usb_autopm_put_interface(desc->intf);
+@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+ desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
+ desc->irq->wValue = 0;
+- desc->irq->wIndex = desc->inum;
++ desc->irq->wIndex = desc->inum; /* already converted */
+ desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
+
+ usb_fill_control_urb(
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d2bd9d7..1847a7d 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3289,10 +3289,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
+ port1, status);
+ } else {
+- /* drive resume for at least 20 msec */
++ /* drive resume for USB_RESUME_TIMEOUT msec */
+ dev_dbg(&udev->dev, "usb %sresume\n",
+ (PMSG_IS_AUTO(msg) ? "auto-" : ""));
+- msleep(25);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Virtual root hubs can trigger on GET_PORT_STATUS to
+ * stop resume signaling. Then finish the resume
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 4d918ed..0f99800 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1501,7 +1501,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
+ dev_dbg(hsotg->dev,
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ writel(0, hsotg->regs + PCGCTL);
+- usleep_range(20000, 40000);
++ msleep(USB_RESUME_TIMEOUT);
+
+ hprt0 = dwc2_read_hprt0(hsotg);
+ hprt0 |= HPRT0_RES;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index d742bed..82df926 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -528,7 +528,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
+ usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+ usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
++ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
+
+ /*
+ * The Superspeed USB Capability descriptor shall be implemented by all
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 98a89d1..8aa4ba0 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
+index ba94990..3e3926a 100644
+--- a/drivers/usb/host/fusbh200-hcd.c
++++ b/drivers/usb/host/fusbh200-hcd.c
+@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+- /* resume signaling for 20 msec */
+ fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ fusbh200->reset_done[wIndex] = jiffies
+- + msecs_to_jiffies(20);
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fusbh200->port_c_suspend);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 240e792..b62298f 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1487,7 +1487,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
+ spin_unlock_irq(&isp116x->lock);
+
+ hcd->state = HC_STATE_RESUMING;
+- msleep(20);
++ msleep(USB_RESUME_TIMEOUT);
+
+ /* Go operational */
+ spin_lock_irq(&isp116x->lock);
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index 110b4b9..f130bb2 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -2300,7 +2300,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
+ rh->port &= ~USB_PORT_STAT_SUSPEND;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+ r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+- msleep(50);
++ msleep(USB_RESUME_TIMEOUT);
+ r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+ }
+
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index a517151..0f53cc8 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1259,7 +1259,7 @@ sl811h_hub_control(
+ sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+
+ mod_timer(&sl811->timer, jiffies
+- + msecs_to_jiffies(20));
++ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case USB_PORT_FEAT_POWER:
+ port_power(sl811, 0);
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 93e17b1..98c66d8 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -165,7 +165,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ /* Port received a wakeup request */
+ set_bit(port, &uhci->resuming_ports);
+ uhci->ports_timeout = jiffies +
+- msecs_to_jiffies(25);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ usb_hcd_start_port_resume(
+ &uhci_to_hcd(uhci)->self, port);
+
+@@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ uhci_finish_suspend(uhci, port, port_addr);
+
+ /* USB v2.0 7.1.7.5 */
+- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
++ uhci->ports_timeout = jiffies +
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ break;
+ case USB_PORT_FEAT_POWER:
+ /* UHCI has no power switching */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index a95eee8..05185b9 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1768,7 +1768,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ } else {
+ xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ bus_state->resume_done[faked_port_index] = jiffies +
+- msecs_to_jiffies(20);
++ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ set_bit(faked_port_index, &bus_state->resuming_ports);
+ mod_timer(&hcd->rh_timer,
+ bus_state->resume_done[faked_port_index]);
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 0180eef..964ebaf 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
+
+ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
+ {
+- return res == match_data;
++ struct usb_phy **phy = res;
++
++ return *phy == match_data;
+ }
+
+ /**
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index f4d7b2f..78f4608 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -751,6 +751,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ int elf_prot = 0, elf_flags;
+ unsigned long k, vaddr;
++ unsigned long total_size = 0;
+
+ if (elf_ppnt->p_type != PT_LOAD)
+ continue;
+@@ -815,10 +816,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++ total_size = total_mapping_size(elf_phdata,
++ loc->elf_ex.e_phnum);
++ if (!total_size) {
++ error = -EINVAL;
++ goto out_free_dentry;
++ }
+ }
+
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+- elf_prot, elf_flags, 0);
++ elf_prot, elf_flags, total_size);
+ if (BAD_ADDR(error)) {
+ send_sig(SIGKILL, current, 0);
+ retval = IS_ERR((void *)error) ?
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d2f1c01..794d7c6 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6645,12 +6645,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ return -ENOSPC;
+ }
+
+- if (btrfs_test_opt(root, DISCARD))
+- ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ if (pin)
+ pin_down_extent(root, cache, start, len, 1);
+ else {
++ if (btrfs_test_opt(root, DISCARD))
++ ret = btrfs_discard_extent(root, start, len, NULL);
+ btrfs_add_free_space(cache, start, len);
+ btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 0b72006..3e16042 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2708,6 +2708,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+ if (src == dst)
+ return -EINVAL;
+
++ if (len == 0)
++ return 0;
++
+ btrfs_double_lock(src, loff, dst, dst_loff, len);
+
+ ret = extent_same_check_offsets(src, loff, len);
+@@ -3226,6 +3229,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ if (off + len == src->i_size)
+ len = ALIGN(src->i_size, bs) - off;
+
++ if (len == 0) {
++ ret = 0;
++ goto out_unlock;
++ }
++
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ !IS_ALIGNED(destoff, bs))
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index ad8328d..488e987 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -324,22 +324,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
+ /*
+ * Check if the attribute is in a supported namespace.
+ *
+- * This applied after the check for the synthetic attributes in the system
++ * This is applied after the check for the synthetic attributes in the system
+ * namespace.
+ */
+-static bool btrfs_is_valid_xattr(const char *name)
++static int btrfs_is_valid_xattr(const char *name)
+ {
+- return !strncmp(name, XATTR_SECURITY_PREFIX,
+- XATTR_SECURITY_PREFIX_LEN) ||
+- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
+- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
++ int len = strlen(name);
++ int prefixlen = 0;
++
++ if (!strncmp(name, XATTR_SECURITY_PREFIX,
++ XATTR_SECURITY_PREFIX_LEN))
++ prefixlen = XATTR_SECURITY_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
++ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++ prefixlen = XATTR_USER_PREFIX_LEN;
++ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
++ prefixlen = XATTR_BTRFS_PREFIX_LEN;
++ else
++ return -EOPNOTSUPP;
++
++ /*
++ * The name cannot consist of just prefix
++ */
++ if (len <= prefixlen)
++ return -EINVAL;
++
++ return 0;
+ }
+
+ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size)
+ {
++ int ret;
++
+ /*
+ * If this is a request for a synthetic attribute in the system.*
+ * namespace use the generic infrastructure to resolve a handler
+@@ -348,8 +368,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_getxattr(dentry, name, buffer, size);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+ return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ }
+
+@@ -357,6 +378,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -373,8 +395,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_setxattr(dentry, name, value, size, flags);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
+@@ -390,6 +413,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ {
+ struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++ int ret;
+
+ /*
+ * The permission on security.* and system.* is not checked
+@@ -406,8 +430,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return generic_removexattr(dentry, name);
+
+- if (!btrfs_is_valid_xattr(name))
+- return -EOPNOTSUPP;
++ ret = btrfs_is_valid_xattr(name);
++ if (ret)
++ return ret;
+
+ if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ return btrfs_set_prop(dentry->d_inode, name,
+diff --git a/fs/exec.c b/fs/exec.c
+index ea4449d..05f1942 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1268,6 +1268,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ spin_unlock(&p->fs->lock);
+ }
+
++static void bprm_fill_uid(struct linux_binprm *bprm)
++{
++ struct inode *inode;
++ unsigned int mode;
++ kuid_t uid;
++ kgid_t gid;
++
++ /* clear any previous set[ug]id data from a previous binary */
++ bprm->cred->euid = current_euid();
++ bprm->cred->egid = current_egid();
++
++ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++ return;
++
++ if (current->no_new_privs)
++ return;
++
++ inode = file_inode(bprm->file);
++ mode = ACCESS_ONCE(inode->i_mode);
++ if (!(mode & (S_ISUID|S_ISGID)))
++ return;
++
++ /* Be careful if suid/sgid is set */
++ mutex_lock(&inode->i_mutex);
++
++ /* reload atomically mode/uid/gid now that lock held */
++ mode = inode->i_mode;
++ uid = inode->i_uid;
++ gid = inode->i_gid;
++ mutex_unlock(&inode->i_mutex);
++
++ /* We ignore suid/sgid if there are no mappings for them in the ns */
++ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
++ !kgid_has_mapping(bprm->cred->user_ns, gid))
++ return;
++
++ if (mode & S_ISUID) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->euid = uid;
++ }
++
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->egid = gid;
++ }
++}
++
+ /*
+ * Fill the binprm structure from the inode.
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+@@ -1276,36 +1323,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ */
+ int prepare_binprm(struct linux_binprm *bprm)
+ {
+- struct inode *inode = file_inode(bprm->file);
+- umode_t mode = inode->i_mode;
+ int retval;
+
+-
+- /* clear any previous set[ug]id data from a previous binary */
+- bprm->cred->euid = current_euid();
+- bprm->cred->egid = current_egid();
+-
+- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+- !current->no_new_privs &&
+- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+- /* Set-uid? */
+- if (mode & S_ISUID) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->euid = inode->i_uid;
+- }
+-
+- /* Set-gid? */
+- /*
+- * If setgid is set but no group execute bit then this
+- * is a candidate for mandatory locking, not a setgid
+- * executable.
+- */
+- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->egid = inode->i_gid;
+- }
+- }
++ bprm_fill_uid(bprm);
+
+ /* fill in binprm security blob */
+ retval = security_bprm_set_creds(bprm);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 2dcbfb6..bc7e37b 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1869,7 +1869,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct inode *dir = dentry->d_parent->d_inode;
+- struct buffer_head *bh;
++ struct buffer_head *bh = NULL;
+ struct ext4_dir_entry_2 *de;
+ struct ext4_dir_entry_tail *t;
+ struct super_block *sb;
+@@ -1893,14 +1893,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return retval;
+ if (retval == 1) {
+ retval = 0;
+- return retval;
++ goto out;
+ }
+ }
+
+ if (is_dx(dir)) {
+ retval = ext4_dx_add_entry(handle, dentry, inode);
+ if (!retval || (retval != ERR_BAD_DX_DIR))
+- return retval;
++ goto out;
+ ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ dx_fallback++;
+ ext4_mark_inode_dirty(handle, dir);
+@@ -1912,14 +1912,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ return PTR_ERR(bh);
+
+ retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+- if (retval != -ENOSPC) {
+- brelse(bh);
+- return retval;
+- }
++ if (retval != -ENOSPC)
++ goto out;
+
+ if (blocks == 1 && !dx_fallback &&
+- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+- return make_indexed_dir(handle, dentry, inode, bh);
++ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++ retval = make_indexed_dir(handle, dentry, inode, bh);
++ bh = NULL; /* make_indexed_dir releases bh */
++ goto out;
++ }
+ brelse(bh);
+ }
+ bh = ext4_append(handle, dir, &block);
+@@ -1935,6 +1936,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ }
+
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ brelse(bh);
+ if (retval == 0)
+ ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
+diff --git a/fs/namei.c b/fs/namei.c
+index 0dd72c8..ccb8000 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1545,7 +1545,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+
+ if (should_follow_link(path->dentry, follow)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ err = -ECHILD;
+ goto out_err;
+ }
+@@ -2992,7 +2993,8 @@ finish_lookup:
+
+ if (should_follow_link(path->dentry, !symlink_ok)) {
+ if (nd->flags & LOOKUP_RCU) {
+- if (unlikely(unlazy_walk(nd, path->dentry))) {
++ if (unlikely(nd->path.mnt != path->mnt ||
++ unlazy_walk(nd, path->dentry))) {
+ error = -ECHILD;
+ goto out;
+ }
+diff --git a/fs/open.c b/fs/open.c
+index 2ed7325..17679f2 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -539,6 +539,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+
++retry_deleg:
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ if (!uid_valid(uid))
+@@ -555,7 +556,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+-retry_deleg:
+ mutex_lock(&inode->i_mutex);
+ error = security_path_chown(path, uid, gid);
+ if (!error)
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 68a3ada..8fc12f8 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -198,9 +198,29 @@ typedef int INT32;
+ typedef s32 acpi_native_int;
+
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for 32-bit
++ * none PAE environment. ASL compiler may always define this to generate
++ * 32-bit OSPM compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+
++#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses can
++ * wrap over the 32-bit boundary on 32-bit PAE environment.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX ACPI_UINT32_MAX
+
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index b402eb6..579912c 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
+index f1a24b5..b58fd66 100644
+--- a/include/asm-generic/sections.h
++++ b/include/asm-generic/sections.h
+@@ -3,6 +3,8 @@
+
+ /* References to section boundaries */
+
++#include <linux/compiler.h>
++
+ /*
+ * Usage guidelines:
+ * _text, _data: architecture specific, don't use them in arch-independent code
+@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
+ /* Start and end of .ctors section - used for constructor calls. */
+ extern char __ctors_start[], __ctors_end[];
+
++extern __visible const void __nosave_begin, __nosave_end;
++
+ /* function descriptor handling (if any). Override
+ * in asm/sections.h */
+ #ifndef dereference_function_descriptor
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ad8f859..ab31337 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -661,6 +661,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+
+ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
++struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ struct sk_buff *build_skb(void *data, unsigned int frag_size);
+ static inline struct sk_buff *alloc_skb(unsigned int size,
+ gfp_t priority)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7f6eb85..49466be 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -206,6 +206,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES 32
+ #define USB_MAXIADS (USB_MAXINTERFACES/2)
+
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT 40 /* ms */
++
+ /**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 34932540..e4b9e01 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -513,7 +513,7 @@ struct se_cmd {
+ sense_reason_t (*execute_cmd)(struct se_cmd *);
+ sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
+ u32, enum dma_data_direction);
+- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+
+ unsigned char *t_task_cdb;
+ unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 1f4bcb3..be9760f 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -720,6 +720,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ unsigned long data)
+ {
++ bool need_siglock;
++
+ if (!valid_signal(data))
+ return -EIO;
+
+@@ -747,8 +749,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ user_disable_single_step(child);
+ }
+
++ /*
++ * Change ->exit_code and ->state under siglock to avoid the race
++ * with wait_task_stopped() in between; a non-zero ->exit_code will
++ * wrongly look like another report from tracee.
++ *
++ * Note that we need siglock even if ->exit_code == data and/or this
++ * status was not reported yet, the new status must not be cleared by
++ * wait_task_stopped() after resume.
++ *
++ * If data == 0 we do not care if wait_task_stopped() reports the old
++ * status and clears the code too; this can't race with the tracee, it
++ * takes siglock after resume.
++ */
++ need_siglock = data && !thread_group_empty(current);
++ if (need_siglock)
++ spin_lock_irq(&child->sighand->siglock);
+ child->exit_code = data;
+ wake_up_state(child, __TASK_TRACED);
++ if (need_siglock)
++ spin_unlock_irq(&child->sighand->siglock);
+
+ return 0;
+ }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 490fcbb..93be750 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -657,9 +657,13 @@ static void run_ksoftirqd(unsigned int cpu)
+ * in the task stack here.
+ */
+ __do_softirq();
+- rcu_note_context_switch(cpu);
+ local_irq_enable();
+ cond_resched();
++
++ preempt_disable();
++ rcu_note_context_switch(cpu);
++ preempt_enable();
++
+ return;
+ }
+ local_irq_enable();
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 774a080..da41de9 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2651,7 +2651,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+
+ static __always_inline int trace_recursive_lock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+ int bit;
+
+ if (in_interrupt()) {
+@@ -2668,18 +2668,17 @@ static __always_inline int trace_recursive_lock(void)
+ return 1;
+
+ val |= (1 << bit);
+- this_cpu_write(current_context, val);
++ __this_cpu_write(current_context, val);
+
+ return 0;
+ }
+
+ static __always_inline void trace_recursive_unlock(void)
+ {
+- unsigned int val = this_cpu_read(current_context);
++ unsigned int val = __this_cpu_read(current_context);
+
+- val--;
+- val &= this_cpu_read(current_context);
+- this_cpu_write(current_context, val);
++ val &= val & (val - 1);
++ __this_cpu_write(current_context, val);
+ }
+
+ #else
+diff --git a/lib/string.c b/lib/string.c
+index 43d0781..cb9ea21 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -598,7 +598,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ memset(s, 0, count);
+- OPTIMIZER_HIDE_VAR(s);
++ barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e2b1bba..69ec61a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -278,13 +278,14 @@ nodata:
+ EXPORT_SYMBOL(__alloc_skb);
+
+ /**
+- * build_skb - build a network buffer
++ * __build_skb - build a network buffer
+ * @data: data buffer provided by caller
+- * @frag_size: size of fragment, or 0 if head was kmalloced
++ * @frag_size: size of data, or 0 if head was kmalloced
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc() only if
+- * @frag_size is 0, otherwise data should come from the page allocator.
++ * @frag_size is 0, otherwise data should come from the page allocator
++ * or vmalloc()
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes :
+@@ -295,7 +296,7 @@ EXPORT_SYMBOL(__alloc_skb);
+ * before giving packet to stack.
+ * RX rings only contains data buffers, not full skbs.
+ */
+-struct sk_buff *build_skb(void *data, unsigned int frag_size)
++struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+ {
+ struct skb_shared_info *shinfo;
+ struct sk_buff *skb;
+@@ -309,7 +310,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->truesize = SKB_TRUESIZE(size);
+- skb->head_frag = frag_size != 0;
+ atomic_set(&skb->users, 1);
+ skb->head = data;
+ skb->data = data;
+@@ -326,6 +326,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ return skb;
+ }
++
++/* build_skb() is wrapper over __build_skb(), that specifically
++ * takes care of skb->head and skb->pfmemalloc
++ * This means that if @frag_size is not zero, then @data must be backed
++ * by a page fragment, not kmalloc() or vmalloc()
++ */
++struct sk_buff *build_skb(void *data, unsigned int frag_size)
++{
++ struct sk_buff *skb = __build_skb(data, frag_size);
++
++ if (skb && frag_size) {
++ skb->head_frag = 1;
++ if (virt_to_head_page(data)->pfmemalloc)
++ skb->pfmemalloc = 1;
++ }
++ return skb;
++}
+ EXPORT_SYMBOL(build_skb);
+
+ struct netdev_alloc_cache {
+@@ -352,7 +369,8 @@ refill:
+ gfp_t gfp = gfp_mask;
+
+ if (order)
+- gfp |= __GFP_COMP | __GFP_NOWARN;
++ gfp |= __GFP_COMP | __GFP_NOWARN |
++ __GFP_NOMEMALLOC;
+ nc->frag.page = alloc_pages(gfp, order);
+ if (likely(nc->frag.page))
+ break;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index ecb34b5..57075c4 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -127,6 +127,9 @@ int ip_forward(struct sk_buff *skb)
+ struct rtable *rt; /* Route we use */
+ struct ip_options *opt = &(IPCB(skb)->opt);
+
++ if (unlikely(skb->sk))
++ goto drop;
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 8c70c73..a68cd71 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2595,39 +2595,65 @@ begin_fwd:
+ }
+ }
+
+-/* Send a fin. The caller locks the socket for us. This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow to exceed memory limits for FIN packets to expedite
++ * connection tear down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay FIN
++ * or even be forced to close flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++ int amt, status;
++
++ if (size <= sk->sk_forward_alloc)
++ return;
++ amt = sk_mem_pages(size);
++ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++ sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+ */
+ void tcp_send_fin(struct sock *sk)
+ {
++ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_write_queue_tail(sk);
+- int mss_now;
+
+- /* Optimization, tack on the FIN if we have a queue of
+- * unsent frames. But be careful about outgoing SACKS
+- * and IP options.
++ /* Optimization, tack on the FIN if we have one skb in write queue and
++ * this skb was not yet sent, or we are under memory pressure.
++ * Note: in the latter case, FIN packet will be sent after a timeout,
++ * as TCP stack thinks it has already been transmitted.
+ */
+- mss_now = tcp_current_mss(sk);
+-
+- if (tcp_send_head(sk) != NULL) {
+- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+- TCP_SKB_CB(skb)->end_seq++;
++ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(tskb)->end_seq++;
+ tp->write_seq++;
++ if (!tcp_send_head(sk)) {
++ /* This means tskb was already sent.
++ * Pretend we included the FIN on previous transmit.
++ * We need to set tp->snd_nxt to the value it would have
++ * if FIN had been sent. This is because retransmit path
++ * does not change tp->snd_nxt.
++ */
++ tp->snd_nxt++;
++ return;
++ }
+ } else {
+- /* Socket is locked, keep trying until memory is available. */
+- for (;;) {
+- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+- if (skb)
+- break;
+- yield();
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++ if (unlikely(!skb)) {
++ if (tskb)
++ goto coalesce;
++ return;
+ }
++ skb_reserve(skb, MAX_TCP_HEADER);
++ sk_forced_wmem_schedule(sk, skb->truesize);
+ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ tcp_init_nondata_skb(skb, tp->write_seq,
+ TCPHDR_ACK | TCPHDR_FIN);
+ tcp_queue_skb(sk, skb);
+ }
+- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1d52506..a0b0ea9 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1624,13 +1624,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
+ if (data == NULL)
+ return NULL;
+
+- skb = build_skb(data, size);
++ skb = __build_skb(data, size);
+ if (skb == NULL)
+ vfree(data);
+- else {
+- skb->head_frag = 0;
++ else
+ skb->destructor = netlink_skb_destructor;
+- }
+
+ return skb;
+ }
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e..53745f4 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+ u32 value2;
+- unsigned long flags;
+ u32 rate;
+
+ if (emu->card_capabilities->emu_model) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x38, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x1) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x2a, &value);
+ snd_emu1010_fpga_read(emu, 0x2b, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ } else {
+ snd_iprintf(buffer, "ADAT Unlocked\n");
+ }
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x20, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ if ((value & 0x4) == 0) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, 0x28, &value);
+ snd_emu1010_fpga_read(emu, 0x29, &value2);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ rate = 0x1770000 / (((value << 5) | value2)+1);
+ snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ } else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ struct snd_emu10k1 *emu = entry->private_data;
+ u32 value;
+- unsigned long flags;
+ int i;
+ snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+
+ for(i = 0; i < 0x40; i+=1) {
+- spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_read(emu, i, &value);
+- spin_unlock_irqrestore(&emu->emu_lock, flags);
+ snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ }
+ }
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index 5e3bc3c..f40a7a4 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -384,18 +384,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+-static int davinci_evm_remove(struct platform_device *pdev)
+-{
+- struct snd_soc_card *card = platform_get_drvdata(pdev);
+-
+- snd_soc_unregister_card(card);
+-
+- return 0;
+-}
+-
+ static struct platform_driver davinci_evm_driver = {
+ .probe = davinci_evm_probe,
+- .remove = davinci_evm_remove,
+ .driver = {
+ .name = "davinci_evm",
+ .owner = THIS_MODULE,
+diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
+index dcc6652..deb3569 100644
+--- a/tools/lib/traceevent/kbuffer-parse.c
++++ b/tools/lib/traceevent/kbuffer-parse.c
+@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ switch (type_len) {
+ case KBUFFER_TYPE_PADDING:
+ *length = read_4(kbuf, data);
+- data += *length;
+ break;
+
+ case KBUFFER_TYPE_TIME_EXTEND:
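The kbuffer-parse change above drops the extra data += *length in the padding case. My reading is that translate_data() already reports the padding length back to its caller, which advances past the payload itself, so bumping the local pointer here as well made the parser step over whatever record followed a padding block; the caller side is outside this hunk, so treat that as an inference. The toy parser below (parse_record and consume are made-up names, not the traceevent API) reproduces the double-advance effect.

    #include <assert.h>
    #include <stdio.h>

    /*
     * Toy record stream modelled as an array of ints:
     *   stream[i]   = kind (0 = padding, 1 = event)
     *   stream[i+1] = payload length in ints
     * followed by that many payload ints.
     */

    /* Parse one header, report the payload length, return the payload start.
     * The buggy variant also steps over the payload, mirroring the removed
     * "data += *length" in the padding case. */
    static const int *parse_record(const int *data, int *len, int buggy)
    {
            int kind = data[0];

            *len = data[1];
            data += 2;
            if (buggy && kind == 0)
                    data += *len;          /* the double advance */
            return data;
    }

    /* Caller side always skips the reported length itself, much as
     * update_pointers() computes kbuf->next = kbuf->index + length. */
    static const int *consume(const int *data, int buggy)
    {
            int len;

            data = parse_record(data, &len, buggy);
            return data + len;
    }

    int main(void)
    {
            /* one padding record (2 payload ints) followed by one event record */
            const int stream[] = { 0, 2, -1, -1,   1, 1, 42 };
            const int *p;

            /* fixed parser: after the padding we land on the event header */
            p = consume(stream, 0);
            assert(p == &stream[4] && p[0] == 1);

            /* buggy parser: the double advance skips the event entirely */
            p = consume(stream, 1);
            assert(p == &stream[6]);

            puts("padding model ok");
            return 0;
    }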
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a36..4039854 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC = $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT := $(PWD)
++BUILD_OUTPUT := $(CURDIR)
+ PREFIX := /usr
+ DESTDIR :=
+
++ifeq ("$(origin O)", "command line")
++ BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS += -Wall
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6611253..eed250e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1549,8 +1549,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ ghc->generation = slots->generation;
+ ghc->len = len;
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ ghc->hva += offset;
+ } else {
+ /*