Diffstat (limited to '4.8.9/1008_linux-4.8.9.patch')
-rw-r--r--  4.8.9/1008_linux-4.8.9.patch  3119
1 files changed, 3119 insertions, 0 deletions
diff --git a/4.8.9/1008_linux-4.8.9.patch b/4.8.9/1008_linux-4.8.9.patch
new file mode 100644
index 0000000..c526740
--- /dev/null
+++ b/4.8.9/1008_linux-4.8.9.patch
@@ -0,0 +1,3119 @@
+diff --git a/Makefile b/Makefile
+index c1519ab..8f18daa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 9
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
+index c10390d..f927b8d 100644
+--- a/arch/arc/kernel/time.c
++++ b/arch/arc/kernel/time.c
+@@ -152,17 +152,14 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
+ cycle_t full;
+ } stamp;
+
+- /*
+- * hardware has an internal state machine which tracks readout of
+- * low/high and updates the CTRL.status if
+- * - interrupt/exception taken between the two reads
+- * - high increments after low has been read
+- */
+- do {
+- stamp.low = read_aux_reg(AUX_RTC_LOW);
+- stamp.high = read_aux_reg(AUX_RTC_HIGH);
+- status = read_aux_reg(AUX_RTC_CTRL);
+- } while (!(status & _BITUL(31)));
++
++ __asm__ __volatile(
++ "1: \n"
++ " lr %0, [AUX_RTC_LOW] \n"
++ " lr %1, [AUX_RTC_HIGH] \n"
++ " lr %2, [AUX_RTC_CTRL] \n"
++ " bbit0.nt %2, 31, 1b \n"
++ : "=r" (stamp.low), "=r" (stamp.high), "=r" (status));
+
+ return stamp.full;
+ }
+diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
+index 9288851..20afc65 100644
+--- a/arch/arc/mm/dma.c
++++ b/arch/arc/mm/dma.c
+@@ -105,31 +105,6 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+ __free_pages(page, get_order(size));
+ }
+
+-static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size,
+- unsigned long attrs)
+-{
+- unsigned long user_count = vma_pages(vma);
+- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+- unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+- unsigned long off = vma->vm_pgoff;
+- int ret = -ENXIO;
+-
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+- return ret;
+-
+- if (off < count && user_count <= (count - off)) {
+- ret = remap_pfn_range(vma, vma->vm_start,
+- pfn + off,
+- user_count << PAGE_SHIFT,
+- vma->vm_page_prot);
+- }
+-
+- return ret;
+-}
+-
+ /*
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to explicitly made
+@@ -218,7 +193,6 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
+ struct dma_map_ops arc_dma_ops = {
+ .alloc = arc_dma_alloc,
+ .free = arc_dma_free,
+- .mmap = arc_dma_mmap,
+ .map_page = arc_dma_map_page,
+ .map_sg = arc_dma_map_sg,
+ .sync_single_for_device = arc_dma_sync_single_for_device,
+diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
+index 794bebb..28f03ca 100644
+--- a/arch/s390/hypfs/hypfs_diag.c
++++ b/arch/s390/hypfs/hypfs_diag.c
+@@ -363,11 +363,11 @@ static void *diag204_store(void)
+ static int diag224_get_name_table(void)
+ {
+ /* memory must be below 2GB */
+- diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
++ diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!diag224_cpu_names)
+ return -ENOMEM;
+ if (diag224(diag224_cpu_names)) {
+- free_page((unsigned long) diag224_cpu_names);
++ kfree(diag224_cpu_names);
+ return -EOPNOTSUPP;
+ }
+ EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
+@@ -376,7 +376,7 @@ static int diag224_get_name_table(void)
+
+ static void diag224_delete_name_table(void)
+ {
+- free_page((unsigned long) diag224_cpu_names);
++ kfree(diag224_cpu_names);
+ }
+
+ static int diag224_idx2name(int index, char *name)
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 602af69..0332317 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -192,7 +192,7 @@ struct task_struct;
+ struct mm_struct;
+ struct seq_file;
+
+-typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
++typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+ void dump_trace(dump_trace_func_t func, void *data,
+ struct task_struct *task, unsigned long sp);
+
+diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
+index 518f615..6693383 100644
+--- a/arch/s390/kernel/dumpstack.c
++++ b/arch/s390/kernel/dumpstack.c
+@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+- if (func(data, sf->gprs[8], 0))
+- return sp;
+ /* Follow the backchain. */
+ while (1) {
++ if (func(data, sf->gprs[8]))
++ return sp;
+ low = sp;
+ sp = sf->back_chain;
+ if (!sp)
+@@ -49,8 +49,6 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+- if (func(data, sf->gprs[8], 1))
+- return sp;
+ }
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+@@ -58,7 +56,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+ return sp;
+ regs = (struct pt_regs *) sp;
+ if (!user_mode(regs)) {
+- if (func(data, regs->psw.addr, 1))
++ if (func(data, regs->psw.addr))
+ return sp;
+ }
+ low = sp;
+@@ -92,7 +90,7 @@ struct return_address_data {
+ int depth;
+ };
+
+-static int __return_address(void *data, unsigned long address, int reliable)
++static int __return_address(void *data, unsigned long address)
+ {
+ struct return_address_data *rd = data;
+
+@@ -111,12 +109,9 @@ unsigned long return_address(int depth)
+ }
+ EXPORT_SYMBOL_GPL(return_address);
+
+-static int show_address(void *data, unsigned long address, int reliable)
++static int show_address(void *data, unsigned long address)
+ {
+- if (reliable)
+- printk(" [<%016lx>] %pSR \n", address, (void *)address);
+- else
+- printk("([<%016lx>] %pSR)\n", address, (void *)address);
++ printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ return 0;
+ }
+
+diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
+index 955a7b6..17431f6 100644
+--- a/arch/s390/kernel/perf_event.c
++++ b/arch/s390/kernel/perf_event.c
+@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
+ }
+ arch_initcall(service_level_perf_register);
+
+-static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
++static int __perf_callchain_kernel(void *data, unsigned long address)
+ {
+ struct perf_callchain_entry_ctx *entry = data;
+
+diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
+index 355db9d..44f84b2 100644
+--- a/arch/s390/kernel/stacktrace.c
++++ b/arch/s390/kernel/stacktrace.c
+@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
+ return 1;
+ }
+
+-static int save_address(void *data, unsigned long address, int reliable)
++static int save_address(void *data, unsigned long address)
+ {
+ return __save_address(data, address, 0);
+ }
+
+-static int save_address_nosched(void *data, unsigned long address, int reliable)
++static int save_address_nosched(void *data, unsigned long address)
+ {
+ return __save_address(data, address, 1);
+ }
+diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
+index 9a4de45..16f4c39 100644
+--- a/arch/s390/oprofile/init.c
++++ b/arch/s390/oprofile/init.c
+@@ -13,7 +13,7 @@
+ #include <linux/init.h>
+ #include <asm/processor.h>
+
+-static int __s390_backtrace(void *data, unsigned long address, int reliable)
++static int __s390_backtrace(void *data, unsigned long address)
+ {
+ unsigned int *depth = data;
+
+diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
+index 9976fce..77f28ce 100644
+--- a/arch/x86/entry/Makefile
++++ b/arch/x86/entry/Makefile
+@@ -5,8 +5,8 @@
+ OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
+ OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
+
+-CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
+-CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
++CFLAGS_syscall_64.o += -Wno-override-init
++CFLAGS_syscall_32.o += -Wno-override-init
+ obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+ obj-y += common.o
+
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index d99ca57..fbd1944 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -453,7 +453,6 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
+ polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
+
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+- acpi_penalize_sci_irq(bus_irq, trigger, polarity);
+
+ /*
+ * stash over-ride to indicate we've been here
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index caea575..60746ef 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
+ ghes_do_proc(ghes, ghes->estatus);
+ out:
+ ghes_clear_estatus(ghes);
+- return rc;
++ return 0;
+ }
+
+ static void ghes_add_timer(struct ghes *ghes)
+diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
+index bc3d914..c983bf7 100644
+--- a/drivers/acpi/pci_link.c
++++ b/drivers/acpi/pci_link.c
+@@ -87,7 +87,6 @@ struct acpi_pci_link {
+
+ static LIST_HEAD(acpi_link_list);
+ static DEFINE_MUTEX(acpi_link_lock);
+-static int sci_irq = -1, sci_penalty;
+
+ /* --------------------------------------------------------------------------
+ PCI Link Device Management
+@@ -497,13 +496,25 @@ static int acpi_irq_get_penalty(int irq)
+ {
+ int penalty = 0;
+
+- if (irq == sci_irq)
+- penalty += sci_penalty;
++ /*
++ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
++ * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
++ * use for PCI IRQs.
++ */
++ if (irq == acpi_gbl_FADT.sci_interrupt) {
++ u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
++
++ if (type != IRQ_TYPE_LEVEL_LOW)
++ penalty += PIRQ_PENALTY_ISA_ALWAYS;
++ else
++ penalty += PIRQ_PENALTY_PCI_USING;
++ }
+
+ if (irq < ACPI_MAX_ISA_IRQS)
+ return penalty + acpi_isa_irq_penalty[irq];
+
+- return penalty + acpi_irq_pci_sharing_penalty(irq);
++ penalty += acpi_irq_pci_sharing_penalty(irq);
++ return penalty;
+ }
+
+ int __init acpi_irq_penalty_init(void)
+@@ -608,10 +619,6 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
+ acpi_device_bid(link->device));
+ return -ENODEV;
+ } else {
+- if (link->irq.active < ACPI_MAX_ISA_IRQS)
+- acpi_isa_irq_penalty[link->irq.active] +=
+- PIRQ_PENALTY_PCI_USING;
+-
+ printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device), link->irq.active);
+@@ -842,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
+ continue;
+
+ if (used)
+- new_penalty = acpi_isa_irq_penalty[irq] +
++ new_penalty = acpi_irq_get_penalty(irq) +
+ PIRQ_PENALTY_ISA_USED;
+ else
+ new_penalty = 0;
+@@ -864,7 +871,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
+ void acpi_penalize_isa_irq(int irq, int active)
+ {
+ if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
+- acpi_isa_irq_penalty[irq] +=
++ acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
+ }
+
+@@ -874,17 +881,6 @@ bool acpi_isa_irq_available(int irq)
+ acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
+ }
+
+-void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+-{
+- sci_irq = irq;
+-
+- if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+- polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+- sci_penalty = PIRQ_PENALTY_PCI_USING;
+- else
+- sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+-}
+-
+ /*
+ * Over-ride default table to reserve additional IRQs for use by ISA
+ * e.g. acpi_irq_isa=5
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index 8348272..100be55 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
+ drbd_update_congested(connection);
+ }
+ do {
+- rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
++ rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ if (rv == -EAGAIN) {
+ if (we_should_drop_the_connection(connection, sock))
+ break;
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 0f7d28a..4431129 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -845,8 +845,6 @@ void intel_gtt_insert_page(dma_addr_t addr,
+ unsigned int flags)
+ {
+ intel_private.driver->write_entry(addr, pg, flags);
+- if (intel_private.driver->chipset_flush)
+- intel_private.driver->chipset_flush();
+ }
+ EXPORT_SYMBOL(intel_gtt_insert_page);
+
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 340f96e..9203f2d 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
+
+ static void add_early_randomness(struct hwrng *rng)
+ {
++ unsigned char bytes[16];
+ int bytes_read;
+- size_t size = min_t(size_t, 16, rng_buffer_size());
+
+ mutex_lock(&reading_mutex);
+- bytes_read = rng_get_data(rng, rng_buffer, size, 1);
++ bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ mutex_unlock(&reading_mutex);
+ if (bytes_read > 0)
+- add_device_randomness(rng_buffer, bytes_read);
++ add_device_randomness(bytes, bytes_read);
+ }
+
+ static inline void cleanup_rng(struct kref *kref)
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index 80ae2a5..20b1055 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -700,7 +700,6 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
+ struct mux_hwclock *hwc,
+ const struct clk_ops *ops,
+ unsigned long min_rate,
+- unsigned long max_rate,
+ unsigned long pct80_rate,
+ const char *fmt, int idx)
+ {
+@@ -729,8 +728,6 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
+ continue;
+ if (rate < min_rate)
+ continue;
+- if (rate > max_rate)
+- continue;
+
+ parent_names[j] = div->name;
+ hwc->parent_to_clksel[j] = i;
+@@ -762,7 +759,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ struct mux_hwclock *hwc;
+ const struct clockgen_pll_div *div;
+ unsigned long plat_rate, min_rate;
+- u64 max_rate, pct80_rate;
++ u64 pct80_rate;
+ u32 clksel;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+@@ -790,8 +787,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ return NULL;
+ }
+
+- max_rate = clk_get_rate(div->clk);
+- pct80_rate = max_rate * 8;
++ pct80_rate = clk_get_rate(div->clk);
++ pct80_rate *= 8;
+ do_div(pct80_rate, 10);
+
+ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
+@@ -801,7 +798,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ else
+ min_rate = plat_rate / 2;
+
+- return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
++ return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ pct80_rate, "cg-cmux%d", idx);
+ }
+
+@@ -816,7 +813,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
+ hwc->reg = cg->regs + 0x20 * idx + 0x10;
+ hwc->info = cg->info.hwaccel[idx];
+
+- return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
++ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ "cg-hwaccel%d", idx);
+ }
+
+diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
+index 0fa91f3..bdf8b97 100644
+--- a/drivers/clk/samsung/clk-exynos-audss.c
++++ b/drivers/clk/samsung/clk-exynos-audss.c
+@@ -82,7 +82,6 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
+ .data = (void *)TYPE_EXYNOS5420, },
+ {},
+ };
+-MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
+
+ static void exynos_audss_clk_teardown(void)
+ {
+diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
+index 4f87f3e..c184eb8 100644
+--- a/drivers/clocksource/timer-sun5i.c
++++ b/drivers/clocksource/timer-sun5i.c
+@@ -152,13 +152,6 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
+-static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+-{
+- struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+-
+- return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+-}
+-
+ static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
+ unsigned long event, void *data)
+ {
+@@ -217,13 +210,8 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
+ writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ base + TIMER_CTL_REG(1));
+
+- cs->clksrc.name = node->name;
+- cs->clksrc.rating = 340;
+- cs->clksrc.read = sun5i_clksrc_read;
+- cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+- cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+-
+- ret = clocksource_register_hz(&cs->clksrc, rate);
++ ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
++ rate, 340, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("Couldn't register clock source.\n");
+ goto err_remove_notifier;
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 1ed6132..cd5dc27 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
+ {
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+- u32 mask = d->mask;
++ u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+ irq_gc_lock(gc);
+- writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
++ writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ irq_gc_unlock(gc);
+ }
+
+@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv &= ~mask;
+@@ -319,7 +319,8 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv |= mask;
+@@ -332,7 +333,8 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv &= ~mask;
+@@ -345,7 +347,8 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+- u32 mask = d->mask;
++
++ u32 mask = 1 << (d->irq - gc->irq_base);
+
+ irq_gc_lock(gc);
+ ct->mask_cache_priv |= mask;
+@@ -459,7 +462,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
+ for (i = 0; i < mvchip->chip.ngpio; i++) {
+ int irq;
+
+- irq = irq_find_mapping(mvchip->domain, i);
++ irq = mvchip->irqbase + i;
+
+ if (!(cause & (1 << i)))
+ continue;
+@@ -652,7 +655,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ struct irq_chip_type *ct;
+ struct clk *clk;
+ unsigned int ngpios;
+- bool have_irqs;
+ int soc_variant;
+ int i, cpu, id;
+ int err;
+@@ -663,9 +665,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ else
+ soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+
+- /* Some gpio controllers do not provide irq support */
+- have_irqs = of_irq_count(np) != 0;
+-
+ mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
+ GFP_KERNEL);
+ if (!mvchip)
+@@ -698,8 +697,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ mvchip->chip.get = mvebu_gpio_get;
+ mvchip->chip.direction_output = mvebu_gpio_direction_output;
+ mvchip->chip.set = mvebu_gpio_set;
+- if (have_irqs)
+- mvchip->chip.to_irq = mvebu_gpio_to_irq;
++ mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
+ mvchip->chip.ngpio = ngpios;
+ mvchip->chip.can_sleep = false;
+@@ -760,30 +758,34 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
+
+ /* Some gpio controllers do not provide irq support */
+- if (!have_irqs)
++ if (!of_irq_count(np))
+ return 0;
+
+- mvchip->domain =
+- irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+- if (!mvchip->domain) {
+- dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+- mvchip->chip.label);
+- return -ENODEV;
++ /* Setup the interrupt handlers. Each chip can have up to 4
++ * interrupt handlers, with each handler dealing with 8 GPIO
++ * pins. */
++ for (i = 0; i < 4; i++) {
++ int irq = platform_get_irq(pdev, i);
++
++ if (irq < 0)
++ continue;
++ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
++ mvchip);
+ }
+
+- err = irq_alloc_domain_generic_chips(
+- mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+- IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+- if (err) {
+- dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+- mvchip->chip.label);
+- goto err_domain;
++ mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
++ if (mvchip->irqbase < 0) {
++ dev_err(&pdev->dev, "no irqs\n");
++ return mvchip->irqbase;
++ }
++
++ gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
++ mvchip->membase, handle_level_irq);
++ if (!gc) {
++ dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
++ return -ENOMEM;
+ }
+
+- /* NOTE: The common accessors cannot be used because of the percpu
+- * access to the mask registers
+- */
+- gc = irq_get_domain_generic_chip(mvchip->domain, 0);
+ gc->private = mvchip;
+ ct = &gc->chip_types[0];
+ ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
+@@ -801,23 +803,27 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
+ ct->handler = handle_edge_irq;
+ ct->chip.name = mvchip->chip.label;
+
+- /* Setup the interrupt handlers. Each chip can have up to 4
+- * interrupt handlers, with each handler dealing with 8 GPIO
+- * pins.
+- */
+- for (i = 0; i < 4; i++) {
+- int irq = platform_get_irq(pdev, i);
++ irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
++ IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+
+- if (irq < 0)
+- continue;
+- irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+- mvchip);
++ /* Setup irq domain on top of the generic chip. */
++ mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
++ mvchip->irqbase,
++ &irq_domain_simple_ops,
++ mvchip);
++ if (!mvchip->domain) {
++ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
++ mvchip->chip.label);
++ err = -ENODEV;
++ goto err_generic_chip;
+ }
+
+ return 0;
+
+-err_domain:
+- irq_domain_remove(mvchip->domain);
++err_generic_chip:
++ irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
++ IRQ_LEVEL | IRQ_NOPROBE);
++ kfree(gc);
+
+ return err;
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index e3fc901..a28feb3 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -26,18 +26,14 @@
+
+ #include "gpiolib.h"
+
+-static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
++static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+ {
+- struct of_phandle_args *gpiospec = data;
+-
+- return chip->gpiodev->dev.of_node == gpiospec->np &&
+- chip->of_xlate(chip, gpiospec, NULL) >= 0;
++ return chip->gpiodev->dev.of_node == data;
+ }
+
+-static struct gpio_chip *of_find_gpiochip_by_xlate(
+- struct of_phandle_args *gpiospec)
++static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+ {
+- return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
++ return gpiochip_find(np, of_gpiochip_match_node);
+ }
+
+ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
+@@ -83,7 +79,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
+ return ERR_PTR(ret);
+ }
+
+- chip = of_find_gpiochip_by_xlate(&gpiospec);
++ chip = of_find_gpiochip_by_node(gpiospec.np);
+ if (!chip) {
+ desc = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+index 2057683..892d60f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+@@ -395,11 +395,8 @@ static int acp_hw_fini(void *handle)
+ {
+ int i, ret;
+ struct device *dev;
+- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- /* return early if no ACP */
+- if (!adev->acp.acp_genpd)
+- return 0;
++ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 414a160..9aa533c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -605,7 +605,6 @@ static int __init amdgpu_init(void)
+ {
+ amdgpu_sync_init();
+ amdgpu_fence_slab_init();
+- amd_sched_fence_slab_init();
+ if (vgacon_text_force()) {
+ DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
+ return -EINVAL;
+@@ -625,7 +624,6 @@ static void __exit amdgpu_exit(void)
+ drm_pci_exit(driver, pdriver);
+ amdgpu_unregister_atpx_handler();
+ amdgpu_sync_fini();
+- amd_sched_fence_slab_fini();
+ amdgpu_fence_slab_fini();
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index c82b95b8..0b109ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -68,7 +68,6 @@ int amdgpu_fence_slab_init(void)
+
+ void amdgpu_fence_slab_fini(void)
+ {
+- rcu_barrier();
+ kmem_cache_destroy(amdgpu_fence_slab);
+ }
+ /*
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 1ed64ae..e24a8af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -99,8 +99,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+
+ if ((amdgpu_runtime_pm != 0) &&
+ amdgpu_has_atpx() &&
+- (amdgpu_is_atpx_hybrid() ||
+- amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((flags & AMD_IS_APU) == 0))
+ flags |= AMD_IS_PX;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index e86ca39..80120fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1654,6 +1654,5 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+ fence_put(adev->vm_manager.ids[i].first);
+ amdgpu_sync_free(&adev->vm_manager.ids[i].active);
+ fence_put(id->flushed_updates);
+- fence_put(id->last_flush);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index ffe1f85..963a24d 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -34,6 +34,9 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
+ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
+
++struct kmem_cache *sched_fence_slab;
++atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
++
+ /* Initialize a given run queue struct */
+ static void amd_sched_rq_init(struct amd_sched_rq *rq)
+ {
+@@ -615,6 +618,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
+ INIT_LIST_HEAD(&sched->ring_mirror_list);
+ spin_lock_init(&sched->job_list_lock);
+ atomic_set(&sched->hw_rq_count, 0);
++ if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
++ sched_fence_slab = kmem_cache_create(
++ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
++ SLAB_HWCACHE_ALIGN, NULL);
++ if (!sched_fence_slab)
++ return -ENOMEM;
++ }
+
+ /* Each scheduler will run on a seperate kernel thread */
+ sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+@@ -635,4 +645,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
+ {
+ if (sched->thread)
+ kthread_stop(sched->thread);
++ if (atomic_dec_and_test(&sched_fence_slab_ref))
++ kmem_cache_destroy(sched_fence_slab);
+ }
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index 51068e6..7cbbbfb 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -30,6 +30,9 @@
+ struct amd_gpu_scheduler;
+ struct amd_sched_rq;
+
++extern struct kmem_cache *sched_fence_slab;
++extern atomic_t sched_fence_slab_ref;
++
+ /**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+@@ -142,9 +145,6 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity);
+ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+-int amd_sched_fence_slab_init(void);
+-void amd_sched_fence_slab_fini(void);
+-
+ struct amd_sched_fence *amd_sched_fence_create(
+ struct amd_sched_entity *s_entity, void *owner);
+ void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
+diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+index 93ad2e1..6b63bea 100644
+--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
+@@ -27,25 +27,6 @@
+ #include <drm/drmP.h>
+ #include "gpu_scheduler.h"
+
+-static struct kmem_cache *sched_fence_slab;
+-
+-int amd_sched_fence_slab_init(void)
+-{
+- sched_fence_slab = kmem_cache_create(
+- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+- SLAB_HWCACHE_ALIGN, NULL);
+- if (!sched_fence_slab)
+- return -ENOMEM;
+-
+- return 0;
+-}
+-
+-void amd_sched_fence_slab_fini(void)
+-{
+- rcu_barrier();
+- kmem_cache_destroy(sched_fence_slab);
+-}
+-
+ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+ void *owner)
+ {
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index d46fa22..5de36d8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1490,6 +1490,8 @@ static int i915_drm_suspend(struct drm_device *dev)
+
+ dev_priv->suspend_count++;
+
++ intel_display_set_init_power(dev_priv, false);
++
+ intel_csr_ucode_suspend(dev_priv);
+
+ out:
+@@ -1506,8 +1508,6 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+
+ disable_rpm_wakeref_asserts(dev_priv);
+
+- intel_display_set_init_power(dev_priv, false);
+-
+ fw_csr = !IS_BROXTON(dev_priv) &&
+ suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ /*
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e26f889..63462f2 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9737,29 +9737,6 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+ bxt_set_cdclk(to_i915(dev), req_cdclk);
+ }
+
+-static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+- int pixel_rate)
+-{
+- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+-
+- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+-
+- /* BSpec says "Do not use DisplayPort with CDCLK less than
+- * 432 MHz, audio enabled, port width x4, and link rate
+- * HBR2 (5.4 GHz), or else there may be audio corruption or
+- * screen corruption."
+- */
+- if (intel_crtc_has_dp_encoder(crtc_state) &&
+- crtc_state->has_audio &&
+- crtc_state->port_clock >= 540000 &&
+- crtc_state->lane_count == 4)
+- pixel_rate = max(432000, pixel_rate);
+-
+- return pixel_rate;
+-}
+-
+ /* compute the max rate for new configuration */
+ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+ {
+@@ -9785,9 +9762,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+
+ pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+- if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+- pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+- pixel_rate);
++ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
++ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
++ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ intel_state->min_pixclk[i] = pixel_rate;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 1421270..c3aa9e6 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1759,50 +1759,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ }
+
+-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+- enum port port)
+-{
+- const struct ddi_vbt_port_info *info =
+- &dev_priv->vbt.ddi_port_info[port];
+- u8 ddc_pin;
+-
+- if (info->alternate_ddc_pin) {
+- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+- info->alternate_ddc_pin, port_name(port));
+- return info->alternate_ddc_pin;
+- }
+-
+- switch (port) {
+- case PORT_B:
+- if (IS_BROXTON(dev_priv))
+- ddc_pin = GMBUS_PIN_1_BXT;
+- else
+- ddc_pin = GMBUS_PIN_DPB;
+- break;
+- case PORT_C:
+- if (IS_BROXTON(dev_priv))
+- ddc_pin = GMBUS_PIN_2_BXT;
+- else
+- ddc_pin = GMBUS_PIN_DPC;
+- break;
+- case PORT_D:
+- if (IS_CHERRYVIEW(dev_priv))
+- ddc_pin = GMBUS_PIN_DPD_CHV;
+- else
+- ddc_pin = GMBUS_PIN_DPD;
+- break;
+- default:
+- MISSING_CASE(port);
+- ddc_pin = GMBUS_PIN_DPB;
+- break;
+- }
+-
+- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+- ddc_pin, port_name(port));
+-
+- return ddc_pin;
+-}
+-
+ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector)
+ {
+@@ -1812,6 +1768,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dig_port->port;
++ uint8_t alternate_ddc_pin;
+
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+@@ -1829,10 +1786,12 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ connector->doublescan_allowed = 0;
+ connector->stereo_allowed = 1;
+
+- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+-
+ switch (port) {
+ case PORT_B:
++ if (IS_BROXTON(dev_priv))
++ intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
++ else
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+ /*
+ * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+ * interrupts to check the external panel connection.
+@@ -1843,17 +1802,46 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ intel_encoder->hpd_pin = HPD_PORT_B;
+ break;
+ case PORT_C:
++ if (IS_BROXTON(dev_priv))
++ intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
++ else
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+ intel_encoder->hpd_pin = HPD_PORT_C;
+ break;
+ case PORT_D:
++ if (WARN_ON(IS_BROXTON(dev_priv)))
++ intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
++ else if (IS_CHERRYVIEW(dev_priv))
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
++ else
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+ intel_encoder->hpd_pin = HPD_PORT_D;
+ break;
+ case PORT_E:
++ /* On SKL PORT E doesn't have seperate GMBUS pin
++ * We rely on VBT to set a proper alternate GMBUS pin. */
++ alternate_ddc_pin =
++ dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
++ switch (alternate_ddc_pin) {
++ case DDC_PIN_B:
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
++ break;
++ case DDC_PIN_C:
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
++ break;
++ case DDC_PIN_D:
++ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
++ break;
++ default:
++ MISSING_CASE(alternate_ddc_pin);
++ }
+ intel_encoder->hpd_pin = HPD_PORT_E;
+ break;
++ case PORT_A:
++ intel_encoder->hpd_pin = HPD_PORT_A;
++ /* Internal port only for eDP. */
+ default:
+- MISSING_CASE(port);
+- return;
++ BUG();
+ }
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index edd2d03..554ca71 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -104,14 +104,6 @@ static const char radeon_family_name[][16] = {
+ "LAST",
+ };
+
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_has_atpx_dgpu_power_cntl(void);
+-bool radeon_is_atpx_hybrid(void);
+-#else
+-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+-static inline bool radeon_is_atpx_hybrid(void) { return false; }
+-#endif
+-
+ #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+
+@@ -168,11 +160,6 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+
+ if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+ rdev->flags &= ~RADEON_IS_PX;
+-
+- /* disable PX is the system doesn't support dGPU power control or hybrid gfx */
+- if (!radeon_is_atpx_hybrid() &&
+- !radeon_has_atpx_dgpu_power_cntl())
+- rdev->flags &= ~RADEON_IS_PX;
+ }
+
+ /**
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index ce69048..da3fb06 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+- *val = adata->current_fullscale->gain / 1000000;
+- *val2 = adata->current_fullscale->gain % 1000000;
++ *val = 0;
++ *val2 = adata->current_fullscale->gain;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adata->odr;
+@@ -763,13 +763,9 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
+ int err;
+
+ switch (mask) {
+- case IIO_CHAN_INFO_SCALE: {
+- int gain;
+-
+- gain = val * 1000000 + val2;
+- err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
++ case IIO_CHAN_INFO_SCALE:
++ err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ break;
+- }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val2)
+ return -EINVAL;
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+index b5beea53..dc33c1d 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+@@ -30,26 +30,26 @@ static struct {
+ u32 usage_id;
+ int unit; /* 0 for default others from HID sensor spec */
+ int scale_val0; /* scale, whole number */
+- int scale_val1; /* scale, fraction in nanos */
++ int scale_val1; /* scale, fraction in micros */
+ } unit_conversion[] = {
+- {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
++ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D,
+ HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
+ {HID_USAGE_SENSOR_ACCEL_3D,
+- HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
++ HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+
+- {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
++ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D,
+ HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
+ {HID_USAGE_SENSOR_GYRO_3D,
+- HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
++ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+
+- {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
++ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
+
+- {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
++ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D,
+- HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
++ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D,
+ HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
+
+@@ -57,7 +57,7 @@ static struct {
+ {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
+
+ {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
++ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ };
+
+ static int pow_10(unsigned power)
+@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
+ /*
+ * This fuction applies the unit exponent to the scale.
+ * For example:
+- * 9.806650000 ->exp:2-> val0[980]val1[665000000]
+- * 9.000806000 ->exp:2-> val0[900]val1[80600000]
+- * 0.174535293 ->exp:2-> val0[17]val1[453529300]
+- * 1.001745329 ->exp:0-> val0[1]val1[1745329]
+- * 1.001745329 ->exp:2-> val0[100]val1[174532900]
+- * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
+- * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
++ * 9.806650 ->exp:2-> val0[980]val1[665000]
++ * 9.000806 ->exp:2-> val0[900]val1[80600]
++ * 0.174535 ->exp:2-> val0[17]val1[453500]
++ * 1.001745 ->exp:0-> val0[1]val1[1745]
++ * 1.001745 ->exp:2-> val0[100]val1[174500]
++ * 1.001745 ->exp:4-> val0[10017]val1[450000]
++ * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ */
+-static void adjust_exponent_nano(int *val0, int *val1, int scale0,
++static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+ int scale1, int exp)
+ {
+ int i;
+@@ -285,32 +285,32 @@ static void adjust_exponent_nano(int *val0, int *val1, int scale0,
+ if (exp > 0) {
+ *val0 = scale0 * pow_10(exp);
+ res = 0;
+- if (exp > 9) {
++ if (exp > 6) {
+ *val1 = 0;
+ return;
+ }
+ for (i = 0; i < exp; ++i) {
+- x = scale1 / pow_10(8 - i);
++ x = scale1 / pow_10(5 - i);
+ res += (pow_10(exp - 1 - i) * x);
+- scale1 = scale1 % pow_10(8 - i);
++ scale1 = scale1 % pow_10(5 - i);
+ }
+ *val0 += res;
+ *val1 = scale1 * pow_10(exp);
+ } else if (exp < 0) {
+ exp = abs(exp);
+- if (exp > 9) {
++ if (exp > 6) {
+ *val0 = *val1 = 0;
+ return;
+ }
+ *val0 = scale0 / pow_10(exp);
+ rem = scale0 % pow_10(exp);
+ res = 0;
+- for (i = 0; i < (9 - exp); ++i) {
+- x = scale1 / pow_10(8 - i);
+- res += (pow_10(8 - exp - i) * x);
+- scale1 = scale1 % pow_10(8 - i);
++ for (i = 0; i < (6 - exp); ++i) {
++ x = scale1 / pow_10(5 - i);
++ res += (pow_10(5 - exp - i) * x);
++ scale1 = scale1 % pow_10(5 - i);
+ }
+- *val1 = rem * pow_10(9 - exp) + res;
++ *val1 = rem * pow_10(6 - exp) + res;
+ } else {
+ *val0 = scale0;
+ *val1 = scale1;
+@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id,
+ unit_conversion[i].unit == attr_info->units) {
+ exp = hid_sensor_convert_exponent(
+ attr_info->unit_expo);
+- adjust_exponent_nano(val0, val1,
++ adjust_exponent_micro(val0, val1,
+ unit_conversion[i].scale_val0,
+ unit_conversion[i].scale_val1, exp);
+ break;
+ }
+ }
+
+- return IIO_VAL_INT_PLUS_NANO;
++ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ EXPORT_SYMBOL(hid_sensor_format_scale);
+
+diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
+index 32a5946..2d5282e 100644
+--- a/drivers/iio/common/st_sensors/st_sensors_core.c
++++ b/drivers/iio/common/st_sensors/st_sensors_core.c
+@@ -619,7 +619,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
+ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+- int i, len = 0, q, r;
++ int i, len = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+@@ -628,10 +628,8 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
+ break;
+
+- q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
+- r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
+-
+- len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
++ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
++ sdata->sensor_settings->fs.fs_avl[i].gain);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ buf[len - 1] = '\n';
+diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
+index a97e802c..b98b9d9 100644
+--- a/drivers/iio/orientation/hid-sensor-rotation.c
++++ b/drivers/iio/orientation/hid-sensor-rotation.c
+@@ -335,7 +335,6 @@ static struct platform_driver hid_dev_rot_platform_driver = {
+ .id_table = hid_dev_rot_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+- .pm = &hid_sensor_pm_ops,
+ },
+ .probe = hid_dev_rot_probe,
+ .remove = hid_dev_rot_remove,
+diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
+index 1ebc2c1..6f2e0e4 100644
+--- a/drivers/input/rmi4/rmi_i2c.c
++++ b/drivers/input/rmi4/rmi_i2c.c
+@@ -221,21 +221,6 @@ static const struct of_device_id rmi_i2c_of_match[] = {
+ MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
+ #endif
+
+-static void rmi_i2c_regulator_bulk_disable(void *data)
+-{
+- struct rmi_i2c_xport *rmi_i2c = data;
+-
+- regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+- rmi_i2c->supplies);
+-}
+-
+-static void rmi_i2c_unregister_transport(void *data)
+-{
+- struct rmi_i2c_xport *rmi_i2c = data;
+-
+- rmi_unregister_transport_device(&rmi_i2c->xport);
+-}
+-
+ static int rmi_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+@@ -279,12 +264,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
+ if (retval < 0)
+ return retval;
+
+- retval = devm_add_action_or_reset(&client->dev,
+- rmi_i2c_regulator_bulk_disable,
+- rmi_i2c);
+- if (retval)
+- return retval;
+-
+ of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
+ &rmi_i2c->startup_delay);
+
+@@ -315,11 +294,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
+ client->addr);
+ return retval;
+ }
+- retval = devm_add_action_or_reset(&client->dev,
+- rmi_i2c_unregister_transport,
+- rmi_i2c);
+- if (retval)
+- return retval;
+
+ retval = rmi_i2c_init_irq(client);
+ if (retval < 0)
+@@ -330,6 +304,17 @@ static int rmi_i2c_probe(struct i2c_client *client,
+ return 0;
+ }
+
++static int rmi_i2c_remove(struct i2c_client *client)
++{
++ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
++
++ rmi_unregister_transport_device(&rmi_i2c->xport);
++ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
++ rmi_i2c->supplies);
++
++ return 0;
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int rmi_i2c_suspend(struct device *dev)
+ {
+@@ -446,6 +431,7 @@ static struct i2c_driver rmi_i2c_driver = {
+ },
+ .id_table = rmi_id,
+ .probe = rmi_i2c_probe,
++ .remove = rmi_i2c_remove,
+ };
+
+ module_i2c_driver(rmi_i2c_driver);
+diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
+index 4ebef60..55bd1b3 100644
+--- a/drivers/input/rmi4/rmi_spi.c
++++ b/drivers/input/rmi4/rmi_spi.c
+@@ -396,13 +396,6 @@ static inline int rmi_spi_of_probe(struct spi_device *spi,
+ }
+ #endif
+
+-static void rmi_spi_unregister_transport(void *data)
+-{
+- struct rmi_spi_xport *rmi_spi = data;
+-
+- rmi_unregister_transport_device(&rmi_spi->xport);
+-}
+-
+ static int rmi_spi_probe(struct spi_device *spi)
+ {
+ struct rmi_spi_xport *rmi_spi;
+@@ -471,11 +464,6 @@ static int rmi_spi_probe(struct spi_device *spi)
+ dev_err(&spi->dev, "failed to register transport.\n");
+ return retval;
+ }
+- retval = devm_add_action_or_reset(&spi->dev,
+- rmi_spi_unregister_transport,
+- rmi_spi);
+- if (retval)
+- return retval;
+
+ retval = rmi_spi_init_irq(spi);
+ if (retval < 0)
+@@ -485,6 +473,15 @@ static int rmi_spi_probe(struct spi_device *spi)
+ return 0;
+ }
+
++static int rmi_spi_remove(struct spi_device *spi)
++{
++ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
++
++ rmi_unregister_transport_device(&rmi_spi->xport);
++
++ return 0;
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int rmi_spi_suspend(struct device *dev)
+ {
+@@ -580,6 +577,7 @@ static struct spi_driver rmi_spi_driver = {
+ },
+ .id_table = rmi_id,
+ .probe = rmi_spi_probe,
++ .remove = rmi_spi_remove,
+ };
+
+ module_spi_driver(rmi_spi_driver);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 822fc4a..96de97a 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1654,9 +1654,6 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
+
+ free_pagetable(&dom->domain);
+
+- if (dom->domain.id)
+- domain_id_free(dom->domain.id);
+-
+ kfree(dom);
+ }
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 1257b0b..ebb5bf3 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1711,7 +1711,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
+ if (!iommu->domains || !iommu->domain_ids)
+ return;
+
+-again:
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+ struct dmar_domain *domain;
+@@ -1724,19 +1723,10 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
+
+ domain = info->domain;
+
+- __dmar_remove_one_dev_info(info);
++ dmar_remove_one_dev_info(domain, info->dev);
+
+- if (!domain_type_is_vm_or_si(domain)) {
+- /*
+- * The domain_exit() function can't be called under
+- * device_domain_lock, as it takes this lock itself.
+- * So release the lock here and re-run the loop
+- * afterwards.
+- */
+- spin_unlock_irqrestore(&device_domain_lock, flags);
++ if (!domain_type_is_vm_or_si(domain))
+ domain_exit(domain);
+- goto again;
+- }
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
+index f50e51c..def8ca1 100644
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -633,10 +633,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+ {
+ struct arm_v7s_io_pgtable *data;
+
+-#ifdef PHYS_OFFSET
+- if (upper_32_bits(PHYS_OFFSET))
+- return NULL;
+-#endif
+ if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
+ return NULL;
+
+diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
+index f73e108..bf890c3 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_core.c
++++ b/drivers/media/usb/dvb-usb/dib0700_core.c
+@@ -677,7 +677,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ struct dvb_usb_device *d = purb->context;
+ struct dib0700_rc_response *poll_reply;
+ enum rc_type protocol;
+- u32 keycode;
++ u32 uninitialized_var(keycode);
+ u8 toggle;
+
+ deb_info("%s()\n", __func__);
+@@ -719,8 +719,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ poll_reply->nec.data == 0x00 &&
+ poll_reply->nec.not_data == 0xff) {
+ poll_reply->data_state = 2;
+- rc_repeat(d->rc_dev);
+- goto resubmit;
++ break;
+ }
+
+ if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
+diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
+index 75b9d4a..e9e6ea3 100644
+--- a/drivers/misc/mei/bus-fixup.c
++++ b/drivers/misc/mei/bus-fixup.c
+@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
+
+ ret = 0;
+ bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+- if (bytes_recv < if_version_length) {
++ if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ dev_err(bus->dev, "Could not read IF version\n");
+ ret = -EIO;
+ goto err;
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 6ef1e3c..c57eb32 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -26,8 +26,6 @@
+ #include "mmc_ops.h"
+ #include "sd_ops.h"
+
+-#define DEFAULT_CMD6_TIMEOUT_MS 500
+-
+ static const unsigned int tran_exp[] = {
+ 10000, 100000, 1000000, 10000000,
+ 0, 0, 0, 0
+@@ -573,7 +571,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ card->erased_byte = 0x0;
+
+ /* eMMC v4.5 or later */
+- card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
+ if (card->ext_csd.rev >= 6) {
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 44ecebd..d839147 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, mmc);
+
+- spin_lock_init(&host->lock);
+-
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ dev_name(&pdev->dev), host);
+ if (ret)
+ goto out_free_dma;
+
++ spin_lock_init(&host->lock);
++
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto out_free_dma;
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 90ed2e1..8ef44a2a 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -647,7 +647,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
+ if (msm_host->pwr_irq < 0) {
+ dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
+ msm_host->pwr_irq);
+- ret = msm_host->pwr_irq;
+ goto clk_disable;
+ }
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 6eb8f07..a8a022a 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2269,8 +2269,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+- if (mrq)
++ if (mrq) {
++ host->mrqs_done[i] = NULL;
+ break;
++ }
+ }
+
+ if (!mrq) {
+@@ -2301,17 +2303,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ * upon error conditions.
+ */
+ if (sdhci_needs_reset(host, mrq)) {
+- /*
+- * Do not finish until command and data lines are available for
+- * reset. Note there can only be one other mrq, so it cannot
+- * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+- * would both be null.
+- */
+- if (host->cmd || host->data_cmd) {
+- spin_unlock_irqrestore(&host->lock, flags);
+- return true;
+- }
+-
+ /* Some controllers need this kick or reset won't work here */
+ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
+ /* This is to force an update */
+@@ -2319,8 +2310,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
+
+ /* Spec says we should do both at the same time, but Ricoh
+ controllers do not like that. */
+- sdhci_do_reset(host, SDHCI_RESET_CMD);
+- sdhci_do_reset(host, SDHCI_RESET_DATA);
++ if (!host->cmd)
++ sdhci_do_reset(host, SDHCI_RESET_CMD);
++ if (!host->data_cmd)
++ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
+ host->pending_reset = false;
+ }
+@@ -2328,8 +2321,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
+ if (!sdhci_has_requests(host))
+ sdhci_led_deactivate(host);
+
+- host->mrqs_done[i] = NULL;
+-
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+@@ -2509,6 +2500,9 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ if (!host->data) {
+ struct mmc_command *data_cmd = host->data_cmd;
+
++ if (data_cmd)
++ host->data_cmd = NULL;
++
+ /*
+ * The "data complete" interrupt is also used to
+ * indicate that a busy state has ended. See comment
+@@ -2516,13 +2510,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
+ */
+ if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+- host->data_cmd = NULL;
+ data_cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, data_cmd->mrq);
+ return;
+ }
+ if (intmask & SDHCI_INT_DATA_END) {
+- host->data_cmd = NULL;
+ /*
+ * Some cards handle busy-end interrupt
+ * before the command completed, so make
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 6b46a37..c74d164 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -9001,7 +9001,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ return 0;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+- 0, 0, nlflags, filter_mask, NULL);
++ nlflags, 0, 0, filter_mask, NULL);
+ }
+
+ /* Hardware supports L4 tunnel length of 128B (=2^7) which includes
+diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
+index 6f9563a..83deda4 100644
+--- a/drivers/nfc/mei_phy.c
++++ b/drivers/nfc/mei_phy.c
+@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
+ return -ENOMEM;
+
+ bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
+- if (bytes_recv < 0 || bytes_recv < if_version_length) {
++ if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ pr_err("Could not read IF version\n");
+ r = -EIO;
+ goto err;
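+
+ Note that both sides of the mei_phy hunk keep the explicit
+ bytes_recv < 0 test ahead of the size comparison, and it is load-bearing:
+ if the right-hand side has an unsigned type such as size_t, a negative
+ ssize_t converts to a huge unsigned value and the length test alone would
+ pass. A stand-alone demonstration (types assumed, not lifted from the
+ driver):
+
+   #include <stdio.h>
+   #include <sys/types.h>
+
+   int main(void)
+   {
+           ssize_t bytes_recv = -5;  /* e.g. a failed recv() */
+           size_t want = 30;
+
+           /* The usual arithmetic conversions turn -5 into a huge
+            * unsigned value, so this alone does not catch it... */
+           if ((size_t)bytes_recv < want)
+                   printf("short read caught\n");
+           else
+                   printf("size test silently passed\n");
+
+           /* ...hence the two-step check used in the driver. */
+           if (bytes_recv < 0 || (size_t)bytes_recv < want)
+                   printf("caught by the signedness test\n");
+           return 0;
+   }
+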
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index da134a0..60f7eab 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1531,9 +1531,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
+ return 0;
+ }
+
+-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
++static void nvme_disable_io_queues(struct nvme_dev *dev)
+ {
+- int pass;
++ int pass, queues = dev->online_queues - 1;
+ unsigned long timeout;
+ u8 opcode = nvme_admin_delete_sq;
+
+@@ -1678,7 +1678,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
+
+ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ {
+- int i, queues;
++ int i;
+ u32 csts = -1;
+
+ del_timer_sync(&dev->watchdog_timer);
+@@ -1689,7 +1689,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ csts = readl(dev->bar + NVME_REG_CSTS);
+ }
+
+- queues = dev->online_queues - 1;
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(dev->queues[i]);
+
+@@ -1701,7 +1700,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
+ if (dev->queue_count)
+ nvme_suspend_queue(dev->queues[0]);
+ } else {
+- nvme_disable_io_queues(dev, queues);
++ nvme_disable_io_queues(dev);
+ nvme_disable_admin_queue(dev, shutdown);
+ }
+ nvme_pci_disable(dev);
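+
+ The nvme hunks undo an ordering change: in 4.8.9, nvme_dev_disable()
+ samples dev->online_queues before the suspend loop runs and passes the
+ count into nvme_disable_io_queues(), instead of letting that function
+ read the field after teardown has already started. The general
+ snapshot-before-mutate shape, as a toy (struct and numbers invented):
+
+   #include <stdio.h>
+
+   struct dev { int online_queues; };
+
+   static void suspend_queue(struct dev *d) { d->online_queues--; }
+
+   int main(void)
+   {
+           struct dev d = { .online_queues = 5 };
+           int i, queues;
+
+           /* Capture the count before the loop mutates it; reading
+            * d.online_queues afterwards would yield 1, not 5. */
+           queues = d.online_queues - 1;
+
+           for (i = d.online_queues - 1; i > 0; i--)
+                   suspend_queue(&d);
+
+           printf("I/O queues to delete: %d (online now %d)\n",
+                  queues, d.online_queues);
+           return 0;
+   }
+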
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 9526e34..66c4d8f 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -121,14 +121,6 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
+ return -EINVAL;
+ }
+
+- /*
+- * If we have a shadow copy in RAM, the PCI device doesn't respond
+- * to the shadow range, so we don't need to claim it, and upstream
+- * bridges don't need to route the range to the device.
+- */
+- if (res->flags & IORESOURCE_ROM_SHADOW)
+- return 0;
+-
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+index 5d1e505c3..7f77007 100644
+--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
++++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = {
+
+ static int __init iproc_gpio_init(void)
+ {
+- return platform_driver_register(&iproc_gpio_driver);
++ return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe);
+ }
+ arch_initcall_sync(iproc_gpio_init);
+diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+index c8deb8b..35783db 100644
+--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
++++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = {
+
+ static int __init nsp_gpio_init(void)
+ {
+- return platform_driver_register(&nsp_gpio_driver);
++ return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe);
+ }
+ arch_initcall_sync(nsp_gpio_init);
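+
+ Both Broadcom GPIO hunks swap platform_driver_register() back to
+ platform_driver_probe(). The practical difference: platform_driver_probe()
+ binds exactly once at init time and opts out of deferred probing (handy
+ when the probe routine lives in __init memory), while
+ platform_driver_register() keeps .probe around so -EPROBE_DEFER retries
+ work. A skeletal module sketch, not tied to either driver's real probe
+ logic:
+
+   #include <linux/init.h>
+   #include <linux/module.h>
+   #include <linux/platform_device.h>
+
+   static int demo_gpio_probe(struct platform_device *pdev)
+   {
+           return 0;  /* claim the device */
+   }
+
+   static struct platform_driver demo_gpio_driver = {
+           .driver = {
+                   .name = "demo-gpio",
+           },
+   };
+
+   static int __init demo_gpio_init(void)
+   {
+           /* One-shot, non-deferrable binding; the probe routine is
+            * passed separately so it may be marked __init. Using
+            * platform_driver_register() with a .probe member instead
+            * would keep deferred probing available. */
+           return platform_driver_probe(&demo_gpio_driver, demo_gpio_probe);
+   }
+   arch_initcall_sync(demo_gpio_init);
+
+   MODULE_LICENSE("GPL");
+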
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index bc31504..0fe8fad 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1634,15 +1634,12 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+-static int chv_pinctrl_suspend_noirq(struct device *dev)
++static int chv_pinctrl_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+- unsigned long flags;
+ int i;
+
+- raw_spin_lock_irqsave(&chv_lock, flags);
+-
+ pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
+
+ for (i = 0; i < pctrl->community->npins; i++) {
+@@ -1663,20 +1660,15 @@ static int chv_pinctrl_suspend_noirq(struct device *dev)
+ ctx->padctrl1 = readl(reg);
+ }
+
+- raw_spin_unlock_irqrestore(&chv_lock, flags);
+-
+ return 0;
+ }
+
+-static int chv_pinctrl_resume_noirq(struct device *dev)
++static int chv_pinctrl_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+- unsigned long flags;
+ int i;
+
+- raw_spin_lock_irqsave(&chv_lock, flags);
+-
+ /*
+ * Mask all interrupts before restoring per-pin configuration
+ * registers because we don't know in which state BIOS left them
+@@ -1721,15 +1713,12 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
+ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
+ chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+
+- raw_spin_unlock_irqrestore(&chv_lock, flags);
+-
+ return 0;
+ }
+ #endif
+
+ static const struct dev_pm_ops chv_pinctrl_pm_ops = {
+- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq,
+- chv_pinctrl_resume_noirq)
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume)
+ };
+
+ static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
+diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
+index 2df07ee..feac457 100644
+--- a/drivers/platform/x86/toshiba-wmi.c
++++ b/drivers/platform/x86/toshiba-wmi.c
+@@ -24,15 +24,14 @@
+ #include <linux/acpi.h>
+ #include <linux/input.h>
+ #include <linux/input/sparse-keymap.h>
+-#include <linux/dmi.h>
+
+ MODULE_AUTHOR("Azael Avalos");
+ MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
+ MODULE_LICENSE("GPL");
+
+-#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
++#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+
+-MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
++MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID);
+
+ static struct input_dev *toshiba_wmi_input_dev;
+
+@@ -64,16 +63,6 @@ static void toshiba_wmi_notify(u32 value, void *context)
+ kfree(response.pointer);
+ }
+
+-static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
+- {
+- .ident = "Toshiba laptop",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+- },
+- },
+- {}
+-};
+-
+ static int __init toshiba_wmi_input_setup(void)
+ {
+ acpi_status status;
+@@ -92,7 +81,7 @@ static int __init toshiba_wmi_input_setup(void)
+ if (err)
+ goto err_free_dev;
+
+- status = wmi_install_notify_handler(WMI_EVENT_GUID,
++ status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID,
+ toshiba_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ err = -EIO;
+@@ -106,7 +95,7 @@ static int __init toshiba_wmi_input_setup(void)
+ return 0;
+
+ err_remove_notifier:
+- wmi_remove_notify_handler(WMI_EVENT_GUID);
++ wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ err_free_keymap:
+ sparse_keymap_free(toshiba_wmi_input_dev);
+ err_free_dev:
+@@ -116,7 +105,7 @@ static int __init toshiba_wmi_input_setup(void)
+
+ static void toshiba_wmi_input_destroy(void)
+ {
+- wmi_remove_notify_handler(WMI_EVENT_GUID);
++ wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ sparse_keymap_free(toshiba_wmi_input_dev);
+ input_unregister_device(toshiba_wmi_input_dev);
+ }
+@@ -125,8 +114,7 @@ static int __init toshiba_wmi_init(void)
+ {
+ int ret;
+
+- if (!wmi_has_guid(WMI_EVENT_GUID) ||
+- !dmi_check_system(toshiba_wmi_dmi_table))
++ if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ return -ENODEV;
+
+ ret = toshiba_wmi_input_setup();
+@@ -142,7 +130,7 @@ static int __init toshiba_wmi_init(void)
+
+ static void __exit toshiba_wmi_exit(void)
+ {
+- if (wmi_has_guid(WMI_EVENT_GUID))
++ if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ toshiba_wmi_input_destroy();
+ }
+
+diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
+index 8895f77..b4478cc 100644
+--- a/drivers/rtc/rtc-pcf2123.c
++++ b/drivers/rtc/rtc-pcf2123.c
+@@ -182,8 +182,7 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
+ }
+
+ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
+- const char *buffer, size_t count)
+-{
++ const char *buffer, size_t count) {
+ struct pcf2123_sysfs_reg *r;
+ unsigned long reg;
+ unsigned long val;
+@@ -200,7 +199,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
+ if (ret)
+ return ret;
+
+- ret = pcf2123_write_reg(dev, reg, val);
++ pcf2123_write_reg(dev, reg, val);
+ if (ret < 0)
+ return -EIO;
+ return count;
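+
+ The rtc-pcf2123 hunk restores a dropped assignment: the 4.8.8 code calls
+ pcf2123_write_reg() without capturing its result, so the `if (ret < 0)`
+ that follows tests whatever kstrtoul() left in ret and the write error is
+ lost. The failure mode in a stand-alone toy:
+
+   #include <stdio.h>
+
+   static int write_reg(int reg, int val) { return -5; /* pretend -EIO */ }
+
+   int main(void)
+   {
+           int ret = 0;  /* last set by an earlier, successful call */
+
+           write_reg(0x03, 0x42);        /* result dropped */
+           if (ret < 0)
+                   printf("error reported\n");
+           else
+                   printf("error silently lost (ret is stale)\n");
+
+           ret = write_reg(0x03, 0x42);  /* captured and tested */
+           if (ret < 0)
+                   printf("error reported: %d\n", ret);
+           return 0;
+   }
+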
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 920c421..752b5c9 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -792,7 +792,6 @@ static void alua_rtpg_work(struct work_struct *work)
+ WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
+ WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
+ spin_unlock_irqrestore(&pg->lock, flags);
+- kref_put(&pg->kref, release_port_group);
+ return;
+ }
+ if (pg->flags & ALUA_SYNC_STPG)
+@@ -890,7 +889,6 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
+ /* Do not queue if the worker is already running */
+ if (!(pg->flags & ALUA_PG_RUNNING)) {
+ kref_get(&pg->kref);
+- sdev = NULL;
+ start_queue = 1;
+ }
+ }
+@@ -902,8 +900,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
+ if (start_queue &&
+ !queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
+- if (sdev)
+- scsi_device_put(sdev);
++ scsi_device_put(sdev);
+ kref_put(&pg->kref, release_port_group);
+ }
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 46c0f5e..4cb7990 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget)
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
++ sas_target_priv_data->raid_device = raid_device;
+ if (ioc->is_warpdrive)
+- sas_target_priv_data->raid_device = raid_device;
+- raid_device->starget = starget;
++ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index e46e2c5..2674f4c 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -2341,8 +2341,6 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+- if (test_bit(UNLOADING, &vha->dpc_flags))
+- return 1;
+ if (!vha->host)
+ return 1;
+ if (time > vha->hw->loop_reset_delay * HZ)
+diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
+index 5ab49a7..7043eb0 100644
+--- a/drivers/staging/comedi/drivers/ni_tio.c
++++ b/drivers/staging/comedi/drivers/ni_tio.c
+@@ -207,8 +207,7 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
+ * clock period is specified by user with prescaling
+ * already taken into account.
+ */
+- *period_ps = counter->clock_period_ps;
+- return 0;
++ return counter->clock_period_ps;
+ }
+
+ switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
+diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
+index 98d9473..24c348d 100644
+--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
+@@ -655,7 +655,6 @@ static void ad5933_work(struct work_struct *work)
+ __be16 buf[2];
+ int val[2];
+ unsigned char status;
+- int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (st->state == AD5933_CTRL_INIT_START_FREQ) {
+@@ -663,22 +662,19 @@ static void ad5933_work(struct work_struct *work)
+ ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
+ st->state = AD5933_CTRL_START_SWEEP;
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+- goto out;
++ mutex_unlock(&indio_dev->mlock);
++ return;
+ }
+
+- ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+- if (ret)
+- goto out;
++ ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+
+ if (status & AD5933_STAT_DATA_VALID) {
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+- ret = ad5933_i2c_read(st->client,
++ ad5933_i2c_read(st->client,
+ test_bit(1, indio_dev->active_scan_mask) ?
+ AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
+ scan_count * 2, (u8 *)buf);
+- if (ret)
+- goto out;
+
+ if (scan_count == 2) {
+ val[0] = be16_to_cpu(buf[0]);
+@@ -690,7 +686,8 @@ static void ad5933_work(struct work_struct *work)
+ } else {
+ /* no data available - try again later */
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+- goto out;
++ mutex_unlock(&indio_dev->mlock);
++ return;
+ }
+
+ if (status & AD5933_STAT_SWEEP_DONE) {
+@@ -703,7 +700,7 @@ static void ad5933_work(struct work_struct *work)
+ ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
+ schedule_delayed_work(&st->work, st->poll_time_jiffies);
+ }
+-out:
++
+ mutex_unlock(&indio_dev->mlock);
+ }
+
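+
+ The ad5933 hunks trade the 4.8.9 shape -- every early exit funnels
+ through one `out:` label that drops the mutex, with each i2c read's
+ return value checked -- for repeated unlock-and-return sites. The
+ single-exit form keeps the lock/unlock pairing in one place; a runnable
+ miniature:
+
+   #include <pthread.h>
+   #include <stdio.h>
+
+   static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+   static int data_valid;  /* stand-in for AD5933_STAT_DATA_VALID */
+
+   static int poll_once(void)
+   {
+           int ret = 0;
+
+           pthread_mutex_lock(&lock);
+           if (!data_valid) {
+                   ret = -1;  /* no data yet -- try again later */
+                   goto out;  /* every early exit funnels here */
+           }
+           printf("read a sample\n");
+   out:
+           pthread_mutex_unlock(&lock);  /* one unlock site, never missed */
+           return ret;
+   }
+
+   int main(void)
+   {
+           printf("first poll: %d\n", poll_once());
+           data_valid = 1;
+           printf("second poll: %d\n", poll_once());
+           return 0;
+   }
+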
+diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
+index 499952c..a324322 100644
+--- a/drivers/staging/nvec/nvec_ps2.c
++++ b/drivers/staging/nvec/nvec_ps2.c
+@@ -106,12 +106,13 @@ static int nvec_mouse_probe(struct platform_device *pdev)
+ {
+ struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
+ struct serio *ser_dev;
++ char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
+
+- ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
++ ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ if (!ser_dev)
+ return -ENOMEM;
+
+- ser_dev->id.type = SERIO_8042;
++ ser_dev->id.type = SERIO_PS_PSTHRU;
+ ser_dev->write = ps2_sendcommand;
+ ser_dev->start = ps2_startstreaming;
+ ser_dev->stop = ps2_stopstreaming;
+@@ -126,6 +127,9 @@ static int nvec_mouse_probe(struct platform_device *pdev)
+
+ serio_register_port(ser_dev);
+
++ /* mouse reset */
++ nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
++
+ return 0;
+ }
+
+diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
+index 4ed6d8d..9552479 100644
+--- a/drivers/staging/sm750fb/ddk750_reg.h
++++ b/drivers/staging/sm750fb/ddk750_reg.h
+@@ -601,13 +601,13 @@
+
+ #define PANEL_PLANE_TL 0x08001C
+ #define PANEL_PLANE_TL_TOP_SHIFT 16
+-#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
+-#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
++#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16)
++#define PANEL_PLANE_TL_LEFT_MASK 0xeff
+
+ #define PANEL_PLANE_BR 0x080020
+ #define PANEL_PLANE_BR_BOTTOM_SHIFT 16
+-#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+-#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
++#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16)
++#define PANEL_PLANE_BR_RIGHT_MASK 0xeff
+
+ #define PANEL_HORIZONTAL_TOTAL 0x080024
+ #define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 21aeac5..8bbde52 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2026,7 +2026,6 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
+ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+ {
+- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned long flags;
+ unsigned int old_mode, mode, imr, quot, baud;
+
+@@ -2130,29 +2129,11 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ mode |= ATMEL_US_USMODE_RS485;
+ } else if (termios->c_cflag & CRTSCTS) {
+ /* RS232 with hardware handshake (RTS/CTS) */
+- if (atmel_use_fifo(port) &&
+- !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+- /*
+- * with ATMEL_US_USMODE_HWHS set, the controller will
+- * be able to drive the RTS pin high/low when the RX
+- * FIFO is above RXFTHRES/below RXFTHRES2.
+- * It will also disable the transmitter when the CTS
+- * pin is high.
+- * This mode is not activated if CTS pin is a GPIO
+- * because in this case, the transmitter is always
+- * disabled (there must be an internal pull-up
+- * responsible for this behaviour).
+- * If the RTS pin is a GPIO, the controller won't be
+- * able to drive it according to the FIFO thresholds,
+- * but it will be handled by the driver.
+- */
+- mode |= ATMEL_US_USMODE_HWHS;
++ if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
++ dev_info(port->dev, "not enabling hardware flow control because DMA is used");
++ termios->c_cflag &= ~CRTSCTS;
+ } else {
+- /*
+- * For platforms without FIFO, the flow control is
+- * handled by the driver.
+- */
+- mode |= ATMEL_US_USMODE_NORMAL;
++ mode |= ATMEL_US_USMODE_HWHS;
+ }
+ } else {
+ /* RS232 without hardware handshake */
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 3ca9fdb..0f3f62e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -946,6 +946,8 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
+ DECLARE_WAITQUEUE(wait, current);
+ struct async_icount old, new;
+
++ if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
++ return -EINVAL;
+ do {
+ spin_lock_irq(&acm->read_lock);
+ old = acm->oldcount;
+@@ -1173,8 +1175,6 @@ static int acm_probe(struct usb_interface *intf,
+ if (quirks == IGNORE_DEVICE)
+ return -ENODEV;
+
+- memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));
+-
+ num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
+
+ /* handle quirks deadly to normal probing*/
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 2d47010..35d0924 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -669,14 +669,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ return 0;
+
+ err4:
+- phy_power_off(dwc->usb3_generic_phy);
++ phy_power_off(dwc->usb2_generic_phy);
+
+ err3:
+- phy_power_off(dwc->usb2_generic_phy);
++ phy_power_off(dwc->usb3_generic_phy);
+
+ err2:
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
++ dwc3_core_exit(dwc);
+
+ err1:
+ usb_phy_shutdown(dwc->usb2_phy);
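+
+ The dwc3 hunk is unwind-ladder surgery: 4.8.9 swaps which PHY each error
+ label powers off and drops a dwc3_core_exit() call from the err2 path;
+ this reverse hunk restores the older pairing. The discipline at stake --
+ labels in reverse order of setup, each undoing only what succeeded before
+ the jump -- in a runnable skeleton with stub init/undo steps:
+
+   #include <stdio.h>
+
+   static int init_a(void) { return 0; }
+   static int init_b(void) { return 0; }
+   static int init_c(void) { return -1; }  /* force a failure */
+
+   static void undo_a(void) { printf("undo a\n"); }
+   static void undo_b(void) { printf("undo b\n"); }
+
+   int main(void)
+   {
+           if (init_a())
+                   goto err0;
+           if (init_b())
+                   goto err_a;
+           if (init_c())
+                   goto err_b;
+           return 0;
+
+   err_b:
+           undo_b();  /* pairing a label with the wrong undo call is
+                       * exactly the bug class fixed above */
+   err_a:
+           undo_a();
+   err0:
+           return 1;
+   }
+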
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index f590ada..9b9e71f 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -585,6 +585,14 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+
+ req->length = length;
+
++ /* throttle high/super speed IRQ rate back slightly */
++ if (gadget_is_dualspeed(dev->gadget))
++ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
++ dev->gadget->speed == USB_SPEED_SUPER)) &&
++ !list_empty(&dev->tx_reqs))
++ ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
++ : 0;
++
+ retval = usb_ep_queue(in, req, GFP_ATOMIC);
+ switch (retval) {
+ default:
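+
+ The u_ether block being removed throttles TX completion interrupts at
+ high/super speed: with requests still queued, only every qmult-th request
+ asks for an interrupt. The predicate, re-rendered as a stand-alone
+ function (qlen, qmult and the speed/queue flags are assumed inputs, not
+ the gadget's real state):
+
+   #include <stdio.h>
+   #include <stdbool.h>
+
+   static bool suppress_irq(int qlen, int qmult, bool fast, bool queued)
+   {
+           if (!fast || !queued)
+                   return false;           /* always interrupt */
+           return (qlen % qmult) != 0;     /* 1 interrupt per qmult reqs */
+   }
+
+   int main(void)
+   {
+           int qlen;
+
+           for (qlen = 1; qlen <= 10; qlen++)
+                   printf("req %2d -> %s\n", qlen,
+                          suppress_irq(qlen, 5, true, true) ?
+                          "no interrupt" : "interrupt");
+           return 0;
+   }
+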
+diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
+index 74265b2..6abb83c 100644
+--- a/drivers/watchdog/watchdog_core.c
++++ b/drivers/watchdog/watchdog_core.c
+@@ -349,7 +349,7 @@ int devm_watchdog_register_device(struct device *dev,
+ struct watchdog_device **rcwdd;
+ int ret;
+
+- rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
++ rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd),
+ GFP_KERNEL);
+ if (!rcwdd)
+ return -ENOMEM;
+diff --git a/fs/coredump.c b/fs/coredump.c
+index eb9c92c..281b768 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -1,7 +1,6 @@
+ #include <linux/slab.h>
+ #include <linux/file.h>
+ #include <linux/fdtable.h>
+-#include <linux/freezer.h>
+ #include <linux/mm.h>
+ #include <linux/stat.h>
+ #include <linux/fcntl.h>
+@@ -424,9 +423,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
+ if (core_waiters > 0) {
+ struct core_thread *ptr;
+
+- freezer_do_not_count();
+ wait_for_completion(&core_state->startup);
+- freezer_count();
+ /*
+ * Wait for all the threads to become inactive, so that
+ * all the thread context (extended register state, like
+diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
+index 150c5a1..b629730 100644
+--- a/fs/nfs/nfs4session.c
++++ b/fs/nfs/nfs4session.c
+@@ -178,14 +178,12 @@ static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
+ __must_hold(&tbl->slot_tbl_lock)
+ {
+ struct nfs4_slot *slot;
+- int ret;
+
+ slot = nfs4_lookup_slot(tbl, slotid);
+- ret = PTR_ERR_OR_ZERO(slot);
+- if (!ret)
+- *seq_nr = slot->seq_nr;
+-
+- return ret;
++ if (IS_ERR(slot))
++ return PTR_ERR(slot);
++ *seq_nr = slot->seq_nr;
++ return 0;
+ }
+
+ /*
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 67d1d3e..c5eaf2f 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -318,7 +318,6 @@ struct pci_dev;
+ int acpi_pci_irq_enable (struct pci_dev *dev);
+ void acpi_penalize_isa_irq(int irq, int active);
+ bool acpi_isa_irq_available(int irq);
+-void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+ void acpi_pci_irq_disable (struct pci_dev *dev);
+
+ extern int ec_read(u8 addr, u8 *val);
+diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
+index 1d18af0..c46d2aa 100644
+--- a/include/linux/frontswap.h
++++ b/include/linux/frontswap.h
+@@ -106,9 +106,8 @@ static inline void frontswap_invalidate_area(unsigned type)
+
+ static inline void frontswap_init(unsigned type, unsigned long *map)
+ {
+-#ifdef CONFIG_FRONTSWAP
+- __frontswap_init(type, map);
+-#endif
++ if (frontswap_enabled())
++ __frontswap_init(type, map);
+ }
+
+ #endif /* _LINUX_FRONTSWAP_H */
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index 3584bc8..d6917b8 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -86,7 +86,6 @@ struct svc_rdma_op_ctxt {
+ unsigned long flags;
+ enum dma_data_direction direction;
+ int count;
+- unsigned int mapped_sges;
+ struct ib_sge sge[RPCSVC_MAXPAGES];
+ struct page *pages[RPCSVC_MAXPAGES];
+ };
+@@ -194,14 +193,6 @@ struct svcxprt_rdma {
+
+ #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+
+-/* Track DMA maps for this transport and context */
+-static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
+- struct svc_rdma_op_ctxt *ctxt)
+-{
+- ctxt->mapped_sges++;
+- atomic_inc(&rdma->sc_dma_used);
+-}
+-
+ /* svc_rdma_backchannel.c */
+ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
+ struct rpcrdma_msg *rmsgp,
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index 144fe6b..0a11396 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+ struct gen_pool_chunk *chunk;
+ unsigned long addr = 0;
+ int order = pool->min_alloc_order;
+- int nbits, start_bit, end_bit, remain;
++ int nbits, start_bit = 0, end_bit, remain;
+
+ #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+@@ -307,7 +307,6 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+ if (size > atomic_read(&chunk->avail))
+ continue;
+
+- start_bit = 0;
+ end_bit = chunk_size(chunk) >> order;
+ retry:
+ start_bit = algo(chunk->bits, end_bit, start_bit,
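+
+ The genalloc hunk moves `start_bit = 0` from the top of the per-chunk
+ loop back to the declaration. The 4.8.9 placement matters: a failed
+ search leaves start_bit pointing past the end of the previous chunk, so
+ the scan of the next chunk must restart from zero or it can miss free
+ space entirely. A toy reproduction:
+
+   #include <stdio.h>
+
+   #define CHUNKS 2
+   #define BITS   8
+
+   /* find the first '1' at or after start; returns BITS if none */
+   static int find_bit(const char *bits, int start)
+   {
+           int i;
+
+           for (i = start; i < BITS; i++)
+                   if (bits[i] == '1')
+                           return i;
+           return BITS;
+   }
+
+   int main(void)
+   {
+           const char *chunk[CHUNKS] = { "00000000", "10000000" };
+           int c, start_bit = 0;
+
+           for (c = 0; c < CHUNKS; c++) {
+                   /* start_bit = 0;   <-- without this per-chunk reset,
+                    * the failed scan of chunk 0 leaves start_bit == 8
+                    * and the free bit in chunk 1 is never found. */
+                   start_bit = find_bit(chunk[c], start_bit);
+                   printf("chunk %d: %s\n", c,
+                          start_bit < BITS ? "hit" : "miss");
+           }
+           return 0;
+   }
+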
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0ddce6a..770d83e 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1826,17 +1826,11 @@ static void return_unused_surplus_pages(struct hstate *h,
+ * is not the case is if a reserve map was changed between calls. It
+ * is the responsibility of the caller to notice the difference and
+ * take appropriate action.
+- *
+- * vma_add_reservation is used in error paths where a reservation must
+- * be restored when a newly allocated huge page must be freed. It is
+- * to be called after calling vma_needs_reservation to determine if a
+- * reservation exists.
+ */
+ enum vma_resv_mode {
+ VMA_NEEDS_RESV,
+ VMA_COMMIT_RESV,
+ VMA_END_RESV,
+- VMA_ADD_RESV,
+ };
+ static long __vma_reservation_common(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr,
+@@ -1862,14 +1856,6 @@ static long __vma_reservation_common(struct hstate *h,
+ region_abort(resv, idx, idx + 1);
+ ret = 0;
+ break;
+- case VMA_ADD_RESV:
+- if (vma->vm_flags & VM_MAYSHARE)
+- ret = region_add(resv, idx, idx + 1);
+- else {
+- region_abort(resv, idx, idx + 1);
+- ret = region_del(resv, idx, idx + 1);
+- }
+- break;
+ default:
+ BUG();
+ }
+@@ -1917,56 +1903,6 @@ static void vma_end_reservation(struct hstate *h,
+ (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
+ }
+
+-static long vma_add_reservation(struct hstate *h,
+- struct vm_area_struct *vma, unsigned long addr)
+-{
+- return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
+-}
+-
+-/*
+- * This routine is called to restore a reservation on error paths. In the
+- * specific error paths, a huge page was allocated (via alloc_huge_page)
+- * and is about to be freed. If a reservation for the page existed,
+- * alloc_huge_page would have consumed the reservation and set PagePrivate
+- * in the newly allocated page. When the page is freed via free_huge_page,
+- * the global reservation count will be incremented if PagePrivate is set.
+- * However, free_huge_page can not adjust the reserve map. Adjust the
+- * reserve map here to be consistent with global reserve count adjustments
+- * to be made by free_huge_page.
+- */
+-static void restore_reserve_on_error(struct hstate *h,
+- struct vm_area_struct *vma, unsigned long address,
+- struct page *page)
+-{
+- if (unlikely(PagePrivate(page))) {
+- long rc = vma_needs_reservation(h, vma, address);
+-
+- if (unlikely(rc < 0)) {
+- /*
+- * Rare out of memory condition in reserve map
+- * manipulation. Clear PagePrivate so that
+- * global reserve count will not be incremented
+- * by free_huge_page. This will make it appear
+- * as though the reservation for this page was
+- * consumed. This may prevent the task from
+- * faulting in the page at a later time. This
+- * is better than inconsistent global huge page
+- * accounting of reserve counts.
+- */
+- ClearPagePrivate(page);
+- } else if (rc) {
+- rc = vma_add_reservation(h, vma, address);
+- if (unlikely(rc < 0))
+- /*
+- * See above comment about rare out of
+- * memory condition.
+- */
+- ClearPagePrivate(page);
+- } else
+- vma_end_reservation(h, vma, address);
+- }
+-}
+-
+ struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
+@@ -3562,7 +3498,6 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
+ spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ out_release_all:
+- restore_reserve_on_error(h, vma, address, new_page);
+ put_page(new_page);
+ out_release_old:
+ put_page(old_page);
+@@ -3745,7 +3680,6 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ spin_unlock(ptl);
+ backout_unlocked:
+ unlock_page(page);
+- restore_reserve_on_error(h, vma, address, page);
+ put_page(page);
+ goto out;
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 19e796d..de88f33 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1112,10 +1112,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ }
+
+ if (!PageHuge(p) && PageTransHuge(hpage)) {
+- lock_page(p);
+- if (!PageAnon(p) || unlikely(split_huge_page(p))) {
+- unlock_page(p);
+- if (!PageAnon(p))
++ lock_page(hpage);
++ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
++ unlock_page(hpage);
++ if (!PageAnon(hpage))
+ pr_err("Memory failure: %#lx: non anonymous thp\n",
+ pfn);
+ else
+@@ -1126,7 +1126,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ put_hwpoison_page(p);
+ return -EBUSY;
+ }
+- unlock_page(p);
++ unlock_page(hpage);
++ get_hwpoison_page(p);
++ put_hwpoison_page(hpage);
+ VM_BUG_ON_PAGE(!page_count(p), p);
+ hpage = compound_head(p);
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 38aa5e0..971fc83 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1483,8 +1483,6 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+ copy_highpage(newpage, oldpage);
+ flush_dcache_page(newpage);
+
+- __SetPageLocked(newpage);
+- __SetPageSwapBacked(newpage);
+ SetPageUptodate(newpage);
+ set_page_private(newpage, swap_index);
+ SetPageSwapCache(newpage);
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 329b038..71f0b28 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -533,8 +533,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
+
+ s = create_cache(cache_name, root_cache->object_size,
+ root_cache->size, root_cache->align,
+- root_cache->flags & CACHE_CREATE_MASK,
+- root_cache->ctor, memcg, root_cache);
++ root_cache->flags, root_cache->ctor,
++ memcg, root_cache);
+ /*
+ * If we could not create a memcg cache, do not complain, because
+ * that's not critical at all as we can always proceed with the root
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index bf262e4..2657acc 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2218,8 +2218,6 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
+ swab32s(&swap_header->info.version);
+ swab32s(&swap_header->info.last_page);
+ swab32s(&swap_header->info.nr_badpages);
+- if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+- return 0;
+ for (i = 0; i < swap_header->info.nr_badpages; i++)
+ swab32s(&swap_header->info.badpages[i]);
+ }
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 3e9667e..3940b5d 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -537,7 +537,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
+ if (bat_priv->algo_ops->neigh.hardif_init)
+ bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
+
+- hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
++ hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+
+ out:
+ spin_unlock_bh(&hard_iface->neigh_list_lock);
+diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
+index dcbe67f..7d54e94 100644
+--- a/net/ceph/ceph_fs.c
++++ b/net/ceph/ceph_fs.c
+@@ -34,8 +34,7 @@ void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+ fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
+ fl->object_size = le32_to_cpu(legacy->fl_object_size);
+ fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
+- if (fl->pool_id == 0 && fl->stripe_unit == 0 &&
+- fl->stripe_count == 0 && fl->object_size == 0)
++ if (fl->pool_id == 0)
+ fl->pool_id = -1;
+ }
+ EXPORT_SYMBOL(ceph_file_layout_from_legacy);
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 1df2c8d..aa5847a 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -420,7 +420,7 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
+ char buf[NFLOGGER_NAME_LEN];
+ int r = 0;
+ int tindex = (unsigned long)table->extra1;
+- struct net *net = table->extra2;
++ struct net *net = current->nsproxy->net_ns;
+
+ if (write) {
+ struct ctl_table tmp = *table;
+@@ -474,6 +474,7 @@ static int netfilter_log_sysctl_init(struct net *net)
+ 3, "%d", i);
+ nf_log_sysctl_table[i].procname =
+ nf_log_sysctl_fnames[i];
++ nf_log_sysctl_table[i].data = NULL;
+ nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN;
+ nf_log_sysctl_table[i].mode = 0644;
+ nf_log_sysctl_table[i].proc_handler =
+@@ -483,9 +484,6 @@ static int netfilter_log_sysctl_init(struct net *net)
+ }
+ }
+
+- for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
+- table[i].extra2 = net;
+-
+ net->nf.nf_log_dir_header = register_net_sysctl(net,
+ "net/netfilter/nf_log",
+ table);
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 2761377..892b5e1 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -44,20 +44,18 @@
+ * being done.
+ *
+ * When the underlying transport disconnects, MRs are left in one of
+- * four states:
++ * three states:
+ *
+ * INVALID: The MR was not in use before the QP entered ERROR state.
++ * (Or, the LOCAL_INV WR has not completed or flushed yet).
+ *
+- * VALID: The MR was registered before the QP entered ERROR state.
+- *
+- * FLUSHED_FR: The MR was being registered when the QP entered ERROR
+- * state, and the pending WR was flushed.
++ * STALE: The MR was being registered or unregistered when the QP
++ * entered ERROR state, and the pending WR was flushed.
+ *
+- * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
+- * state, and the pending WR was flushed.
++ * VALID: The MR was registered before the QP entered ERROR state.
+ *
+- * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
+- * with ib_dereg_mr and then are re-initialized. Because MR recovery
++ * When frwr_op_map encounters STALE and VALID MRs, they are recovered
++ * with ib_dereg_mr and then are re-initialized. Because MR recovery
+ * allocates fresh resources, it is deferred to a workqueue, and the
+ * recovered MRs are placed back on the rb_mws list when recovery is
+ * complete. frwr_op_map allocates another MR for the current RPC while
+@@ -177,15 +175,12 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+ static void
+ frwr_op_recover_mr(struct rpcrdma_mw *mw)
+ {
+- enum rpcrdma_frmr_state state = mw->frmr.fr_state;
+ struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ int rc;
+
+ rc = __frwr_reset_mr(ia, mw);
+- if (state != FRMR_FLUSHED_LI)
+- ib_dma_unmap_sg(ia->ri_device,
+- mw->mw_sg, mw->mw_nents, mw->mw_dir);
++ ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (rc)
+ goto out_release;
+
+@@ -266,8 +261,10 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
+ }
+
+ static void
+-__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
++__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
++ const char *wr)
+ {
++ frmr->fr_state = FRMR_IS_STALE;
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
+ wr, ib_wc_status_msg(wc->status),
+@@ -290,8 +287,7 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->status != IB_WC_SUCCESS) {
+ cqe = wc->wr_cqe;
+ frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+- frmr->fr_state = FRMR_FLUSHED_FR;
+- __frwr_sendcompletion_flush(wc, "fastreg");
++ __frwr_sendcompletion_flush(wc, frmr, "fastreg");
+ }
+ }
+
+@@ -311,8 +307,7 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->status != IB_WC_SUCCESS) {
+ cqe = wc->wr_cqe;
+ frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+- frmr->fr_state = FRMR_FLUSHED_LI;
+- __frwr_sendcompletion_flush(wc, "localinv");
++ __frwr_sendcompletion_flush(wc, frmr, "localinv");
+ }
+ }
+
+@@ -332,11 +327,9 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
+ /* WARNING: Only wr_cqe and status are reliable at this point */
+ cqe = wc->wr_cqe;
+ frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
+- if (wc->status != IB_WC_SUCCESS) {
+- frmr->fr_state = FRMR_FLUSHED_LI;
+- __frwr_sendcompletion_flush(wc, "localinv");
+- }
+- complete(&frmr->fr_linv_done);
++ if (wc->status != IB_WC_SUCCESS)
++ __frwr_sendcompletion_flush(wc, frmr, "localinv");
++ complete_all(&frmr->fr_linv_done);
+ }
+
+ /* Post a REG_MR Work Request to register a memory region
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index cd0c558..a2a7519 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -129,7 +129,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
+ ret = -EIO;
+ goto out_unmap;
+ }
+- svc_rdma_count_mappings(rdma, ctxt);
++ atomic_inc(&rdma->sc_dma_used);
+
+ memset(&send_wr, 0, sizeof(send_wr));
+ ctxt->cqe.done = svc_rdma_wc_send;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index ad1df97..2c25606 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -159,7 +159,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ ctxt->sge[pno].addr);
+ if (ret)
+ goto err;
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+
+ ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
+ ctxt->sge[pno].length = len;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 3b95b19..54d53330 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -280,7 +280,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
+ if (ib_dma_mapping_error(xprt->sc_cm_id->device,
+ sge[sge_no].addr))
+ goto err;
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+ sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
+ ctxt->count++;
+ sge_off = 0;
+@@ -489,7 +489,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ ctxt->sge[0].length, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
+ goto err;
+- svc_rdma_count_mappings(rdma, ctxt);
++ atomic_inc(&rdma->sc_dma_used);
+
+ ctxt->direction = DMA_TO_DEVICE;
+
+@@ -505,7 +505,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ if (ib_dma_mapping_error(rdma->sc_cm_id->device,
+ ctxt->sge[sge_no].addr))
+ goto err;
+- svc_rdma_count_mappings(rdma, ctxt);
++ atomic_inc(&rdma->sc_dma_used);
+ ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
+ ctxt->sge[sge_no].length = sge_bytes;
+ }
+@@ -523,9 +523,23 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
+ ctxt->count++;
+ rqstp->rq_respages[page_no] = NULL;
++ /*
++ * If there are more pages than SGE, terminate SGE
++ * list so that svc_rdma_unmap_dma doesn't attempt to
++ * unmap garbage.
++ */
++ if (page_no+1 >= sge_no)
++ ctxt->sge[page_no+1].length = 0;
+ }
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
++ /* The loop above bumps sc_dma_used for each sge. The
++ * xdr_buf.tail gets a separate sge, but resides in the
++ * same page as xdr_buf.head. Don't count it twice.
++ */
++ if (sge_no > ctxt->count)
++ atomic_dec(&rdma->sc_dma_used);
++
+ if (sge_no > rdma->sc_max_sge) {
+ pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ goto err;
+@@ -621,7 +635,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+ ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
+ inline_bytes);
+ if (ret < 0)
+- goto err0;
++ goto err1;
+
+ svc_rdma_put_req_map(rdma, vec);
+ dprintk("svcrdma: send_reply returns %d\n", ret);
+@@ -678,7 +692,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+ svc_rdma_put_context(ctxt, 1);
+ return;
+ }
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+
+ /* Prepare SEND WR */
+ memset(&err_wr, 0, sizeof(err_wr));
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 924271c..dd94401 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -198,7 +198,6 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+
+ out:
+ ctxt->count = 0;
+- ctxt->mapped_sges = 0;
+ ctxt->frmr = NULL;
+ return ctxt;
+
+@@ -222,27 +221,22 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
+ {
+ struct svcxprt_rdma *xprt = ctxt->xprt;
+- struct ib_device *device = xprt->sc_cm_id->device;
+- u32 lkey = xprt->sc_pd->local_dma_lkey;
+- unsigned int i, count;
+-
+- for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
++ int i;
++ for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+ /*
+ * Unmap the DMA addr in the SGE if the lkey matches
+ * the local_dma_lkey, otherwise, ignore it since it is
+ * an FRMR lkey and will be unmapped later when the
+ * last WR that uses it completes.
+ */
+- if (ctxt->sge[i].lkey == lkey) {
+- count++;
+- ib_dma_unmap_page(device,
++ if (ctxt->sge[i].lkey == xprt->sc_pd->local_dma_lkey) {
++ atomic_dec(&xprt->sc_dma_used);
++ ib_dma_unmap_page(xprt->sc_cm_id->device,
+ ctxt->sge[i].addr,
+ ctxt->sge[i].length,
+ ctxt->direction);
+ }
+ }
+- ctxt->mapped_sges = 0;
+- atomic_sub(count, &xprt->sc_dma_used);
+ }
+
+ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
+@@ -606,7 +600,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
+ goto err_put_ctxt;
+- svc_rdma_count_mappings(xprt, ctxt);
++ atomic_inc(&xprt->sc_dma_used);
+ ctxt->sge[sge_no].addr = pa;
+ ctxt->sge[sge_no].length = PAGE_SIZE;
+ ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index edc0344..a71b0f5 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -207,8 +207,7 @@ struct rpcrdma_rep {
+ enum rpcrdma_frmr_state {
+ FRMR_IS_INVALID, /* ready to be used */
+ FRMR_IS_VALID, /* in use */
+- FRMR_FLUSHED_FR, /* flushed FASTREG WR */
+- FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
++ FRMR_IS_STALE, /* failed completion */
+ };
+
+ struct rpcrdma_frmr {
+diff --git a/sound/core/info.c b/sound/core/info.c
+index 8ab72e0..895362a 100644
+--- a/sound/core/info.c
++++ b/sound/core/info.c
+@@ -325,15 +325,10 @@ static ssize_t snd_info_text_entry_write(struct file *file,
+ size_t next;
+ int err = 0;
+
+- if (!entry->c.text.write)
+- return -EIO;
+ pos = *offset;
+ if (!valid_pos(pos, count))
+ return -EIO;
+ next = pos + count;
+- /* don't handle too large text inputs */
+- if (next > 16 * 1024)
+- return -EIO;
+ mutex_lock(&entry->access);
+ buf = data->wbuffer;
+ if (!buf) {
+@@ -371,9 +366,7 @@ static int snd_info_seq_show(struct seq_file *seq, void *p)
+ struct snd_info_private_data *data = seq->private;
+ struct snd_info_entry *entry = data->entry;
+
+- if (!entry->c.text.read) {
+- return -EIO;
+- } else {
++ if (entry->c.text.read) {
+ data->rbuffer->buffer = (char *)seq; /* XXX hack! */
+ entry->c.text.read(entry, data->rbuffer);
+ }
+diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
+index 3670086b..e07807d 100644
+--- a/sound/soc/codecs/cs4270.c
++++ b/sound/soc/codecs/cs4270.c
+@@ -148,11 +148,11 @@ SND_SOC_DAPM_OUTPUT("AOUTR"),
+ };
+
+ static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
+- { "Capture", NULL, "AINL" },
+- { "Capture", NULL, "AINR" },
++ { "Capture", NULL, "AINA" },
++ { "Capture", NULL, "AINB" },
+
+- { "AOUTL", NULL, "Playback" },
+- { "AOUTR", NULL, "Playback" },
++ { "AOUTA", NULL, "Playback" },
++ { "AOUTB", NULL, "Playback" },
+ };
+
+ /**
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index 7b7a380..e3e7641 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -785,7 +785,8 @@ static void skl_remove(struct pci_dev *pci)
+
+ release_firmware(skl->tplg);
+
+- pm_runtime_get_noresume(&pci->dev);
++ if (pci_dev_run_wake(pci))
++ pm_runtime_get_noresume(&pci->dev);
+
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(ebus);
+diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
+index 03c18db..44f170c 100644
+--- a/sound/soc/sunxi/sun4i-codec.c
++++ b/sound/soc/sunxi/sun4i-codec.c
+@@ -738,11 +738,11 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+- return ERR_PTR(-ENOMEM);
++ return NULL;
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+- return ERR_PTR(-ENOMEM);
++ return NULL;
+
+ card->dev = dev;
+ card->name = "sun4i-codec";
+@@ -842,8 +842,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
+ }
+
+ card = sun4i_codec_create_card(&pdev->dev);
+- if (IS_ERR(card)) {
+- ret = PTR_ERR(card);
++ if (!card) {
+ dev_err(&pdev->dev, "Failed to create our card\n");
+ goto err_unregister_codec;
+ }
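+
+ The sun4i-codec hunks step the driver back from ERR_PTR()-style returns
+ to bare NULL. The ERR_PTR convention lets a constructor report *which*
+ error occurred through the pointer itself, so the caller can propagate a
+ real errno instead of guessing. Toy versions of the helpers (grossly
+ simplified relative to include/linux/err.h):
+
+   #include <stdio.h>
+   #include <errno.h>
+
+   #define ERR_PTR(err)  ((void *)(long)(err))
+   #define PTR_ERR(ptr)  ((long)(ptr))
+   #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)
+
+   static void *create_card(int fail)
+   {
+           static int card;  /* stand-in object */
+
+           if (fail)
+                   return ERR_PTR(-ENOMEM);  /* say *why* we failed */
+           return &card;
+   }
+
+   int main(void)
+   {
+           void *card = create_card(1);
+
+           if (IS_ERR(card))  /* a NULL check would miss this */
+                   printf("create failed: %ld\n", PTR_ERR(card));
+           return 0;
+   }
+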
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 4ad1eac..7aee954 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -595,8 +595,7 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
+ u64 nr_entries;
+ hbt->timer(hbt->arg);
+
+- if (hist_browser__has_filter(browser) ||
+- symbol_conf.report_hierarchy)
++ if (hist_browser__has_filter(browser))
+ hist_browser__update_nr_entries(browser);
+
+ nr_entries = hist_browser__nr_entries(browser);
+diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
+index 1eef0ae..b4bf769 100644
+--- a/tools/power/cpupower/utils/cpufreq-set.c
++++ b/tools/power/cpupower/utils/cpufreq-set.c
+@@ -296,7 +296,7 @@ int cmd_freq_set(int argc, char **argv)
+ struct cpufreq_affected_cpus *cpus;
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+- cpupower_is_cpu_online(cpu) != 1)
++ cpupower_is_cpu_online(cpu))
+ continue;
+
+ cpus = cpufreq_get_related_cpus(cpu);
+@@ -316,7 +316,10 @@ int cmd_freq_set(int argc, char **argv)
+ cpu <= bitmask_last(cpus_chosen); cpu++) {
+
+ if (!bitmask_isbitset(cpus_chosen, cpu) ||
+- cpupower_is_cpu_online(cpu) != 1)
++ cpupower_is_cpu_online(cpu))
++ continue;
++
++ if (cpupower_is_cpu_online(cpu) != 1)
+ continue;
+
+ printf(_("Setting cpu: %d\n"), cpu);
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index d1b080c..3bad3c5 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -453,33 +453,17 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
+ return container_of(dev, struct vgic_io_device, dev);
+ }
+
+-static bool check_region(const struct kvm *kvm,
+- const struct vgic_register_region *region,
++static bool check_region(const struct vgic_register_region *region,
+ gpa_t addr, int len)
+ {
+- int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+-
+- switch (len) {
+- case sizeof(u8):
+- flags = VGIC_ACCESS_8bit;
+- break;
+- case sizeof(u32):
+- flags = VGIC_ACCESS_32bit;
+- break;
+- case sizeof(u64):
+- flags = VGIC_ACCESS_64bit;
+- break;
+- default:
+- return false;
+- }
+-
+- if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+- if (!region->bits_per_irq)
+- return true;
+-
+- /* Do we access a non-allocated IRQ? */
+- return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+- }
++ if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
++ return true;
++ if ((region->access_flags & VGIC_ACCESS_32bit) &&
++ len == sizeof(u32) && !(addr & 3))
++ return true;
++ if ((region->access_flags & VGIC_ACCESS_64bit) &&
++ len == sizeof(u64) && !(addr & 7))
++ return true;
+
+ return false;
+ }
+@@ -493,7 +477,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+
+ region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+ addr - iodev->base_addr);
+- if (!region || !check_region(vcpu->kvm, region, addr, len)) {
++ if (!region || !check_region(region, addr, len)) {
+ memset(val, 0, len);
+ return 0;
+ }
+@@ -526,7 +510,10 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+
+ region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+ addr - iodev->base_addr);
+- if (!region || !check_region(vcpu->kvm, region, addr, len))
++ if (!region)
++ return 0;
++
++ if (!check_region(region, addr, len))
+ return 0;
+
+ switch (iodev->iodev_type) {
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
+index ba63d91..0b3ecf9 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.h
++++ b/virt/kvm/arm/vgic/vgic-mmio.h
+@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
+ #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
+
+ /*
+- * (addr & mask) gives us the _byte_ offset for the INT ID.
+- * We multiply this by 8 the get the _bit_ offset, then divide this by
+- * the number of bits to learn the actual INT ID.
+- * But instead of a division (which requires a "long long div" implementation),
+- * we shift by the binary logarithm of <bits>.
+- * This assumes that <bits> is a power of two.
++ * (addr & mask) gives us the byte offset for the INT ID, so we want to
++ * divide this by 'bytes per irq' to get the INT ID, which is given
++ * by '(bits) / 8'. But we do this with fixed-point-arithmetic and
++ * take advantage of the fact that division by a fraction equals
++ * multiplication with the inverted fraction, and scale up both the
++ * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ */
+ #define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
+- 8 >> ilog2(bits))
++ 64 / (bits) / 8)
+
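+
+ The VGIC_ADDR_TO_INTID rewrite trades a shift by ilog2(bits) for a
+ fixed-point multiply/divide; for the power-of-two bits-per-IRQ values the
+ VGIC uses, the two forms are arithmetically identical. A quick
+ stand-alone check (mask and ilog2 re-implemented for the demo):
+
+   #include <stdio.h>
+
+   static unsigned long mask(int bits) { return (bits * 1024UL / 8) - 1; }
+
+   static int ilog2(int v) { int l = 0; while (v >>= 1) l++; return l; }
+
+   static unsigned long intid_shift(unsigned long addr, int bits)
+   {
+           return ((addr & mask(bits)) * 8) >> ilog2(bits);
+   }
+
+   static unsigned long intid_fixed(unsigned long addr, int bits)
+   {
+           return (addr & mask(bits)) * 64 / bits / 8;
+   }
+
+   int main(void)
+   {
+           int bits;
+
+           /* e.g. bits=8, addr=0x40: both forms yield INT ID 64 */
+           for (bits = 1; bits <= 64; bits <<= 1)
+                   printf("bits=%2d addr=0x40 -> shift=%lu fixed=%lu\n",
+                          bits, intid_shift(0x40, bits),
+                          intid_fixed(0x40, bits));
+           return 0;
+   }
+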
+ /*
+ * Some VGIC registers store per-IRQ information, with a different number