Diffstat (limited to '4.8.15/1012_linux-4.8.13.patch')
-rw-r--r--  4.8.15/1012_linux-4.8.13.patch  1063
1 file changed, 1063 insertions, 0 deletions
diff --git a/4.8.15/1012_linux-4.8.13.patch b/4.8.15/1012_linux-4.8.13.patch
new file mode 100644
index 0000000..c742393
--- /dev/null
+++ b/4.8.15/1012_linux-4.8.13.patch
@@ -0,0 +1,1063 @@
+diff --git a/Makefile b/Makefile
+index 7b0c92f..b38abe9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
+index 08e7e2a..a36e860 100644
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -22,10 +22,11 @@
+ static inline void __delay(unsigned long loops)
+ {
+ __asm__ __volatile__(
+- " lp 1f \n"
+- " nop \n"
+- "1: \n"
+- : "+l"(loops));
++ " mov lp_count, %0 \n"
++ " lp 1f \n"
++ " nop \n"
++ "1: \n"
++ : : "r"(loops));
+ }
+
+ extern void __bad_udelay(void);
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 89eeb37..e94ca72 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+ #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
+
+ /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
+ #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
+index 123a58b..f0b857d 100644
+--- a/arch/arm64/boot/dts/arm/juno-r1.dts
++++ b/arch/arm64/boot/dts/arm/juno-r1.dts
+@@ -76,7 +76,7 @@
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+- entry-latency-us = <300>;
++ entry-latency-us = <400>;
+ exit-latency-us = <1200>;
+ min-residency-us = <2500>;
+ };
+diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
+index 007be82..26aaa6a 100644
+--- a/arch/arm64/boot/dts/arm/juno-r2.dts
++++ b/arch/arm64/boot/dts/arm/juno-r2.dts
+@@ -76,7 +76,7 @@
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+- entry-latency-us = <300>;
++ entry-latency-us = <400>;
+ exit-latency-us = <1200>;
+ min-residency-us = <2500>;
+ };
+diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
+index a7270ef..6e154d9 100644
+--- a/arch/arm64/boot/dts/arm/juno.dts
++++ b/arch/arm64/boot/dts/arm/juno.dts
+@@ -76,7 +76,7 @@
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ local-timer-stop;
+- entry-latency-us = <300>;
++ entry-latency-us = <400>;
+ exit-latency-us = <1200>;
+ min-residency-us = <2500>;
+ };
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 7099f26..b96346b 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -90,7 +90,7 @@ struct arm64_cpu_capabilities {
+ u16 capability;
+ int def_scope; /* default scope */
+ bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
+- void (*enable)(void *); /* Called on all active CPUs */
++ int (*enable)(void *); /* Called on all active CPUs */
+ union {
+ struct { /* To be used for erratum handling only */
+ u32 midr_model;
+diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
+index db0563c..f7865dd 100644
+--- a/arch/arm64/include/asm/exec.h
++++ b/arch/arm64/include/asm/exec.h
+@@ -18,6 +18,9 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+
++#include <linux/sched.h>
++
+ extern unsigned long arch_align_stack(unsigned long sp);
++void uao_thread_switch(struct task_struct *next);
+
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index ace0a96..3be0ab0 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -190,8 +190,8 @@ static inline void spin_lock_prefetch(const void *ptr)
+
+ #endif
+
+-void cpu_enable_pan(void *__unused);
+-void cpu_enable_uao(void *__unused);
+-void cpu_enable_cache_maint_trap(void *__unused);
++int cpu_enable_pan(void *__unused);
++int cpu_enable_uao(void *__unused);
++int cpu_enable_cache_maint_trap(void *__unused);
+
+ #endif /* __ASM_PROCESSOR_H */
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 62272ea..94a0330 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -19,7 +19,9 @@
+ #define pr_fmt(fmt) "CPU features: " fmt
+
+ #include <linux/bsearch.h>
++#include <linux/cpumask.h>
+ #include <linux/sort.h>
++#include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+@@ -936,7 +938,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+ for (; caps->matches; caps++)
+ if (caps->enable && cpus_have_cap(caps->capability))
+- on_each_cpu(caps->enable, NULL, true);
++ /*
++ * Use stop_machine() as it schedules the work allowing
++ * us to modify PSTATE, instead of on_each_cpu() which
++ * uses an IPI, giving us a PSTATE that disappears when
++ * we return.
++ */
++ stop_machine(caps->enable, NULL, cpu_online_mask);
+ }
+
+ /*
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 6cd2612..9cc8667 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -49,6 +49,7 @@
+ #include <asm/alternative.h>
+ #include <asm/compat.h>
+ #include <asm/cacheflush.h>
++#include <asm/exec.h>
+ #include <asm/fpsimd.h>
+ #include <asm/mmu_context.h>
+ #include <asm/processor.h>
+@@ -303,7 +304,7 @@ static void tls_thread_switch(struct task_struct *next)
+ }
+
+ /* Restore the UAO state depending on next's addr_limit */
+-static void uao_thread_switch(struct task_struct *next)
++void uao_thread_switch(struct task_struct *next)
+ {
+ if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+ if (task_thread_info(next)->addr_limit == KERNEL_DS)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index b616e365..23ddf55 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,8 +1,11 @@
+ #include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
++#include <asm/alternative.h>
+ #include <asm/cacheflush.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
++#include <asm/exec.h>
+ #include <asm/pgtable.h>
+ #include <asm/memory.h>
+ #include <asm/mmu_context.h>
+@@ -48,6 +51,14 @@ void notrace __cpu_suspend_exit(void)
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
+ /*
++ * PSTATE was not saved over suspend/resume, re-enable any detected
++ * features that might not have been set correctly.
++ */
++ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
++ CONFIG_ARM64_PAN));
++ uao_thread_switch(current);
++
++ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 771a01a7f..9595d3d 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -428,9 +428,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ }
+
+-void cpu_enable_cache_maint_trap(void *__unused)
++int cpu_enable_cache_maint_trap(void *__unused)
+ {
+ config_sctlr_el1(SCTLR_EL1_UCI, 0);
++ return 0;
+ }
+
+ #define __user_cache_maint(insn, address, res) \
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 05d2bd7..67506c3 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -29,7 +29,9 @@
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
+ #include <linux/perf_event.h>
++#include <linux/preempt.h>
+
++#include <asm/bug.h>
+ #include <asm/cpufeature.h>
+ #include <asm/exception.h>
+ #include <asm/debug-monitors.h>
+@@ -671,9 +673,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
+ NOKPROBE_SYMBOL(do_debug_exception);
+
+ #ifdef CONFIG_ARM64_PAN
+-void cpu_enable_pan(void *__unused)
++int cpu_enable_pan(void *__unused)
+ {
++ /*
++ * We modify PSTATE. This won't work from irq context as the PSTATE
++ * is discarded once we return from the exception.
++ */
++ WARN_ON_ONCE(in_interrupt());
++
+ config_sctlr_el1(SCTLR_EL1_SPAN, 0);
++ asm(SET_PSTATE_PAN(1));
++ return 0;
+ }
+ #endif /* CONFIG_ARM64_PAN */
+
+@@ -684,8 +694,9 @@ void cpu_enable_pan(void *__unused)
+ * We need to enable the feature at runtime (instead of adding it to
+ * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
+ */
+-void cpu_enable_uao(void *__unused)
++int cpu_enable_uao(void *__unused)
+ {
+ asm(SET_PSTATE_UAO(1));
++ return 0;
+ }
+ #endif /* CONFIG_ARM64_UAO */
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index d0efb5c..a4e070a 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2344,7 +2344,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ frame.next_frame = 0;
+ frame.return_address = 0;
+
+- if (!access_ok(VERIFY_READ, fp, 8))
++ if (!valid_user_frame(fp, sizeof(frame)))
+ break;
+
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
+@@ -2354,9 +2354,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
+ if (bytes != 0)
+ break;
+
+- if (!valid_user_frame(fp, sizeof(frame)))
+- break;
+-
+ perf_callchain_store(entry, cs_base + frame.return_address);
+ fp = compat_ptr(ss_base + frame.next_frame);
+ }
+@@ -2405,7 +2402,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+ frame.next_frame = NULL;
+ frame.return_address = 0;
+
+- if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
++ if (!valid_user_frame(fp, sizeof(frame)))
+ break;
+
+ bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
+@@ -2415,9 +2412,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+ if (bytes != 0)
+ break;
+
+- if (!valid_user_frame(fp, sizeof(frame)))
+- break;
+-
+ perf_callchain_store(entry, frame.return_address);
+ fp = (void __user *)frame.next_frame;
+ }
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index e207b33..1e007a9 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1088,7 +1088,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+ desc[1] = tf->command; /* status */
+ desc[2] = tf->device;
+ desc[3] = tf->nsect;
+- desc[0] = 0;
++ desc[7] = 0;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[8] |= 0x80;
+ if (tf->hob_nsect)
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 04365b1..5163c8f 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class,
+ zram = idr_find(&zram_index_idr, dev_id);
+ if (zram) {
+ ret = zram_remove(zram);
+- idr_remove(&zram_index_idr, dev_id);
++ if (!ret)
++ idr_remove(&zram_index_idr, dev_id);
+ } else {
+ ret = -ENODEV;
+ }
+diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
+index 838b22a..f2c9274 100644
+--- a/drivers/clk/sunxi/clk-sunxi.c
++++ b/drivers/clk/sunxi/clk-sunxi.c
+@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
+ else
+ calcp = 3;
+
+- calcm = (req->parent_rate >> calcp) - 1;
++ calcm = (div >> calcp) - 1;
+
+ req->rate = (req->parent_rate >> calcp) / (calcm + 1);
+ req->m = calcm;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 10b5ddf..1ed085f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -33,6 +33,7 @@ struct amdgpu_atpx {
+
+ static struct amdgpu_atpx_priv {
+ bool atpx_detected;
++ bool bridge_pm_usable;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle other_handle;
+@@ -200,7 +201,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+- atpx->functions.power_cntl = false;
++ /*
++ * Disable legacy PM methods only when pcie port PM is usable,
++ * otherwise the device might fail to power off or power on.
++ */
++ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
+
+@@ -546,17 +551,25 @@ static bool amdgpu_atpx_detect(void)
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
++ bool d3_supported = false;
++ struct pci_dev *parent_pdev;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ if (has_atpx && vga_count == 2) {
+@@ -564,6 +577,7 @@ static bool amdgpu_atpx_detect(void)
+ printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ amdgpu_atpx_priv.atpx_detected = true;
++ amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
+ amdgpu_atpx_init();
+ return true;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index a77ce99..b8e3854 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2540,7 +2540,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+- goto err_pages;
++ goto err_sg;
+ }
+ }
+ #ifdef CONFIG_SWIOTLB
+@@ -2583,8 +2583,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+
+ return 0;
+
+-err_pages:
++err_sg:
+ sg_mark_end(sg);
++err_pages:
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e26f889..35d385d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -11791,7 +11791,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+ ret = -EIO;
+- goto cleanup;
++ goto unlock;
+ }
+
+ atomic_inc(&intel_crtc->unpin_work_count);
+@@ -11877,6 +11877,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ if (!IS_ERR_OR_NULL(request))
+ i915_add_request_no_flush(request);
+ atomic_dec(&intel_crtc->unpin_work_count);
++unlock:
+ mutex_unlock(&dev->struct_mutex);
+ cleanup:
+ crtc->primary->fb = old_fb;
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index 8f62671f..54acfcc 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -249,13 +249,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
+ if (irq < 0)
+ return irq;
+
+- ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+- IRQF_TRIGGER_NONE, dev_name(dev), priv);
+- if (ret < 0) {
+- dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+- return ret;
+- }
+-
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+@@ -271,6 +264,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, priv);
+
++ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
++ IRQF_TRIGGER_NONE, dev_name(dev), priv);
++ if (ret < 0) {
++ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
++ return ret;
++ }
++
+ ret = component_add(dev, &mtk_disp_ovl_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index ddef0d4..34b4ace 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -33,6 +33,7 @@ struct radeon_atpx {
+
+ static struct radeon_atpx_priv {
+ bool atpx_detected;
++ bool bridge_pm_usable;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ struct radeon_atpx atpx;
+@@ -198,7 +199,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+- atpx->functions.power_cntl = false;
++ /*
++ * Disable legacy PM methods only when pcie port PM is usable,
++ * otherwise the device might fail to power off or power on.
++ */
++ atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
+
+@@ -543,11 +548,16 @@ static bool radeon_atpx_detect(void)
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
++ bool d3_supported = false;
++ struct pci_dev *parent_pdev;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ /* some newer PX laptops mark the dGPU as a non-VGA display device */
+@@ -555,6 +565,9 @@ static bool radeon_atpx_detect(void)
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
++
++ parent_pdev = pci_upstream_bridge(pdev);
++ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ }
+
+ if (has_atpx && vga_count == 2) {
+@@ -562,6 +575,7 @@ static bool radeon_atpx_detect(void)
+ printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
++ radeon_atpx_priv.bridge_pm_usable = d3_supported;
+ radeon_atpx_init();
+ return true;
+ }
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 5784e20..9f6203c 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
+ if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
+ &max_proto, set_properties, true))
+ return PSMOUSE_TOUCHKIT_PS2;
+-
+- if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
+- &max_proto, set_properties, true))
+- return PSMOUSE_BYD;
+ }
+
+ /*
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index a8ff969..cbc7dfa 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -2203,8 +2203,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+ is_scanning_required = 1;
+ } else {
+ mwifiex_dbg(priv->adapter, MSG,
+- "info: trying to associate to '%s' bssid %pM\n",
+- (char *)req_ssid.ssid, bss->bssid);
++ "info: trying to associate to '%.*s' bssid %pM\n",
++ req_ssid.ssid_len, (char *)req_ssid.ssid,
++ bss->bssid);
+ memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
+ break;
+ }
+@@ -2264,8 +2265,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ }
+
+ mwifiex_dbg(adapter, INFO,
+- "info: Trying to associate to %s and bssid %pM\n",
+- (char *)sme->ssid, sme->bssid);
++ "info: Trying to associate to %.*s and bssid %pM\n",
++ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
+
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+@@ -2398,8 +2399,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ }
+
+ mwifiex_dbg(priv->adapter, MSG,
+- "info: trying to join to %s and bssid %pM\n",
+- (char *)params->ssid, params->bssid);
++ "info: trying to join to %.*s and bssid %pM\n",
++ params->ssid_len, (char *)params->ssid, params->bssid);
+
+ mwifiex_set_ibss_params(priv, params);
+
+diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
+index db553dc..2b6a592 100644
+--- a/drivers/pci/pcie/aer/aer_inject.c
++++ b/drivers/pci/pcie/aer/aer_inject.c
+@@ -307,20 +307,6 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
+ return 0;
+ }
+
+-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+-{
+- while (1) {
+- if (!pci_is_pcie(dev))
+- break;
+- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+- return dev;
+- if (!dev->bus->self)
+- break;
+- dev = dev->bus->self;
+- }
+- return NULL;
+-}
+-
+ static int find_aer_device_iter(struct device *device, void *data)
+ {
+ struct pcie_device **result = data;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 93f280d..f6eff4a 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
+ }
+
++static bool pcie_root_rcb_set(struct pci_dev *dev)
++{
++ struct pci_dev *rp = pcie_find_root_port(dev);
++ u16 lnkctl;
++
++ if (!rp)
++ return false;
++
++ pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
++ if (lnkctl & PCI_EXP_LNKCTL_RCB)
++ return true;
++
++ return false;
++}
++
+ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ {
+ int pos;
+@@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
+
+ /* Initialize Link Control Register */
+- if (pcie_cap_has_lnkctl(dev))
++ if (pcie_cap_has_lnkctl(dev)) {
++
++ /*
++ * If the Root Port supports Read Completion Boundary of
++ * 128, set RCB to 128. Otherwise, clear it.
++ */
++ hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
++ hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
++ if (pcie_root_rcb_set(dev))
++ hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
++
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+ ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
++ }
+
+ /* Find Advanced Error Reporting Enhanced Capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 0296d81..a813239 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+ if (test_bit(PWMF_EXPORTED, &pwm->flags))
+ pwm_unexport_child(parent, pwm);
+ }
++
++ put_device(parent);
+ }
+
+ static int __init pwm_sysfs_init(void)
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 030d002..5138a84 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2007,7 +2007,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
+
+ static int hpsa_slave_alloc(struct scsi_device *sdev)
+ {
+- struct hpsa_scsi_dev_t *sd;
++ struct hpsa_scsi_dev_t *sd = NULL;
+ unsigned long flags;
+ struct ctlr_info *h;
+
+@@ -2024,7 +2024,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
+ sd->target = sdev_id(sdev);
+ sd->lun = sdev->lun;
+ }
+- } else
++ }
++ if (!sd)
+ sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
+ sdev_id(sdev), sdev->lun);
+
+@@ -3805,6 +3806,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ sizeof(this_device->vendor));
+ memcpy(this_device->model, &inq_buff[16],
+ sizeof(this_device->model));
++ this_device->rev = inq_buff[2];
+ memset(this_device->device_id, 0,
+ sizeof(this_device->device_id));
+ hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
+@@ -3887,10 +3889,14 @@ static void figure_bus_target_lun(struct ctlr_info *h,
+
+ if (!is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* physical device, target and lun filled in later */
+- if (is_hba_lunid(lunaddrbytes))
++ if (is_hba_lunid(lunaddrbytes)) {
++ int bus = HPSA_HBA_BUS;
++
++ if (!device->rev)
++ bus = HPSA_LEGACY_HBA_BUS;
+ hpsa_set_bus_target_lun(device,
+- HPSA_HBA_BUS, 0, lunid & 0x3fff);
+- else
++ bus, 0, lunid & 0x3fff);
++ } else
+ /* defer target, lun assignment for physical devices */
+ hpsa_set_bus_target_lun(device,
+ HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index a1487e6..9d45dde 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t {
+ u64 sas_address;
+ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
+ unsigned char model[16]; /* bytes 16-31 of inquiry data */
++ unsigned char rev; /* byte 2 of inquiry data */
+ unsigned char raid_level; /* from inquiry page 0xC1 */
+ unsigned char volume_offline; /* discovered via TUR or VPD */
+ u16 queue_depth; /* max queue_depth for this device */
+@@ -403,6 +404,7 @@ struct offline_device_entry {
+ #define HPSA_RAID_VOLUME_BUS 1
+ #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
+ #define HPSA_HBA_BUS 0
++#define HPSA_LEGACY_HBA_BUS 3
+
+ /*
+ Send the command to the hardware
+diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
+index 04ce7cf..50c7167 100644
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -308,7 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+ fc_stats = &lport->host_stats;
+ memset(fc_stats, 0, sizeof(struct fc_host_statistics));
+
+- fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ;
++ fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
+
+ for_each_possible_cpu(cpu) {
+ struct fc_stats *stats;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index a78415d..78be4ae 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -329,11 +329,11 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
+ if (!real)
+ goto bug;
+
++ /* Handle recursion */
++ real = d_real(real, inode, open_flags);
++
+ if (!inode || inode == d_inode(real))
+ return real;
+-
+- /* Handle recursion */
+- return d_real(real, inode, open_flags);
+ bug:
+ WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
+ inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 573c5a1..0a0b2d5 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -256,7 +256,9 @@
+ #endif
+ #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
+
+-#if GCC_VERSION >= 50000
++#if GCC_VERSION >= 70000
++#define KASAN_ABI_VERSION 5
++#elif GCC_VERSION >= 50000
+ #define KASAN_ABI_VERSION 4
+ #elif GCC_VERSION >= 40902
+ #define KASAN_ABI_VERSION 3
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 01e8443..d47cc4a 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -364,16 +364,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
+ }
+
+ /*
+- * Get the offset in PAGE_SIZE.
+- * (TODO: hugepage should have ->index in PAGE_SIZE)
++ * Get index of the page with in radix-tree
++ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ */
+-static inline pgoff_t page_to_pgoff(struct page *page)
++static inline pgoff_t page_to_index(struct page *page)
+ {
+ pgoff_t pgoff;
+
+- if (unlikely(PageHeadHuge(page)))
+- return page->index << compound_order(page);
+-
+ if (likely(!PageTransTail(page)))
+ return page->index;
+
+@@ -387,6 +384,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
+ }
+
+ /*
++ * Get the offset in PAGE_SIZE.
++ * (TODO: hugepage should have ->index in PAGE_SIZE)
++ */
++static inline pgoff_t page_to_pgoff(struct page *page)
++{
++ if (unlikely(PageHeadHuge(page)))
++ return page->index << compound_order(page);
++
++ return page_to_index(page);
++}
++
++/*
+ * Return byte-offset into filesystem object for page.
+ */
+ static inline loff_t page_offset(struct page *page)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 0ab8359..03f3df0 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1896,6 +1896,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
+ return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+ }
+
++static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
++{
++ while (1) {
++ if (!pci_is_pcie(dev))
++ break;
++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
++ return dev;
++ if (!dev->bus->self)
++ break;
++ dev = dev->bus->self;
++ }
++ return NULL;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
+index d6d071f..3af60ee 100644
+--- a/include/uapi/linux/input-event-codes.h
++++ b/include/uapi/linux/input-event-codes.h
+@@ -640,7 +640,7 @@
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+-#define KEY_DATA 0x275
++#define KEY_DATA 0x277
+
+ #define BTN_TRIGGER_HAPPY 0x2c0
+ #define BTN_TRIGGER_HAPPY1 0x2c0
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 0082fce..85c5a88 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2173,6 +2173,7 @@ static int rcu_nocb_kthread(void *arg)
+ cl++;
+ c++;
+ local_bh_enable();
++ cond_resched_rcu_qs();
+ list = next;
+ }
+ trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
+index e5c2181f..03f4545 100644
+--- a/mm/kasan/kasan.h
++++ b/mm/kasan/kasan.h
+@@ -53,6 +53,9 @@ struct kasan_global {
+ #if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+ #endif
++#if KASAN_ABI_VERSION >= 5
++ char *odr_indicator;
++#endif
+ };
+
+ /**
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 728d779..87e1a7ca 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = {
+ .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
+ };
+
++#ifdef CONFIG_SYSFS
+ static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+@@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = {
+ .attrs = khugepaged_attr,
+ .name = "khugepaged",
+ };
++#endif /* CONFIG_SYSFS */
+
+ #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
+
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 14645be..9c91acc 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
+ */
+ spin_lock_irq(zone_lru_lock(zone));
+
+- nr_pages = hpage_nr_pages(page);
+- if (!TestClearPageMlocked(page))
++ if (!TestClearPageMlocked(page)) {
++ /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
++ nr_pages = 1;
+ goto unlock_out;
++ }
+
++ nr_pages = hpage_nr_pages(page);
+ __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
+
+ if (__munlock_isolate_lru_page(page, true)) {
+diff --git a/mm/truncate.c b/mm/truncate.c
+index a01cce4..8d8c62d 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+
+ if (!trylock_page(page))
+ continue;
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+ if (PageWriteback(page)) {
+ unlock_page(page);
+ continue;
+@@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
+ }
+
+ lock_page(page);
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+ wait_on_page_writeback(page);
+ truncate_inode_page(mapping, page);
+ unlock_page(page);
+@@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ if (!trylock_page(page))
+ continue;
+
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+
+ /* Middle of THP: skip */
+ if (PageTransTail(page)) {
+@@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
+ }
+
+ lock_page(page);
+- WARN_ON(page_to_pgoff(page) != index);
++ WARN_ON(page_to_index(page) != index);
+ if (page->mapping != mapping) {
+ unlock_page(page);
+ continue;
+diff --git a/mm/workingset.c b/mm/workingset.c
+index 617475f..fb1f918 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
+ local_irq_enable();
+
+- if (memcg_kmem_enabled()) {
++ if (sc->memcg) {
+ pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
+ LRU_ALL_FILE);
+ } else {
+diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
+index 2333777..8af1611 100644
+--- a/net/batman-adv/tp_meter.c
++++ b/net/batman-adv/tp_meter.c
+@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (unlikely(!primary_if)) {
+ err = BATADV_TP_REASON_DST_UNREACHABLE;
++ tp_vars->reason = err;
+ goto out;
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 0bf6709..6fb4314 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+
+ WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
+
+- kvm_notify_acked_irq(vcpu->kvm, 0,
+- intid - VGIC_NR_PRIVATE_IRQS);
++ /* Only SPIs require notification */
++ if (vgic_valid_spi(vcpu->kvm, intid))
++ kvm_notify_acked_irq(vcpu->kvm, 0,
++ intid - VGIC_NR_PRIVATE_IRQS);
+ }
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 9f0dae3..5c9f974 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+
+ WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
+
+- kvm_notify_acked_irq(vcpu->kvm, 0,
+- intid - VGIC_NR_PRIVATE_IRQS);
++ /* Only SPIs require notification */
++ if (vgic_valid_spi(vcpu->kvm, intid))
++ kvm_notify_acked_irq(vcpu->kvm, 0,
++ intid - VGIC_NR_PRIVATE_IRQS);
+ }
+
+ /*
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1950782..690d15e 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2852,10 +2852,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+
+ ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
+ if (ret < 0) {
+- ops->destroy(dev);
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
+ mutex_unlock(&kvm->lock);
++ ops->destroy(dev);
+ return ret;
+ }
+